KallPap/FRL-SHAC-Extension/examples/cfg/bptt/hopper.yaml
params:
  diff_env:
    name: HopperEnv
    stochastic_env: True
    episode_length: 1000
    MM_caching_frequency: 16

  algo:
    name: adam # ['gd', 'adam', 'sgd', 'lbfgs']

  network:
    actor: ActorStochasticMLP
    actor_mlp:
      units: [128, 64, 32]
      activation: elu
    actor_logstd_init: -1.0

  config:
    name: df_hopp_bptt
    env_name: dflex
    actor_learning_rate: 1e-3 # adam
    lr_schedule: linear # ['constant', 'linear']
    obs_rms: True
    gamma: 0.99
    betas: [0.7, 0.95] # adam
    max_epochs: 2000
    steps_num: 128
    grad_norm: 1.0
    truncate_grads: True
    num_actors: 32

    player:
      determenistic: True
      games_num: 6
      num_actors: 2
      print_stats: True
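Configs like the one above are consumed as a nested dict by the training code further down in this collection. A minimal loading sketch; the file path and the extra `general` keys (normally assembled from command-line flags) are illustrative assumptions:

import yaml

with open('examples/cfg/bptt/hopper.yaml') as f:
    cfg = yaml.safe_load(f)

# The algorithms below also expect a params/general section, normally
# filled in from command-line arguments; these values are illustrative.
cfg['params']['general'] = {
    'seed': 0,
    'device': 'cuda:0',
    'render': False,
    'train': True,
    'logdir': 'logs/tmp/bptt',
    'checkpoint': 'Base',
}

# note: PyYAML keeps '1e-3' as a string, hence the float(...) casts
# in the algorithm code below.
print(cfg['params']['config']['actor_learning_rate'])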
KallPap/FRL-SHAC-Extension/examples/cfg/bptt/snu_humanoid.yaml
params:
  diff_env:
    name: SNUHumanoidEnv
    stochastic_env: True
    episode_length: 1000
    MM_caching_frequency: 8

  algo:
    name: adam

  network:
    actor: ActorStochasticMLP
    actor_mlp:
      units: [512, 256]
      activation: elu
    actor_logstd_init: -1.0

  config:
    name: df_humanoid_ac
    env_name: dflex
    actor_learning_rate: 2e-3 # adam
    lr_schedule: linear # ['constant', 'linear']
    obs_rms: True
    gamma: 0.99
    betas: [0.7, 0.95] # adam
    max_epochs: 2000
    steps_num: 1000
    grad_norm: 1.0
    truncate_grads: True
    num_actors: 16
    save_interval: 200

    player:
      determenistic: True
      games_num: 4
      num_actors: 1
      print_stats: True
KallPap/FRL-SHAC-Extension/examples/cfg/shac/hopper.yaml
params:
  diff_env:
    name: HopperEnv
    stochastic_env: True
    episode_length: 1000
    MM_caching_frequency: 16

  network:
    actor: ActorStochasticMLP
    actor_mlp:
      units: [128, 64, 32]
      activation: elu
    critic: CriticMLP
    critic_mlp:
      units: [64, 64]
      activation: elu

  config:
    name: df_hopper_shac
    actor_learning_rate: 2e-3 # adam
    critic_learning_rate: 2e-4 # adam
    lr_schedule: linear # ['constant', 'linear']
    target_critic_alpha: 0.2
    obs_rms: True
    ret_rms: False
    critic_iterations: 16
    critic_method: td-lambda
    lambda: 0.95
    num_batch: 4
    gamma: 0.99
    betas: [0.7, 0.95] # adam
    max_epochs: 2000
    steps_num: 32
    grad_norm: 1.0
    truncate_grads: True
    num_actors: 256
    save_interval: 400

    player:
      determenistic: False
      games_num: 1
      num_actors: 1
      print_stats: True
KallPap/FRL-SHAC-Extension/examples/cfg/shac/cartpole_swing_up.yaml
params:
  diff_env:
    name: CartPoleSwingUpEnv
    stochastic_env: True
    episode_length: 240
    MM_caching_frequency: 4

  network:
    actor: ActorStochasticMLP #ActorDeterministicMLP
    actor_mlp:
      units: [64, 64]
      activation: elu
    critic: CriticMLP
    critic_mlp:
      units: [64, 64]
      activation: elu

  config:
    name: df_cartpole_swing_up_shac
    actor_learning_rate: 1e-2 # adam
    critic_learning_rate: 1e-3 # adam
    lr_schedule: linear # ['constant', 'linear']
    target_critic_alpha: 0.2
    obs_rms: True
    ret_rms: False
    critic_iterations: 16
    critic_method: td-lambda # ['td-lambda', 'one-step']
    lambda: 0.95
    num_batch: 4
    gamma: 0.99
    betas: [0.7, 0.95] # adam
    max_epochs: 500
    steps_num: 32
    grad_norm: 1.0
    truncate_grads: True
    num_actors: 64
    save_interval: 100

    player:
      determenistic: True
      games_num: 4
      num_actors: 4
      print_stats: True
KallPap/FRL-SHAC-Extension/examples/logs/tmp/shac/04-23-2024-16-55-19/cfg.yaml
params:
  config:
    actor_learning_rate: 2e-3
    betas:
    - 0.7
    - 0.95
    critic_iterations: 16
    critic_learning_rate: 2e-3
    critic_method: td-lambda
    gamma: 0.99
    grad_norm: 1.0
    lambda: 0.95
    lr_schedule: linear
    max_epochs: 2000
    name: df_ant_shac
    num_actors: 64
    num_batch: 4
    obs_rms: true
    player:
      determenistic: true
      games_num: 1
      num_actors: 1
      print_stats: true
    ret_rms: false
    save_interval: 400
    steps_num: 32
    target_critic_alpha: 0.2
    truncate_grads: true
  diff_env:
    MM_caching_frequency: 16
    episode_length: 1000
    name: AntEnv
    stochastic_env: true
  general:
    cfg: ./cfg/shac/ant.yaml
    checkpoint: Base
    device: !!python/object/apply:torch.device
    - cpu
    logdir: logs/tmp/shac/04-23-2024-16-55-19
    no_time_stamp: false
    play: false
    render: false
    seed: 0
    test: false
    train: true
  network:
    actor: ActorStochasticMLP
    actor_mlp:
      activation: elu
      units:
      - 128
      - 64
      - 32
    critic: CriticMLP
    critic_mlp:
      activation: elu
      units:
      - 64
      - 64
KallPap/FRL-SHAC-Extension/optim/gd.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import torch
from torch.optim.optimizer import Optimizer
class GD(Optimizer):
    r"""Implements Pure Gradient Descent algorithm.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
    """

    def __init__(self, params, lr=1e-3):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        defaults = dict(lr=lr)
        super(GD, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(GD, self).__setstate__(state)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Args:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                # skip parameters that did not receive a gradient
                if p.grad is None:
                    continue
                p.add_(p.grad, alpha = -group['lr'])

        return loss
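A minimal usage sketch of the GD optimizer above, fitting one tensor on a made-up quadratic objective:

import torch

w = torch.tensor([5.0], requires_grad=True)
opt = GD([w], lr=0.1)
for _ in range(100):
    opt.zero_grad()
    loss = ((w - 2.0) ** 2).sum()   # illustrative objective only
    loss.backward()
    opt.step()
print(w)  # approaches 2.0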
KallPap/FRL-SHAC-Extension/algorithms/shac.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import sys, os
from torch.nn.utils.clip_grad import clip_grad_norm_
project_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(project_dir)
import time
import numpy as np
import copy
import torch
from tensorboardX import SummaryWriter
import yaml
import dflex as df
import envs
import models.actor
import models.critic
from utils.common import *
import utils.torch_utils as tu
from utils.running_mean_std import RunningMeanStd
from utils.dataset import CriticDataset
from utils.time_report import TimeReport
from utils.average_meter import AverageMeter
class SHAC:
    def __init__(self, cfg):
        env_fn = getattr(envs, cfg["params"]["diff_env"]["name"])

        seeding(cfg["params"]["general"]["seed"])

        self.env = env_fn(num_envs = cfg["params"]["config"]["num_actors"], \
                          device = cfg["params"]["general"]["device"], \
                          render = cfg["params"]["general"]["render"], \
                          seed = cfg["params"]["general"]["seed"], \
                          episode_length=cfg["params"]["diff_env"].get("episode_length", 250), \
                          stochastic_init = cfg["params"]["diff_env"].get("stochastic_env", True), \
                          MM_caching_frequency = cfg["params"]['diff_env'].get('MM_caching_frequency', 1), \
                          no_grad = False)

        # print('num_envs = ', self.env.num_envs)
        # print('num_actions = ', self.env.num_actions)
        # print('num_obs = ', self.env.num_obs)

        self.num_envs = self.env.num_envs
        self.num_obs = self.env.num_obs
        self.num_actions = self.env.num_actions
        self.max_episode_length = self.env.episode_length
        self.device = cfg["params"]["general"]["device"]

        self.gamma = cfg['params']['config'].get('gamma', 0.99)

        self.critic_method = cfg['params']['config'].get('critic_method', 'one-step') # ['one-step', 'td-lambda']
        if self.critic_method == 'td-lambda':
            self.lam = cfg['params']['config'].get('lambda', 0.95)

        self.steps_num = cfg["params"]["config"]["steps_num"]
        self.max_epochs = cfg["params"]["config"]["max_epochs"]
        self.actor_lr = float(cfg["params"]["config"]["actor_learning_rate"])
        self.critic_lr = float(cfg['params']['config']['critic_learning_rate'])
        self.lr_schedule = cfg['params']['config'].get('lr_schedule', 'linear')

        self.target_critic_alpha = cfg['params']['config'].get('target_critic_alpha', 0.4)

        self.obs_rms = None
        if cfg['params']['config'].get('obs_rms', False):
            self.obs_rms = RunningMeanStd(shape = (self.num_obs), device = self.device)

        self.ret_rms = None
        if cfg['params']['config'].get('ret_rms', False):
            self.ret_rms = RunningMeanStd(shape = (), device = self.device)

        self.rew_scale = cfg['params']['config'].get('rew_scale', 1.0)

        self.critic_iterations = cfg['params']['config'].get('critic_iterations', 16)
        self.num_batch = cfg['params']['config'].get('num_batch', 4)
        self.batch_size = self.num_envs * self.steps_num // self.num_batch
        self.name = cfg['params']['config'].get('name', "Ant")

        self.truncate_grad = cfg["params"]["config"]["truncate_grads"]
        self.grad_norm = cfg["params"]["config"]["grad_norm"]

        if cfg['params']['general']['train']:
            self.log_dir = cfg["params"]["general"]["logdir"]
            os.makedirs(self.log_dir, exist_ok = True)
            # save config
            save_cfg = copy.deepcopy(cfg)
            if 'general' in save_cfg['params']:
                deleted_keys = []
                for key in save_cfg['params']['general'].keys():
                    if key in save_cfg['params']['config']:
                        deleted_keys.append(key)
                for key in deleted_keys:
                    del save_cfg['params']['general'][key]

            yaml.dump(save_cfg, open(os.path.join(self.log_dir, 'cfg.yaml'), 'w'))
            self.writer = SummaryWriter(os.path.join(self.log_dir, 'log'))
            # save interval
            self.save_interval = cfg["params"]["config"].get("save_interval", 500)
            # stochastic inference
            self.stochastic_evaluation = True
        else:
            self.stochastic_evaluation = not (cfg['params']['config']['player'].get('determenistic', False) or cfg['params']['config']['player'].get('deterministic', False))
            self.steps_num = self.env.episode_length

        # create actor critic network
        self.actor_name = cfg["params"]["network"].get("actor", 'ActorStochasticMLP') # choices: ['ActorDeterministicMLP', 'ActorStochasticMLP']
        self.critic_name = cfg["params"]["network"].get("critic", 'CriticMLP')
        actor_fn = getattr(models.actor, self.actor_name)
        self.actor = actor_fn(self.num_obs, self.num_actions, cfg['params']['network'], device = self.device)
        critic_fn = getattr(models.critic, self.critic_name)
        self.critic = critic_fn(self.num_obs, cfg['params']['network'], device = self.device)
        self.all_params = list(self.actor.parameters()) + list(self.critic.parameters())
        self.target_critic = copy.deepcopy(self.critic)

        if cfg['params']['general']['train']:
            self.save('init_policy')

        # initialize optimizer
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), betas = cfg['params']['config']['betas'], lr = self.actor_lr)
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), betas = cfg['params']['config']['betas'], lr = self.critic_lr)

        # replay buffer
        self.obs_buf = torch.zeros((self.steps_num, self.num_envs, self.num_obs), dtype = torch.float32, device = self.device)
        self.rew_buf = torch.zeros((self.steps_num, self.num_envs), dtype = torch.float32, device = self.device)
        self.done_mask = torch.zeros((self.steps_num, self.num_envs), dtype = torch.float32, device = self.device)
        self.next_values = torch.zeros((self.steps_num, self.num_envs), dtype = torch.float32, device = self.device)
        self.target_values = torch.zeros((self.steps_num, self.num_envs), dtype = torch.float32, device = self.device)
        self.ret = torch.zeros((self.num_envs), dtype = torch.float32, device = self.device)

        # for kl divergence computing
        self.old_mus = torch.zeros((self.steps_num, self.num_envs, self.num_actions), dtype = torch.float32, device = self.device)
        self.old_sigmas = torch.zeros((self.steps_num, self.num_envs, self.num_actions), dtype = torch.float32, device = self.device)
        self.mus = torch.zeros((self.steps_num, self.num_envs, self.num_actions), dtype = torch.float32, device = self.device)
        self.sigmas = torch.zeros((self.steps_num, self.num_envs, self.num_actions), dtype = torch.float32, device = self.device)

        # counting variables
        self.iter_count = 0
        self.step_count = 0

        # loss variables
        self.episode_length_his = []
        self.episode_loss_his = []
        self.episode_discounted_loss_his = []
        self.episode_loss = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
        self.episode_discounted_loss = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
        self.episode_gamma = torch.ones(self.num_envs, dtype = torch.float32, device = self.device)
        self.episode_length = torch.zeros(self.num_envs, dtype = int, device = self.device)
        self.best_policy_loss = np.inf
        self.actor_loss = np.inf
        self.value_loss = np.inf

        # average meter
        self.episode_loss_meter = AverageMeter(1, 100).to(self.device)
        self.episode_discounted_loss_meter = AverageMeter(1, 100).to(self.device)
        self.episode_length_meter = AverageMeter(1, 100).to(self.device)

        # timer
        self.time_report = TimeReport()
    def compute_actor_loss(self, deterministic = False):
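        # Short-horizon actor objective: accumulate (discounted) rewards for
        # at most steps_num steps per environment, then close the trajectory
        # off with the target critic's value estimate, so gradients flow
        # through the differentiable simulator only over a short window.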
        rew_acc = torch.zeros((self.steps_num + 1, self.num_envs), dtype = torch.float32, device = self.device)
        gamma = torch.ones(self.num_envs, dtype = torch.float32, device = self.device)
        next_values = torch.zeros((self.steps_num + 1, self.num_envs), dtype = torch.float32, device = self.device)

        actor_loss = torch.tensor(0., dtype = torch.float32, device = self.device)

        with torch.no_grad():
            if self.obs_rms is not None:
                obs_rms = copy.deepcopy(self.obs_rms)
            if self.ret_rms is not None:
                ret_var = self.ret_rms.var.clone()

        # initialize trajectory to cut off gradients between episodes.
        obs = self.env.initialize_trajectory()
        if self.obs_rms is not None:
            # update obs rms
            with torch.no_grad():
                self.obs_rms.update(obs)
            # normalize the current obs
            obs = obs_rms.normalize(obs)
        for i in range(self.steps_num):
            # collect data for critic training
            with torch.no_grad():
                self.obs_buf[i] = obs.clone()

            actions = self.actor(obs, deterministic = deterministic)

            obs, rew, done, extra_info = self.env.step(torch.tanh(actions))

            with torch.no_grad():
                raw_rew = rew.clone()

            # scale the reward
            rew = rew * self.rew_scale

            if self.obs_rms is not None:
                # update obs rms
                with torch.no_grad():
                    self.obs_rms.update(obs)
                # normalize the current obs
                obs = obs_rms.normalize(obs)

            if self.ret_rms is not None:
                # update ret rms
                with torch.no_grad():
                    self.ret = self.ret * self.gamma + rew
                    self.ret_rms.update(self.ret)
                rew = rew / torch.sqrt(ret_var + 1e-6)

            self.episode_length += 1

            done_env_ids = done.nonzero(as_tuple = False).squeeze(-1)

            next_values[i + 1] = self.target_critic(obs).squeeze(-1)

            for id in done_env_ids:
                if torch.isnan(extra_info['obs_before_reset'][id]).sum() > 0 \
                    or torch.isinf(extra_info['obs_before_reset'][id]).sum() > 0 \
                    or (torch.abs(extra_info['obs_before_reset'][id]) > 1e6).sum() > 0: # ugly fix for nan values
                    next_values[i + 1, id] = 0.
                elif self.episode_length[id] < self.max_episode_length: # early termination
                    next_values[i + 1, id] = 0.
                else: # otherwise, use terminal value critic to estimate the long-term performance
                    if self.obs_rms is not None:
                        real_obs = obs_rms.normalize(extra_info['obs_before_reset'][id])
                    else:
                        real_obs = extra_info['obs_before_reset'][id]
                    next_values[i + 1, id] = self.target_critic(real_obs).squeeze(-1)

            if (next_values[i + 1] > 1e6).sum() > 0 or (next_values[i + 1] < -1e6).sum() > 0:
                print('next value error')
                raise ValueError

            rew_acc[i + 1, :] = rew_acc[i, :] + gamma * rew

            if i < self.steps_num - 1:
                actor_loss = actor_loss + (- rew_acc[i + 1, done_env_ids] - self.gamma * gamma[done_env_ids] * next_values[i + 1, done_env_ids]).sum()
            else:
                # terminate all envs at the end of optimization iteration
                actor_loss = actor_loss + (- rew_acc[i + 1, :] - self.gamma * gamma * next_values[i + 1, :]).sum()

            # compute gamma for next step
            gamma = gamma * self.gamma

            # clear up gamma and rew_acc for done envs
            gamma[done_env_ids] = 1.
            rew_acc[i + 1, done_env_ids] = 0.

            # collect data for critic training
            with torch.no_grad():
                self.rew_buf[i] = rew.clone()
                if i < self.steps_num - 1:
                    self.done_mask[i] = done.clone().to(torch.float32)
                else:
                    self.done_mask[i, :] = 1.
                self.next_values[i] = next_values[i + 1].clone()

            # collect episode loss
            with torch.no_grad():
                self.episode_loss -= raw_rew
                self.episode_discounted_loss -= self.episode_gamma * raw_rew
                self.episode_gamma *= self.gamma
                if len(done_env_ids) > 0:
                    self.episode_loss_meter.update(self.episode_loss[done_env_ids])
                    self.episode_discounted_loss_meter.update(self.episode_discounted_loss[done_env_ids])
                    self.episode_length_meter.update(self.episode_length[done_env_ids])
                    for done_env_id in done_env_ids:
                        if (self.episode_loss[done_env_id] > 1e6 or self.episode_loss[done_env_id] < -1e6):
                            print('ep loss error')
                            raise ValueError

                        self.episode_loss_his.append(self.episode_loss[done_env_id].item())
                        self.episode_discounted_loss_his.append(self.episode_discounted_loss[done_env_id].item())
                        self.episode_length_his.append(self.episode_length[done_env_id].item())
                        self.episode_loss[done_env_id] = 0.
                        self.episode_discounted_loss[done_env_id] = 0.
                        self.episode_length[done_env_id] = 0
                        self.episode_gamma[done_env_id] = 1.

        actor_loss /= self.steps_num * self.num_envs

        if self.ret_rms is not None:
            actor_loss = actor_loss * torch.sqrt(ret_var + 1e-6)

        self.actor_loss = actor_loss.detach().cpu().item()

        self.step_count += self.steps_num * self.num_envs

        return actor_loss
    @torch.no_grad()
    def evaluate_policy(self, num_games, deterministic = False):
        episode_length_his = []
        episode_loss_his = []
        episode_discounted_loss_his = []
        episode_loss = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
        episode_length = torch.zeros(self.num_envs, dtype = int, device = self.device)
        episode_gamma = torch.ones(self.num_envs, dtype = torch.float32, device = self.device)
        episode_discounted_loss = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)

        obs = self.env.reset()

        games_cnt = 0
        while games_cnt < num_games:
            if self.obs_rms is not None:
                obs = self.obs_rms.normalize(obs)

            actions = self.actor(obs, deterministic = deterministic)

            obs, rew, done, _ = self.env.step(torch.tanh(actions))

            episode_length += 1

            done_env_ids = done.nonzero(as_tuple = False).squeeze(-1)

            episode_loss -= rew
            episode_discounted_loss -= episode_gamma * rew
            episode_gamma *= self.gamma
            if len(done_env_ids) > 0:
                for done_env_id in done_env_ids:
                    # print('loss = {:.2f}, len = {}'.format(episode_loss[done_env_id].item(), episode_length[done_env_id]))
                    episode_loss_his.append(episode_loss[done_env_id].item())
                    episode_discounted_loss_his.append(episode_discounted_loss[done_env_id].item())
                    episode_length_his.append(episode_length[done_env_id].item())
                    episode_loss[done_env_id] = 0.
                    episode_discounted_loss[done_env_id] = 0.
                    episode_length[done_env_id] = 0
                    episode_gamma[done_env_id] = 1.
                    games_cnt += 1

        mean_episode_length = np.mean(np.array(episode_length_his))
        mean_policy_loss = np.mean(np.array(episode_loss_his))
        mean_policy_discounted_loss = np.mean(np.array(episode_discounted_loss_his))

        return mean_policy_loss, mean_policy_discounted_loss, mean_episode_length
    @torch.no_grad()
    def compute_target_values(self):
        if self.critic_method == 'one-step':
            self.target_values = self.rew_buf + self.gamma * self.next_values
        elif self.critic_method == 'td-lambda':
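            # Backward recursion for TD(lambda) targets: Ai aggregates the
            # lambda-weighted mixture of shorter n-step returns, Bi is the
            # full discounted return of the sub-trajectory bootstrapped with
            # the critic value at its end, and the target interpolates the
            # two; done_mask restarts the recursion at episode boundaries.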
            Ai = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
            Bi = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
            lam = torch.ones(self.num_envs, dtype = torch.float32, device = self.device)
            for i in reversed(range(self.steps_num)):
                lam = lam * self.lam * (1. - self.done_mask[i]) + self.done_mask[i]
                Ai = (1.0 - self.done_mask[i]) * (self.lam * self.gamma * Ai + self.gamma * self.next_values[i] + (1. - lam) / (1. - self.lam) * self.rew_buf[i])
                Bi = self.gamma * (self.next_values[i] * self.done_mask[i] + Bi * (1.0 - self.done_mask[i])) + self.rew_buf[i]
                self.target_values[i] = (1.0 - self.lam) * Ai + lam * Bi
        else:
            raise NotImplementedError
    def compute_critic_loss(self, batch_sample):
        predicted_values = self.critic(batch_sample['obs']).squeeze(-1)
        target_values = batch_sample['target_values']
        critic_loss = ((predicted_values - target_values) ** 2).mean()
        return critic_loss

    def initialize_env(self):
        self.env.clear_grad()
        self.env.reset()

    @torch.no_grad()
    def run(self, num_games):
        mean_policy_loss, mean_policy_discounted_loss, mean_episode_length = self.evaluate_policy(num_games = num_games, deterministic = not self.stochastic_evaluation)
        # print_info('mean episode loss = {}, mean discounted loss = {}, mean episode length = {}'.format(mean_policy_loss, mean_policy_discounted_loss, mean_episode_length))
    def train(self):
        self.start_time = time.time()
        rews = []
        steps = []

        # add timers
        self.time_report.add_timer("algorithm")
        self.time_report.add_timer("compute actor loss")
        self.time_report.add_timer("forward simulation")
        self.time_report.add_timer("backward simulation")
        self.time_report.add_timer("prepare critic dataset")
        self.time_report.add_timer("actor training")
        self.time_report.add_timer("critic training")

        self.time_report.start_timer("algorithm")

        # initializations
        self.initialize_env()
        self.episode_loss = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
        self.episode_discounted_loss = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
        self.episode_length = torch.zeros(self.num_envs, dtype = int, device = self.device)
        self.episode_gamma = torch.ones(self.num_envs, dtype = torch.float32, device = self.device)

        def actor_closure():
            self.actor_optimizer.zero_grad()

            self.time_report.start_timer("compute actor loss")

            self.time_report.start_timer("forward simulation")
            actor_loss = self.compute_actor_loss()
            self.time_report.end_timer("forward simulation")

            self.time_report.start_timer("backward simulation")
            actor_loss.backward()
            self.time_report.end_timer("backward simulation")

            with torch.no_grad():
                self.grad_norm_before_clip = tu.grad_norm(self.actor.parameters())
                if self.truncate_grad:
                    clip_grad_norm_(self.actor.parameters(), self.grad_norm)
                self.grad_norm_after_clip = tu.grad_norm(self.actor.parameters())

                # sanity check
                if torch.isnan(self.grad_norm_before_clip) or self.grad_norm_before_clip > 1000000.:
                    print('NaN gradient')
                    raise ValueError

            self.time_report.end_timer("compute actor loss")

            return actor_loss

        # main training process
        for epoch in range(self.max_epochs):
            time_start_epoch = time.time()

            # learning rate schedule
            if self.lr_schedule == 'linear':
                actor_lr = (1e-5 - self.actor_lr) * float(epoch / self.max_epochs) + self.actor_lr
                for param_group in self.actor_optimizer.param_groups:
                    param_group['lr'] = actor_lr
                lr = actor_lr
                critic_lr = (1e-5 - self.critic_lr) * float(epoch / self.max_epochs) + self.critic_lr
                for param_group in self.critic_optimizer.param_groups:
                    param_group['lr'] = critic_lr
            else:
                lr = self.actor_lr

            # train actor
            self.time_report.start_timer("actor training")
            self.actor_optimizer.step(actor_closure).detach().item()
            self.time_report.end_timer("actor training")

            # train critic
            # prepare dataset
            self.time_report.start_timer("prepare critic dataset")
            with torch.no_grad():
                self.compute_target_values()
                dataset = CriticDataset(self.batch_size, self.obs_buf, self.target_values, drop_last = False)
            self.time_report.end_timer("prepare critic dataset")

            self.time_report.start_timer("critic training")
            self.value_loss = 0.
            for j in range(self.critic_iterations):
                total_critic_loss = 0.
                batch_cnt = 0
                for i in range(len(dataset)):
                    batch_sample = dataset[i]
                    self.critic_optimizer.zero_grad()
                    training_critic_loss = self.compute_critic_loss(batch_sample)
                    training_critic_loss.backward()

                    # ugly fix for simulation nan problem
                    for params in self.critic.parameters():
                        params.grad.nan_to_num_(0.0, 0.0, 0.0)

                    if self.truncate_grad:
                        clip_grad_norm_(self.critic.parameters(), self.grad_norm)

                    self.critic_optimizer.step()

                    total_critic_loss += training_critic_loss
                    batch_cnt += 1

                self.value_loss = (total_critic_loss / batch_cnt).detach().cpu().item()
                # print('value iter {}/{}, loss = {:7.6f}'.format(j + 1, self.critic_iterations, self.value_loss), end='\r')

            self.time_report.end_timer("critic training")

            self.iter_count += 1

            time_end_epoch = time.time()

            # logging
            time_elapse = time.time() - self.start_time
            self.writer.add_scalar('lr/iter', lr, self.iter_count)
            self.writer.add_scalar('actor_loss/step', self.actor_loss, self.step_count)
            self.writer.add_scalar('actor_loss/iter', self.actor_loss, self.iter_count)
            self.writer.add_scalar('value_loss/step', self.value_loss, self.step_count)
            self.writer.add_scalar('value_loss/iter', self.value_loss, self.iter_count)
            if len(self.episode_loss_his) > 0:
                mean_episode_length = self.episode_length_meter.get_mean()
                mean_policy_loss = self.episode_loss_meter.get_mean()
                mean_policy_discounted_loss = self.episode_discounted_loss_meter.get_mean()

                if mean_policy_loss < self.best_policy_loss:
                    # print_info("save best policy with loss {:.2f}".format(mean_policy_loss))
                    self.save()
                    self.best_policy_loss = mean_policy_loss

                self.writer.add_scalar('policy_loss/step', mean_policy_loss, self.step_count)
                self.writer.add_scalar('policy_loss/time', mean_policy_loss, time_elapse)
                self.writer.add_scalar('policy_loss/iter', mean_policy_loss, self.iter_count)
                self.writer.add_scalar('rewards/step', -mean_policy_loss, self.step_count)
                self.writer.add_scalar('rewards/time', -mean_policy_loss, time_elapse)
                self.writer.add_scalar('rewards/iter', -mean_policy_loss, self.iter_count)
                rews.append(-mean_policy_loss)
                steps.append(self.step_count)
                self.writer.add_scalar('policy_discounted_loss/step', mean_policy_discounted_loss, self.step_count)
                self.writer.add_scalar('policy_discounted_loss/iter', mean_policy_discounted_loss, self.iter_count)
                self.writer.add_scalar('best_policy_loss/step', self.best_policy_loss, self.step_count)
                self.writer.add_scalar('best_policy_loss/iter', self.best_policy_loss, self.iter_count)
                self.writer.add_scalar('episode_lengths/iter', mean_episode_length, self.iter_count)
                self.writer.add_scalar('episode_lengths/step', mean_episode_length, self.step_count)
                self.writer.add_scalar('episode_lengths/time', mean_episode_length, time_elapse)
            else:
                mean_policy_loss = np.inf
                mean_policy_discounted_loss = np.inf
                mean_episode_length = 0

            # print('iter {}: ep loss {:.2f}, ep discounted loss {:.2f}, ep len {:.1f}, fps total {:.2f}, value loss {:.2f}, grad norm before clip {:.2f}, grad norm after clip {:.2f}'.format(\
            #     self.iter_count, mean_policy_loss, mean_policy_discounted_loss, mean_episode_length, self.steps_num * self.num_envs / (time_end_epoch - time_start_epoch), self.value_loss, self.grad_norm_before_clip, self.grad_norm_after_clip))

            self.writer.flush()

            if self.save_interval > 0 and (self.iter_count % self.save_interval == 0):
                self.save(self.name + "policy_iter{}_reward{:.3f}".format(self.iter_count, -mean_policy_loss))

            # update target critic
            with torch.no_grad():
                alpha = self.target_critic_alpha
                for param, param_targ in zip(self.critic.parameters(), self.target_critic.parameters()):
                    param_targ.data.mul_(alpha)
                    param_targ.data.add_((1. - alpha) * param.data)

        self.time_report.end_timer("algorithm")

        self.time_report.report()

        self.save('final_policy')

        # save reward/length history
        self.episode_loss_his = np.array(self.episode_loss_his)
        self.episode_discounted_loss_his = np.array(self.episode_discounted_loss_his)
        self.episode_length_his = np.array(self.episode_length_his)
        np.save(open(os.path.join(self.log_dir, 'episode_loss_his.npy'), 'wb'), self.episode_loss_his)
        np.save(open(os.path.join(self.log_dir, 'episode_discounted_loss_his.npy'), 'wb'), self.episode_discounted_loss_his)
        np.save(open(os.path.join(self.log_dir, 'episode_length_his.npy'), 'wb'), self.episode_length_his)

        print(rews)
        print()
        print(steps)

        # evaluate the final policy's performance
        self.run(self.num_envs)

        self.close()

    def play(self, cfg):
        self.load(cfg['params']['general']['checkpoint'])
        self.run(cfg['params']['config']['player']['games_num'])

    def save(self, filename = None):
        if filename is None:
            filename = 'best_policy'
        torch.save([self.actor, self.critic, self.target_critic, self.obs_rms, self.ret_rms], os.path.join(self.log_dir, "{}.pt".format(filename)))

    def load(self, path):
        checkpoint = torch.load(path)
        self.actor = checkpoint[0].to(self.device)
        self.critic = checkpoint[1].to(self.device)
        self.target_critic = checkpoint[2].to(self.device)
        self.obs_rms = checkpoint[3].to(self.device)
        self.ret_rms = checkpoint[4].to(self.device) if checkpoint[4] is not None else checkpoint[4]

    def close(self):
        self.writer.close()
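A reduced sketch of driving the class above end to end; the actual repository uses a separate train script, so the config path and the hand-assembled `general` section here are assumptions:

import torch
import yaml

with open('examples/cfg/shac/hopper.yaml') as f:
    cfg = yaml.safe_load(f)
cfg['params']['general'] = {
    'seed': 0,
    'device': torch.device('cuda:0' if torch.cuda.is_available() else 'cpu'),
    'render': False,
    'train': True,
    'logdir': 'logs/tmp/shac/demo',
    'checkpoint': 'Base',
}

agent = SHAC(cfg)   # builds env, actor, critic, and optimizers
agent.train()       # alternates actor rollouts and critic fitting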
KallPap/FRL-SHAC-Extension/algorithms/bptt.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import sys, os
from torch.nn.utils.clip_grad import clip_grad_norm_
project_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(project_dir)
import time
import numpy as np
import copy
import torch
from tensorboardX import SummaryWriter
import yaml
import dflex as df
import envs
import models.actor
from optim.gd import GD
from utils.common import *
import utils.torch_utils as tu
from utils.time_report import TimeReport
from utils.average_meter import AverageMeter
from utils.running_mean_std import RunningMeanStd
class BPTT:
    def __init__(self, cfg):
        env_fn = getattr(envs, cfg["params"]["diff_env"]["name"])

        seeding(cfg["params"]["general"]["seed"])

        self.env = env_fn(num_envs = cfg["params"]["config"]["num_actors"], \
                          device = cfg["params"]["general"]["device"], \
                          render = cfg["params"]["general"]["render"], \
                          seed = cfg["params"]["general"]["seed"], \
                          episode_length=cfg["params"]["diff_env"].get("episode_length", 250), \
                          stochastic_init = cfg["params"]["diff_env"].get("stochastic_env", False), \
                          MM_caching_frequency = cfg["params"]['diff_env'].get('MM_caching_frequency', 1), \
                          no_grad = False)

        print('num_envs = ', self.env.num_envs)
        print('num_actions = ', self.env.num_actions)
        print('num_obs = ', self.env.num_obs)

        self.num_envs = self.env.num_envs
        self.num_obs = self.env.num_obs
        self.num_actions = self.env.num_actions
        self.max_episode_length = self.env.episode_length
        self.device = cfg["params"]["general"]["device"]

        self.gamma = cfg['params']['config'].get('gamma', 0.99)

        self.steps_num = cfg["params"]["config"]["steps_num"]
        self.max_epochs = cfg["params"]["config"]["max_epochs"]
        self.actor_lr = float(cfg["params"]["config"]["actor_learning_rate"])
        self.lr_schedule = cfg['params']['config'].get('lr_schedule', 'linear')

        self.obs_rms = None
        if cfg['params']['config'].get('obs_rms', False):
            self.obs_rms = RunningMeanStd(shape = (self.num_obs), device = self.device)

        self.rew_scale = cfg['params']['config'].get('rew_scale', 1.0)

        self.name = cfg['params']['config'].get('name', "Ant")

        self.truncate_grad = cfg["params"]["config"]["truncate_grads"]
        self.grad_norm = cfg["params"]["config"]["grad_norm"]

        if cfg['params']['general']['train']:
            self.log_dir = cfg["params"]["general"]["logdir"]
            os.makedirs(self.log_dir, exist_ok = True)
            # save config
            save_cfg = copy.deepcopy(cfg)
            if 'general' in save_cfg['params']:
                deleted_keys = []
                for key in save_cfg['params']['general'].keys():
                    if key in save_cfg['params']['config']:
                        deleted_keys.append(key)
                for key in deleted_keys:
                    del save_cfg['params']['general'][key]

            yaml.dump(save_cfg, open(os.path.join(self.log_dir, 'cfg.yaml'), 'w'))
            self.writer = SummaryWriter(os.path.join(self.log_dir, 'log'))
            # save interval
            self.save_interval = cfg["params"]["config"].get("save_interval", 500)
            # stochastic inference
            self.stochastic_evaluation = True
        else:
            self.stochastic_evaluation = not (cfg['params']['config']['player'].get('determenistic', False) or cfg['params']['config']['player'].get('deterministic', False))
            self.steps_num = self.env.episode_length

        # create actor critic network
        self.algo = cfg["params"]["algo"]['name'] # choices: ['gd', 'adam', 'SGD']
        self.actor_name = cfg["params"]["network"].get("actor", 'ActorStochasticMLP') # choices: ['ActorDeterministicMLP', 'ActorStochasticMLP']
        actor_fn = getattr(models.actor, self.actor_name)
        self.actor = actor_fn(self.num_obs, self.num_actions, cfg['params']['network'], device = self.device)

        if cfg['params']['general']['train']:
            self.save('init_policy')

        # initialize optimizer
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), betas = cfg['params']['config']['betas'], lr = self.actor_lr)

        # counting variables
        self.iter_count = 0
        self.step_count = 0

        # loss variables
        self.episode_length_his = []
        self.episode_loss_his = []
        self.episode_discounted_loss_his = []
        self.episode_loss = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
        self.episode_discounted_loss = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
        self.episode_gamma = torch.ones(self.num_envs, dtype = torch.float32, device = self.device)
        self.episode_length = torch.zeros(self.num_envs, dtype = int, device = self.device)
        self.best_policy_loss = np.inf
        self.actor_loss = np.inf

        # average meter
        self.episode_loss_meter = AverageMeter(1, 100).to(self.device)
        self.episode_discounted_loss_meter = AverageMeter(1, 100).to(self.device)
        self.episode_length_meter = AverageMeter(1, 100).to(self.device)

        # timer
        self.time_report = TimeReport()
    def compute_actor_loss(self, deterministic = False):
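        # Unlike SHAC's compute_actor_loss, there is no critic here: the BPTT
        # loss is just the negative accumulated discounted reward over the
        # rollout, so gradients flow back through every simulation step.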
        rew_acc = torch.zeros((self.steps_num + 1, self.num_envs), dtype = torch.float32, device = self.device)
        gamma = torch.ones(self.num_envs, dtype = torch.float32, device = self.device)

        actor_loss = torch.tensor(0., dtype = torch.float32, device = self.device)

        with torch.no_grad():
            if self.obs_rms is not None:
                obs_rms = copy.deepcopy(self.obs_rms)

        obs = self.env.initialize_trajectory()
        if self.obs_rms is not None:
            # update obs rms
            with torch.no_grad():
                self.obs_rms.update(obs)
            # normalize the current obs
            obs = obs_rms.normalize(obs)
        for i in range(self.steps_num):
            actions = self.actor(obs, deterministic = deterministic)

            obs, rew, done, extra_info = self.env.step(torch.tanh(actions))

            with torch.no_grad():
                raw_rew = rew.clone()

            # scale the reward
            rew = rew * self.rew_scale

            if self.obs_rms is not None:
                # update obs rms
                with torch.no_grad():
                    self.obs_rms.update(obs)
                # normalize the current obs
                obs = obs_rms.normalize(obs)

            self.episode_length += 1

            done_env_ids = done.nonzero(as_tuple = False).squeeze(-1)

            # JIE
            rew_acc[i + 1, :] = rew_acc[i, :] + gamma * rew

            if i < self.steps_num - 1:
                actor_loss = actor_loss + (- rew_acc[i + 1, done_env_ids]).sum()
            else:
                # terminate all envs at the end of optimization iteration
                actor_loss = actor_loss + (- rew_acc[i + 1, :]).sum()

            # compute gamma for next step
            gamma = gamma * self.gamma

            # clear up gamma and rew_acc for done envs
            gamma[done_env_ids] = 1.
            rew_acc[i + 1, done_env_ids] = 0.

            # collect episode loss
            with torch.no_grad():
                self.episode_loss -= raw_rew
                self.episode_discounted_loss -= self.episode_gamma * raw_rew
                self.episode_gamma *= self.gamma
                if len(done_env_ids) > 0:
                    self.episode_loss_meter.update(self.episode_loss[done_env_ids])
                    self.episode_discounted_loss_meter.update(self.episode_discounted_loss[done_env_ids])
                    self.episode_length_meter.update(self.episode_length[done_env_ids])
                    for done_env_id in done_env_ids:
                        if (self.episode_loss[done_env_id] > 1e6 or self.episode_loss[done_env_id] < -1e6):
                            print('ep loss error')
                            import IPython
                            IPython.embed()
                        self.episode_loss_his.append(self.episode_loss[done_env_id].item())
                        self.episode_discounted_loss_his.append(self.episode_discounted_loss[done_env_id].item())
                        self.episode_length_his.append(self.episode_length[done_env_id].item())
                        self.episode_loss[done_env_id] = 0.
                        self.episode_discounted_loss[done_env_id] = 0.
                        self.episode_length[done_env_id] = 0
                        self.episode_gamma[done_env_id] = 1.

        actor_loss /= self.steps_num * self.num_envs

        self.actor_loss = actor_loss.detach().cpu().item()

        self.step_count += self.steps_num * self.num_envs

        return actor_loss
    @torch.no_grad()
    def evaluate_policy(self, num_games, deterministic = False):
        episode_length_his = []
        episode_loss_his = []
        episode_discounted_loss_his = []
        episode_loss = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
        episode_length = torch.zeros(self.num_envs, dtype = int, device = self.device)
        episode_gamma = torch.ones(self.num_envs, dtype = torch.float32, device = self.device)
        episode_discounted_loss = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)

        obs = self.env.reset()

        games_cnt = 0
        while games_cnt < num_games:
            if self.obs_rms is not None:
                obs = self.obs_rms.normalize(obs)

            actions = self.actor(obs, deterministic = deterministic)

            obs, rew, done, _ = self.env.step(torch.tanh(actions))

            episode_length += 1

            done_env_ids = done.nonzero(as_tuple = False).squeeze(-1)

            episode_loss -= rew
            episode_discounted_loss -= episode_gamma * rew
            episode_gamma *= self.gamma
            if len(done_env_ids) > 0:
                for done_env_id in done_env_ids:
                    print('loss = {:.2f}, len = {}'.format(episode_loss[done_env_id].item(), episode_length[done_env_id]))
                    episode_loss_his.append(episode_loss[done_env_id].item())
                    episode_discounted_loss_his.append(episode_discounted_loss[done_env_id].item())
                    episode_length_his.append(episode_length[done_env_id].item())
                    episode_loss[done_env_id] = 0.
                    episode_discounted_loss[done_env_id] = 0.
                    episode_length[done_env_id] = 0
                    episode_gamma[done_env_id] = 1.
                    games_cnt += 1

        mean_episode_length = np.mean(np.array(episode_length_his))
        mean_policy_loss = np.mean(np.array(episode_loss_his))
        mean_policy_discounted_loss = np.mean(np.array(episode_discounted_loss_his))

        return mean_policy_loss, mean_policy_discounted_loss, mean_episode_length
    def initialize_env(self):
        self.env.clear_grad()
        self.env.reset()

    @torch.no_grad()
    def run(self, num_games):
        mean_policy_loss, mean_policy_discounted_loss, mean_episode_length = self.evaluate_policy(num_games = num_games, deterministic = not self.stochastic_evaluation)
        print_info('mean episode loss = {}, mean discounted loss = {}, mean episode length = {}'.format(mean_policy_loss, mean_policy_discounted_loss, mean_episode_length))

    def train(self):
        self.start_time = time.time()

        # timers
        self.time_report.add_timer("algorithm")
        self.time_report.add_timer("compute actor loss")
        self.time_report.add_timer("forward simulation")
        self.time_report.add_timer("backward simulation")
        self.time_report.add_timer("actor training")

        self.time_report.start_timer("algorithm")

        self.initialize_env()
        self.episode_loss = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
        self.episode_discounted_loss = torch.zeros(self.num_envs, dtype = torch.float32, device = self.device)
        self.episode_length = torch.zeros(self.num_envs, dtype = int, device = self.device)
        self.episode_gamma = torch.ones(self.num_envs, dtype = torch.float32, device = self.device)

        def actor_closure():
            self.actor_optimizer.zero_grad()

            self.time_report.start_timer("compute actor loss")

            self.time_report.start_timer("forward simulation")
            actor_loss = self.compute_actor_loss()
            self.time_report.end_timer("forward simulation")

            self.time_report.start_timer("backward simulation")
            actor_loss.backward()
            self.time_report.end_timer("backward simulation")

            with torch.no_grad():
                self.grad_norm_before_clip = tu.grad_norm(self.actor.parameters())
                if self.truncate_grad:
                    clip_grad_norm_(self.actor.parameters(), self.grad_norm)
                self.grad_norm_after_clip = tu.grad_norm(self.actor.parameters())
                if torch.isnan(self.grad_norm_before_clip):
                    # JIE
                    print('here!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! NaN gradient')
                    import IPython
                    IPython.embed()
                    for params in self.actor.parameters():
                        params.grad.zero_()
                if torch.isnan(self.grad_norm_before_clip) or self.grad_norm_before_clip > 1000000.:
                    self.save("nan_policy")

            self.time_report.end_timer("compute actor loss")

            return actor_loss

        for epoch in range(self.max_epochs):
            time_start_epoch = time.time()

            if self.lr_schedule == 'linear':
                actor_lr = (1e-5 - self.actor_lr) * float(epoch / self.max_epochs) + self.actor_lr
                for param_group in self.actor_optimizer.param_groups:
                    param_group['lr'] = actor_lr
                lr = actor_lr
            else:
                lr = self.actor_lr

            # train actor
            self.time_report.start_timer("actor training")
            self.actor_optimizer.step(actor_closure).detach().item()
            self.time_report.end_timer("actor training")

            self.iter_count += 1

            time_end_epoch = time.time()

            # logging
            time_elapse = time.time() - self.start_time
            self.writer.add_scalar('lr/iter', lr, self.iter_count)
            self.writer.add_scalar('actor_loss/step', self.actor_loss, self.step_count)
            self.writer.add_scalar('actor_loss/iter', self.actor_loss, self.iter_count)
            if len(self.episode_loss_his) > 0:
                mean_episode_length = self.episode_length_meter.get_mean()
                mean_policy_loss = self.episode_loss_meter.get_mean()
                mean_policy_discounted_loss = self.episode_discounted_loss_meter.get_mean()

                if mean_policy_loss < self.best_policy_loss:
                    print_info("save best policy with loss {:.2f}".format(mean_policy_loss))
                    self.save()
                    self.best_policy_loss = mean_policy_loss
                # self.save("latest_policy")

                self.writer.add_scalar('policy_loss/step', mean_policy_loss, self.step_count)
                self.writer.add_scalar('policy_loss/time', mean_policy_loss, time_elapse)
                self.writer.add_scalar('policy_loss/iter', mean_policy_loss, self.iter_count)
                self.writer.add_scalar('rewards/step', -mean_policy_loss, self.step_count)
                self.writer.add_scalar('rewards/time', -mean_policy_loss, time_elapse)
                self.writer.add_scalar('rewards/iter', -mean_policy_loss, self.iter_count)
                self.writer.add_scalar('policy_discounted_loss/step', mean_policy_discounted_loss, self.step_count)
                self.writer.add_scalar('policy_discounted_loss/iter', mean_policy_discounted_loss, self.iter_count)
                self.writer.add_scalar('best_policy_loss/step', self.best_policy_loss, self.step_count)
                self.writer.add_scalar('best_policy_loss/iter', self.best_policy_loss, self.iter_count)
                self.writer.add_scalar('episode_lengths/iter', mean_episode_length, self.iter_count)
                self.writer.add_scalar('episode_lengths/step', mean_episode_length, self.step_count)
                self.writer.add_scalar('episode_lengths/time', mean_episode_length, time_elapse)
            else:
                mean_policy_loss = np.inf
                mean_policy_discounted_loss = np.inf
                mean_episode_length = 0

            print('iter {}: ep loss {:.2f}, ep discounted loss {:.2f}, ep len {:.1f}, fps total {:.2f}, grad norm before clip {:.2f}, grad norm after clip {:.2f}'.format(\
                self.iter_count, mean_policy_loss, mean_policy_discounted_loss, mean_episode_length, self.steps_num * self.num_envs / (time_end_epoch - time_start_epoch), self.grad_norm_before_clip, self.grad_norm_after_clip))

            self.writer.flush()

            if self.save_interval > 0 and (self.iter_count % self.save_interval == 0):
                self.save(self.name + "policy_iter{}_reward{:.3f}".format(self.iter_count, -mean_policy_loss))

        self.time_report.end_timer("algorithm")

        self.time_report.report()

        self.save('final_policy')

        # save reward/length history
        self.episode_loss_his = np.array(self.episode_loss_his)
        self.episode_discounted_loss_his = np.array(self.episode_discounted_loss_his)
        self.episode_length_his = np.array(self.episode_length_his)
        np.save(open(os.path.join(self.log_dir, 'episode_loss_his.npy'), 'wb'), self.episode_loss_his)
        np.save(open(os.path.join(self.log_dir, 'episode_discounted_loss_his.npy'), 'wb'), self.episode_discounted_loss_his)
        np.save(open(os.path.join(self.log_dir, 'episode_length_his.npy'), 'wb'), self.episode_length_his)

        # evaluate the final policy's performance
        self.run(self.num_envs)

        self.close()

    def play(self, cfg):
        self.load(cfg['params']['general']['checkpoint'])
        self.run(cfg['params']['config']['player']['games_num'])

    def save(self, filename = None):
        if filename is None:
            filename = 'best_policy'
        torch.save([self.actor, self.obs_rms], os.path.join(self.log_dir, "{}.pt".format(filename)))

    def load(self, path):
        checkpoint = torch.load(path)
        self.actor = checkpoint[0].to(self.device)
        self.obs_rms = checkpoint[1].to(self.device)

    def close(self):
        self.writer.close()
KallPap/FRL-SHAC-Extension/models/critic_checkpoint_gd.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
from torch.utils.checkpoint import checkpoint
import torch
import torch.nn as nn
from models import model_utils
import numpy as np
class CriticMLP(nn.Module):
    def __init__(self, obs_dim, cfg_network, device='cuda:0'):
        super(CriticMLP, self).__init__()

        self.device = device

        self.layer_dims = [obs_dim] + cfg_network['critic_mlp']['units'] + [1]

        init_ = lambda m: model_utils.init(m, nn.init.orthogonal_, lambda x: nn.init.constant_(x, 0), np.sqrt(2))

        modules = []
        for i in range(len(self.layer_dims) - 1):
            modules.append(init_(nn.Linear(self.layer_dims[i], self.layer_dims[i + 1])))
            if i < len(self.layer_dims) - 2:
                modules.append(model_utils.get_activation_func(cfg_network['critic_mlp']['activation']))
                modules.append(torch.nn.LayerNorm(self.layer_dims[i + 1]))

        self.critic = nn.Sequential(*modules).to(device)

        self.obs_dim = obs_dim

    def forward(self, observations):
        observations.requires_grad_(True)
        def custom_forward(*inputs):
            return self.critic(inputs[0])
        return checkpoint(custom_forward, observations, use_reentrant=True)
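A small sketch exercising the checkpointed critic above; obs_dim and the network config are illustrative values in the shape the constructor expects:

import torch

cfg_network = {'critic_mlp': {'units': [64, 64], 'activation': 'elu'}}
critic = CriticMLP(obs_dim=11, cfg_network=cfg_network, device='cpu')
obs = torch.randn(32, 11)
value = critic(obs)       # gradient checkpointing: activations inside the MLP
value.sum().backward()    # are recomputed during backward instead of stored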
KallPap/FRL-SHAC-Extension/models/model_utils.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import torch.nn as nn
def init(module, weight_init, bias_init, gain=1):
    weight_init(module.weight.data, gain=gain)
    bias_init(module.bias.data)
    return module

def get_activation_func(activation_name):
    if activation_name.lower() == 'tanh':
        return nn.Tanh()
    elif activation_name.lower() == 'relu':
        return nn.ReLU()
    elif activation_name.lower() == 'elu':
        return nn.ELU()
    elif activation_name.lower() == 'identity':
        return nn.Identity()
    else:
        raise NotImplementedError('Activation function {} not defined'.format(activation_name))
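In isolation, the two helpers above can be used like this (dimensions are illustrative):

import numpy as np
import torch.nn as nn

layer = init(nn.Linear(8, 4), nn.init.orthogonal_,
             lambda x: nn.init.constant_(x, 0), gain=np.sqrt(2))
act = get_activation_func('elu')  # returns nn.ELU()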
KallPap/FRL-SHAC-Extension/models/actor.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import torch
import torch.nn as nn
from torch.distributions.normal import Normal
import numpy as np
from models import model_utils
from torch.utils.checkpoint import checkpoint
class ActorDeterministicMLP(nn.Module):
    def __init__(self, obs_dim, action_dim, cfg_network, device='cuda:0'):
        super(ActorDeterministicMLP, self).__init__()

        self.device = device

        self.layer_dims = [obs_dim] + cfg_network['actor_mlp']['units'] + [action_dim]

        init_ = lambda m: model_utils.init(m, nn.init.orthogonal_, lambda x: nn.init.constant_(x, 0), np.sqrt(2))

        modules = []
        for i in range(len(self.layer_dims) - 1):
            modules.append(init_(nn.Linear(self.layer_dims[i], self.layer_dims[i + 1])))
            if i < len(self.layer_dims) - 2:
                modules.append(model_utils.get_activation_func(cfg_network['actor_mlp']['activation']))
                modules.append(torch.nn.LayerNorm(self.layer_dims[i + 1]))

        self.actor = nn.Sequential(*modules).to(device)

        self.action_dim = action_dim
        self.obs_dim = obs_dim

    def get_logstd(self):
        # return self.logstd
        return None

    def forward(self, observations, deterministic = False):
        return self.actor(observations)

class ActorStochasticMLP(nn.Module):
    def __init__(self, obs_dim, action_dim, cfg_network, device='cuda:0'):
        super(ActorStochasticMLP, self).__init__()

        self.device = device

        self.layer_dims = [obs_dim] + cfg_network['actor_mlp']['units'] + [action_dim]

        init_ = lambda m: model_utils.init(m, nn.init.orthogonal_, lambda x: nn.init.constant_(x, 0), np.sqrt(2))

        modules = []
        for i in range(len(self.layer_dims) - 1):
            modules.append(nn.Linear(self.layer_dims[i], self.layer_dims[i + 1]))
            if i < len(self.layer_dims) - 2:
                modules.append(model_utils.get_activation_func(cfg_network['actor_mlp']['activation']))
                modules.append(torch.nn.LayerNorm(self.layer_dims[i + 1]))
            else:
                modules.append(model_utils.get_activation_func('identity'))

        self.mu_net = nn.Sequential(*modules).to(device)

        logstd = cfg_network.get('actor_logstd_init', -1.0)

        self.logstd = torch.nn.Parameter(torch.ones(action_dim, dtype=torch.float32, device=device) * logstd)

        self.action_dim = action_dim
        self.obs_dim = obs_dim

    def get_logstd(self):
        return self.logstd

    def forward(self, obs, deterministic=False):
        def custom_forward(*inputs):
            return self.mu_net(inputs[0])
        mu = checkpoint(custom_forward, obs)

        if deterministic:
            return mu
        else:
            std = self.logstd.exp()
            dist = Normal(mu, std)
            sample = dist.rsample()
            return sample

    def forward_with_dist(self, obs, deterministic=False):
        def custom_forward(*inputs):
            return self.mu_net(inputs[0])
        mu = checkpoint(custom_forward, obs)
        std = self.logstd.exp()

        if deterministic:
            return mu, mu, std
        else:
            dist = Normal(mu, std)
            sample = dist.rsample()
            return sample, mu, std

    def evaluate_actions_log_probs(self, obs, actions):
        def custom_forward(*inputs):
            return self.mu_net(inputs[0])
        mu = checkpoint(custom_forward, obs)
        std = self.logstd.exp()
        dist = Normal(mu, std)
        return dist.log_prob(actions)
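A minimal instantiation sketch for the stochastic actor above, with an illustrative network config and dimensions:

import torch

cfg_network = {'actor_mlp': {'units': [64, 64], 'activation': 'elu'},
               'actor_logstd_init': -1.0}
actor = ActorStochasticMLP(obs_dim=11, action_dim=3, cfg_network=cfg_network, device='cpu')
obs = torch.randn(5, 11)
sample = actor(obs)                    # reparameterized sample from N(mu, std)
mean = actor(obs, deterministic=True)  # mean action only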
KallPap/FRL-SHAC-Extension/utils/dataset.py
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import numpy as np
class CriticDataset:
    def __init__(self, batch_size, obs, target_values, shuffle = False, drop_last = False):
        self.obs = obs.view(-1, obs.shape[-1])
        self.target_values = target_values.view(-1)
        self.batch_size = batch_size

        if shuffle:
            self.shuffle()

        if drop_last:
            self.length = self.obs.shape[0] // self.batch_size
        else:
            self.length = ((self.obs.shape[0] - 1) // self.batch_size) + 1

    def shuffle(self):
        index = np.random.permutation(self.obs.shape[0])
        self.obs = self.obs[index, :]
        self.target_values = self.target_values[index]

    def __len__(self):
        return self.length

    def __getitem__(self, index):
        start_idx = index * self.batch_size
        end_idx = min((index + 1) * self.batch_size, self.obs.shape[0])
        return {'obs': self.obs[start_idx:end_idx, :], 'target_values': self.target_values[start_idx:end_idx]}
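A sketch of feeding rollout buffers shaped (steps_num, num_envs, obs_dim), as in shac.py above, through the dataset; sizes are illustrative:

import torch

obs_buf = torch.randn(32, 256, 11)
target_values = torch.randn(32, 256)
dataset = CriticDataset(batch_size=2048, obs=obs_buf, target_values=target_values)
for i in range(len(dataset)):
    batch = dataset[i]  # flattened to (<=2048, 11) obs with matching targets
    assert batch['obs'].shape[0] == batch['target_values'].shape[0]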
greydoubt/nvidia_omniverse_stuff/example_2.py
import omni.ext
import omni.ui as ui
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class MyExtension(omni.ext.IExt):
    # ext_id is current extension id. It can be used with extension manager to query additional information, like where
    # this extension is located on filesystem.
    def on_startup(self, ext_id):
        print("[omni.example.spawn_prims] MyExtension startup")

        self._window = ui.Window("Spawn Primitives", width=300, height=300)
        with self._window.frame:
            with ui.VStack():
                def on_click():
                    print("clicked!")

                ui.Button("Spawn Cube", clicked_fn=lambda: on_click())

    def on_shutdown(self):
        print("[omni.example.spawn_prims] MyExtension shutdown")
NVlabs/fast-explicit-teleop/README.md
# Fast Explicit-Input Assistance for Teleoperation in Clutter
This repository contains the research code for [Fast Explicit-Input Assistance for Teleoperation in Clutter](https://arxiv.org/abs/2402.02612).
The performance of prediction-based assistance for robot teleoperation degrades in unseen or goal-rich environments due to incorrect or quickly-changing intent inferences.
Poor predictions can confuse operators or cause them to change their control input to implicitly signal their goal, resulting in unnatural movement. We present a new assistance algorithm and interface for robotic manipulation where an operator can explicitly communicate a manipulation goal by pointing the end-effector. Rapid optimization and parallel collision checking in a local region around the pointing target enable direct, interactive control over grasp and place pose candidates.
This codebase enables running the explicit and implicit assistance conditions on a simulated environment in Isaac Sim, as used in the experiments. It has a dependency on the [spacemouse extension](https://github.com/NVlabs/spacemouse-extension), which is the device used for teleoperating the robot. Some of the tools and utilities might be helpful as a guidance in developing your own simulation environments and teleoperation interfaces.
# srl.teleop
The codebase is structured as an Isaac Sim Extension. It is currently supported on Isaac Sim 2022.2.1.
## Installation
Clone into `~/Documents/Kit/apps/Isaac-Sim/exts`, and ensure the folder is titled `srl.teleop`.
You could also clone the Extension to a different directory and add it to the list of extensions paths in Isaac Sim. The one above is just used by default.
OpenSCAD is required for trimesh boolean operations (used in collision checking):
sudo apt-get install openscad
### SpaceMouse Setup
Clone the [SpaceMouse extension](https://github.com/NVlabs/spacemouse-extension) and carefully follow the setup instructions to install it.
Currently, the assistance extension won't function without the SpaceMouse extension.
<!-- #### 2022.2.0:
Cortex doesn't declare a module, which seems to prevent imports from other extensions. Add the following to its `extension.toml`:
[[python.module]]
name = "omni.isaac" -->
## Usage
* Run Isaac Sim from the OV Launcher. Activate the extension in the `Window > Extensions` menu by searching for `SRL` and toggling the extension on. Set it to autoload if you don't want to have to enable it manually every launch.
* Click on the new `Teleop Assistance` pane that appeared near the `Stage` pane (right side menus).
* Click `Load World`.
* Open the `SpaceMouse` pane, select your device, and click the `Engage` checkbox.
SpaceMouse input moves the target, visualized with a small axis marker, that the robot tries to move towards. Suggestions will appear, indicated by a ghost gripper; you can hold the `Pull` button to have the target slowly moved to match the suggestion. When you have an object in your gripper, you will see a ghost version of the held object floating along planes in the scene. You can move the robot around as normal and the ghost will move in tandem. Once you're happy with where the marker is, use `Pull` to have the object placed down onto the plane.
| Function | SpaceMouse | SpaceMouse Pro |
|--------------------|------------|----------------|
| Gripper open/close | Left click | Ctrl |
| Pull | Right hold | Alt |
| Home | - | Menu |
| Left View | - | F |
| Right View | - | R |
| Top View | - | T |
| Free View | - | Clockwise (Center on right pad) |
| Rotate View | - | Roll (Top-left on right pad) |
### Recording a Demonstration
Under the Data Logging panel of the extension, enter the operator's name, then click "Start" to begin collecting a demonstration. Press pause when finished and click "Save Data" to store the information into a JSON file at the "Output Directory".
## Development
Run `source ${ISAAC_SIM_ROOT}/setup_python_env.sh` in a shell, then run `code .` in the repository. The included `.vscode` config is based on the one distributed with Isaac Sim.
Omniverse will monitor the Python source files making up the extension and automatically "hot reload" the extension when you save changes.
This repo tracks a VS Code configuration for connecting the debugger to the Python environment while Isaac Sim is running. Enabling the debugger by enabling its host extension brings a performance penalty (even if it isn't connected), so be sure to disable it before judging frame rates.
**Note: You must start with a fresh Omniverse stage every time you open the plugin. Use `File > New From Stage Template > Empty` to clear the stage. Then you can `Load World` and proceed.**
# Contributions
Some parts of this codebase reuse and modify the "Isaac Sim Examples" plugin and Cortex from NVIDIA Isaac Sim.
# Citation
If you find this work useful, please star or fork this repository and cite the following paper:
```
@misc{walker2024fast,
title={Fast Explicit-Input Assistance for Teleoperation in Clutter},
author={Nick Walker and Xuning Yang and Animesh Garg and Maya Cakmak and Dieter Fox and Claudia P\'{e}rez-D'Arpino},
year={2024},
eprint={2402.02612},
archivePrefix={arXiv},
primaryClass={cs.RO}
}
```
| 5,545 |
Markdown
| 60.622222 | 590 | 0.732372 |
NVlabs/fast-explicit-teleop/scripts/render_demo.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import argparse
import os
from pathlib import Path
parser = argparse.ArgumentParser()
parser.add_argument('input_file', help='Path to hdf5 file')
args = parser.parse_args()
selection_path = Path(args.input_file)
# selection path but without file extension
prefix = ""
out_path = prefix + str(selection_path.parent) + "/" + str(selection_path.stem)
if os.path.exists(out_path + "/operator_view.mp4"):
print("Already rendered, skipping")
exit()
if "lifting" in out_path or "reaching" in out_path:
print("Skipping warm up")
exit()
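# The argument parsing and skip checks above intentionally run before SimulationApp
# is created, so already-rendered or warm-up files can bail out without paying
# Isaac Sim's slow startup cost. main() repeats the same checks below.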
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": True})
from omni.isaac.core.utils.extensions import enable_extension
# Enable the layers and stage windows in the UI
enable_extension("srl.teleop")
import atexit
def exit_handler():
simulation_app.close()
atexit.register(exit_handler)
import numpy as np
import h5py
import asyncio
from omni.isaac.core.world import World
from srl.teleop.analysis.playback import Playback
import math
import subprocess
from tqdm import tqdm
import shlex
def get_hdf5_files(dir_path):
hdf5_files = []
for dirpath, dirnames, filenames in os.walk(dir_path):
for filename in filenames:
if filename.endswith('.hdf5'):
hdf5_files.append(os.path.join(dirpath, filename))
return hdf5_files
def main(input_path):
selection_path = Path(input_path)
# selection path but without file extension
prefix = ""
out_path = prefix + str(selection_path.parent) + "/" + str(selection_path.stem)
if os.path.exists(out_path + "/operator_view.mp4"):
print("Already rendered, skipping")
return
if "lifting" in out_path or "reaching" in out_path:
print("Skipping warm up")
return
# Clear out old renders
os.system(f"rm -rf {out_path}/main {out_path}/secondary {out_path}/table {out_path}/gripper")
with h5py.File(selection_path, 'r') as f:
task = f.attrs["task"]
scene_description = f.attrs["scene_description"]
trajectory = f["frames"][()]
print("**********************")
print(input_path)
print(f"Frames in trajectory: {len(trajectory)}")
sim_duration = len(trajectory) / 60  # trajectory length at the 60 Hz physics rate
print(f"Sim duration (60 fps): {int(sim_duration//60)}:{math.floor(sim_duration % 60)}")
duration = sum([frame['time'] for frame in trajectory])
print(f"Wall clock length: {int(duration//60)}:{math.floor(duration % 60)} ")
filepath_no_ext, ext = os.path.splitext(selection_path)
playback = Playback(task, scene_description, trajectory, save_images_path=filepath_no_ext, half_res=True, every_other_frame=True)
playback._world = World()
playback.setup_scene()
loop = asyncio.get_event_loop()
playback._world.reset()
loop.run_until_complete(playback.setup_post_load())
playback._world.play()
with tqdm(total=len(trajectory)) as pbar:
pbar.set_description("Rendering " + str(selection_path))
while True:
playback._world.step(render=True)
if not playback._world.is_playing():
break
pbar.update(2)
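# each world step advances two trajectory frames because the Playback above
# was constructed with every_other_frame=True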
# Rename RenderProduct_Replicator_01 folder to something more descriptive
os.rename(out_path + "/RenderProduct_Replicator_01/rgb", out_path + "/table")
os.rename(out_path + "/RenderProduct_Replicator/rgb", out_path + "/gripper")
os.system(f"rmdir {out_path}/RenderProduct_Replicator {out_path}/RenderProduct_Replicator_01")
# Remove rgb_ prefix from filenames (sudo apt install rename)
os.system(f"find {out_path}/table -type f -name '*' | rename 's/rgb_//'")
os.system(f"find {out_path}/gripper -type f -name '*' | rename 's/rgb_//'")
os.mkdir(out_path + "/main")
os.mkdir(out_path + "/secondary")
for i, frame in enumerate(tqdm(trajectory)):
# frame number as string with leading zeros
frame_str = str(i).zfill(5)
# check if frame file exists
if not os.path.isfile(f"{out_path}/table/{frame_str}.png") or not os.path.isfile(f"{out_path}/gripper/{frame_str}.png"):
continue
if frame["ui_state"]["primary_camera"] == 0:
os.system(f"ln -s ../gripper/{frame_str}.png {out_path}/secondary/{frame_str}.png")
os.system(f"ln -s ../table/{frame_str}.png {out_path}/main/{frame_str}.png")
else:
os.system(f"ln -s ../table/{frame_str}.png {out_path}/secondary/{frame_str}.png")
os.system(f"ln -s ../gripper/{frame_str}.png {out_path}/main/{frame_str}.png")
commands = [f"ffmpeg -framerate 30 -i '{out_path}/main/%05d.png' \
-c:v libx264 -pix_fmt yuv420p -y {out_path}/main.mp4",
f"ffmpeg -framerate 30 -i '{out_path}/secondary/%05d.png' \
-c:v libx264 -pix_fmt yuv420p -y {out_path}/secondary.mp4",
f"ffmpeg -framerate 30 -i '{out_path}/table/%05d.png' \
-c:v libx264 -pix_fmt yuv420p -y {out_path}/table.mp4",
f"ffmpeg -framerate 30 -i '{out_path}/gripper/%05d.png' \
-c:v libx264 -pix_fmt yuv420p -y {out_path}/gripper.mp4",
f"ffmpeg -framerate 30 -i '{out_path}/main/%05d.png' -framerate 30 -i '{out_path}/secondary/%05d.png' -filter_complex '[1]scale=iw/3:ih/3 [pip]; [0][pip] overlay=main_w-overlay_w:0[v]' -map '[v]' -vcodec libx264 -y {out_path}/operator_view.mp4",
]
processes = set()
for cmd in commands:
p = subprocess.Popen(shlex.split(cmd), stdin=subprocess.PIPE)
processes.add(p)
for process in processes:
process.wait()
if process.returncode != 0:
raise subprocess.CalledProcessError(process.returncode, process.args)
#os.system(f"rm -rf {out_path}/main {out_path}/secondary {out_path}/table {out_path}/gripper")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('input_file', help='Path to hdf5 file')
args = parser.parse_args()
main(args.input_file)
simulation_app.close()
| 6,125 |
Python
| 35.464286 | 247 | 0.657469 |
NVlabs/fast-explicit-teleop/srl/teleop/base_sample/__init__.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from srl.teleop.base_sample.base_sample import BaseSample
| 201 |
Python
| 32.666661 | 79 | 0.781095 |
NVlabs/fast-explicit-teleop/srl/teleop/analysis/analysis_extension.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import os
import asyncio
import omni.ui as ui
from omni.isaac.ui.ui_utils import btn_builder, setup_ui_headers, get_style
import carb
from omni.kit.viewport.utility import get_active_viewport_window
import omni
from srl.teleop.analysis.playback import Playback
from srl.teleop.assistance.logging import is_hdf5_file
from srl.teleop.assistance.ui import str_builder
from srl.spacemouse.ui_utils import xyz_plot_builder
from .ui import joint_state_plot_builder
import numpy as np
from omni.kit.menu.utils import add_menu_items, remove_menu_items, MenuItemDescription
import weakref
import omni.ext
from omni.isaac.core import World
from functools import partial
import h5py
class AnalysisExtension(omni.ext.IExt):
def on_startup(self, ext_id: str):
self._ext_id = ext_id
menu_items = [MenuItemDescription(name="Teleop Analysis", onclick_fn=lambda a=weakref.proxy(self): a._menu_callback())]
self._menu_items = menu_items
add_menu_items(self._menu_items, "SRL")
self._viewport = get_active_viewport_window("Viewport")
self.timeline = omni.timeline.get_timeline_interface()
self._world_buttons = {}
self._plots = {}
self.build_ui(name="Teleop Analysis",
title="Teleop Analysis",
doc_link="",
overview="Provides playback and analysis of saved trajectories",
file_path=os.path.abspath(__file__),
number_of_extra_frames=3,
window_width=350,)
self.build_control_ui(self.get_frame(index=0))
self.build_joint_state_plotting_ui(self.get_frame(index=1))
self._joint_states_plotting_buffer = np.zeros((360, 14))
self._control_plotting_buffer = np.zeros((360, 6))
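# Rolling windows over the last 360 samples, newest sample at row 0
# (14 joint position/velocity channels, 6 translation/rotation control channels).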
self._plotting_event_subscription = None
self.playback = None
def get_frame(self, index):
if index >= len(self._extra_frames):
raise Exception("there were {} extra frames created only".format(len(self._extra_frames)))
return self._extra_frames[index]
def _menu_callback(self):
self._window.visible = not self._window.visible
return
def shutdown_cleanup(self):
pass
def _on_snapping_button_event(self, value):
pass
def post_reset_button_event(self):
pass
def post_load_button_event(self):
pass
def _on_load_world(self):
self._world_buttons["Load World"].enabled = False
if self.playback:
self.playback._world_cleanup()
self.playback._world.clear_instance()
self.playback = None
else:
World.clear_instance()
async def _on_load_world_async():
selection_path = self._world_buttons["Trajectory Selection"].get_value_as_string()
if os.path.isdir(selection_path):
return
elif os.path.isfile(selection_path):
with h5py.File(selection_path, 'r') as f:
task = f.attrs["task"]
user = f.attrs["user"]
objects = f.attrs["objects"]
scene_description = f.attrs["scene_description"]
trajectory = f["frames"][()]
filepath_no_ext, ext = os.path.splitext(selection_path)
self.playback = Playback(task, scene_description, trajectory, save_images_path=filepath_no_ext)
if not self._plotting_event_subscription:
self._plotting_event_subscription = omni.kit.app.get_app().get_update_event_stream().create_subscription_to_pop(self._on_plotting_step)
else:
return
await self.playback.load_world_async()
await omni.kit.app.get_app().next_update_async()
self.playback._world.add_stage_callback("stage_event_1", self.on_stage_event)
self._enable_all_buttons(True)
self.post_load_button_event()
self.playback._world.add_timeline_callback("stop_reset_event", self._reset_on_stop_event)
self._world_buttons["Load World"].enabled = True
asyncio.ensure_future(_on_load_world_async())
return
def _on_reset(self):
async def _on_reset_async():
if self.playback:
await self.playback.reset_async()
await omni.kit.app.get_app().next_update_async()
self.post_reset_button_event()
asyncio.ensure_future(_on_reset_async())
return
def _on_plotting_step(self, e: carb.events.IEvent):
if not self.playback:
return
robot = self.playback.franka
if robot is not None:
positions = robot.get_joint_positions()[:7]
velocities = robot.get_joint_velocities()[:7]
if positions is not None:
self._joint_states_plotting_buffer = np.roll(self._joint_states_plotting_buffer, shift=1, axis=0)
self._joint_states_plotting_buffer[0, :7] = positions
self._joint_states_plotting_buffer[0, 7:] = velocities
for i in range(7):
self._plots["joint_positions"][i].set_data(*self._joint_states_plotting_buffer[:, i])
self._plots["joint_velocities"][i].set_data(*self._joint_states_plotting_buffer[:, 7 + i])
control = self.playback.control
if control is not None:
self._control_plotting_buffer = np.roll(self._control_plotting_buffer, shift=1, axis=0)
self._control_plotting_buffer[0, :3] = control["trans"]
self._control_plotting_buffer[0, 3:] = control["rot"]
for i in range(3):
self._plots["xyz_plot"][i].set_data(*self._control_plotting_buffer[:, i])
self._plots["xyz_vals"][i].set_value(self._control_plotting_buffer[0, i])
self._plots["rpy_plot"][i].set_data(*self._control_plotting_buffer[:, 3 + i])
self._plots["rpy_vals"][i].set_value(self._control_plotting_buffer[0, 3 + i])
if len(self._plots["xyz_plot"]) == 4:
self._plots["xyz_plot"][3].set_data(*np.linalg.norm(self._control_plotting_buffer[:, :3], axis=1))
self._plots["xyz_vals"][3].set_value(np.linalg.norm(self._control_plotting_buffer[0,:3]))
if len(self._plots["rpy_plot"]) == 4:
self._plots["rpy_plot"][3].set_data(*np.linalg.norm(self._control_plotting_buffer[:, 3:], axis=1))
self._plots["rpy_vals"][3].set_value(np.linalg.norm(self._control_plotting_buffer[0,3:]))
def _enable_all_buttons(self, flag):
for btn_name, btn in self._world_buttons.items():
if isinstance(btn, omni.ui._ui.Button):
btn.enabled = flag
return
def on_shutdown(self):
self._extra_frames = []
if self._menu_items is not None:
self._window_cleanup()
if self._world_buttons is not None:
self._world_buttons["Load World"].enabled = True
self._enable_all_buttons(False)
self.shutdown_cleanup()
return
def _window_cleanup(self):
remove_menu_items(self._menu_items, "SRL")
self._window = None
self._menu_items = None
self._world_buttons = None
return
def on_stage_event(self, event):
# event_type = omni.usd.StageEventType(event.type)
if event.type == int(omni.usd.StageEventType.CLOSED):
self._plotting_event_subscription = None
# If the stage is closed before on_startup has run, all of our fields will be undefined
if World.instance() is not None and hasattr(self, "playback"):
self.playback._world_cleanup()
# There's no World now, so in any case the user can load anew!
if hasattr(self, "_world_buttons"):
self._enable_all_buttons(False)
self._world_buttons["Load World"].enabled = True
return
def _reset_on_stop_event(self, e):
if e.type == int(omni.timeline.TimelineEventType.STOP):
self._world_buttons["Load World"].enabled = False
self._world_buttons["Reset"].enabled = True
self.post_clear_button_event()
return
def build_ui(self, name, title, doc_link, overview, file_path, number_of_extra_frames, window_width):
self._window = omni.ui.Window(
name, width=window_width, height=0, visible=True, dockPreference=ui.DockPreference.RIGHT_TOP
)
self._window.deferred_dock_in("Stage", ui.DockPolicy.TARGET_WINDOW_IS_ACTIVE)
self._extra_frames = []
with self._window.frame:
with ui.VStack(spacing=5, height=0):
setup_ui_headers(self._ext_id, file_path, title, doc_link, overview)
self._controls_frame = ui.CollapsableFrame(
title="Log Loading",
width=ui.Fraction(1),
height=0,
collapsed=False,
style=get_style(),
horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_AS_NEEDED,
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON,
)
with self._controls_frame:
with ui.VStack(style=get_style(), spacing=5, height=0):
def update_load_button_enabled(new_val):
if os.path.splitext(new_val.lower())[1] == ".hdf5":
self._world_buttons["Load World"].enabled = True
else:
self._world_buttons["Load World"].enabled = False
field_cfg = {
"label": "Trajectory File",
"type": "stringfield",
"default_val": os.path.expanduser('~/Documents/trajectories'),
"tooltip": "Output Directory",
"on_clicked_fn": update_load_button_enabled,
"use_folder_picker": True,
"item_filter_fn": is_hdf5_file,
"read_only": False,
}
self._world_buttons["Trajectory Selection"] = str_builder(**field_cfg)
load_cfg = {
"label": "Load",
"type": "button",
"text": "Load",
"tooltip": "Load World and Task",
"on_clicked_fn": self._on_load_world,
}
self._world_buttons["Load World"] = btn_builder(**load_cfg)
self._world_buttons["Load World"].enabled = False
reset_cfg = {
"label": "Reset",
"type": "button",
"text": "Reset",
"tooltip": "Reset robot and environment",
"on_clicked_fn": self._on_reset,
}
self._world_buttons["Reset"] = btn_builder(**reset_cfg)
self._world_buttons["Reset"].enabled = False
with ui.VStack(style=get_style(), spacing=5, height=0):
for i in range(number_of_extra_frames):
self._extra_frames.append(
ui.CollapsableFrame(
title="",
width=ui.Fraction(0.33),
height=0,
visible=False,
collapsed=False,
style=get_style(),
horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_AS_NEEDED,
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON,
)
)
def build_control_ui(self, frame):
with frame:
with ui.VStack(spacing=5):
frame.title = "Data"
frame.visible = True
kwargs = {
"label": "XYZ",
"data": [[],[],[]],
"include_norm": False
}
self._plots["xyz_plot"], self._plots[
"xyz_vals"
] = xyz_plot_builder(**kwargs)
kwargs = {
"label": "RPY",
"data": [[],[],[]],
"value_names": ("R", "P", "Y"),
"include_norm": False
}
self._plots["rpy_plot"], self._plots[
"rpy_vals"
] = xyz_plot_builder(**kwargs)
return
def build_joint_state_plotting_ui(self, frame):
frame.collapsed = True
with frame:
with ui.VStack(spacing=5):
frame.title = "Joint States"
frame.visible = True
kwargs = {
"label": "Positions",
"data": [[] for i in range(7)],
"min": -3.14,
"max": 3.14
}
self._plots["joint_positions"] = joint_state_plot_builder(**kwargs)
kwargs = {
"label": "Velocities",
"data": [[] for i in range(7)],
"min": -.45,
"max": .45
}
self._plots["joint_velocities"] = joint_state_plot_builder(**kwargs)
| 14,001 |
Python
| 40.922156 | 156 | 0.526605 |
NVlabs/fast-explicit-teleop/srl/teleop/analysis/playback.py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omni.isaac.core.utils.types import ArticulationAction
from srl.teleop.assistance.camera_controls import SwappableViewControls
from srl.teleop.assistance.tasks.lifting import LiftingTask
from srl.teleop.assistance.tasks.reaching import ReachingTask
from srl.teleop.assistance.tasks.sorting import SortingTask
from srl.teleop.assistance.tasks.stacking import StackingTask
from srl.teleop.assistance.tasks.subset_stacking import SubsetStackingTask
from srl.teleop.assistance.viewport import configure_main_viewport, configure_realsense_viewport, get_realsense_viewport, layout_picture_in_picture
from srl.teleop.assistance.viz import viz_axis
from srl.teleop.base_sample.base_sample import BaseSample
import numpy as np
from omni.kit.viewport.utility import get_active_viewport_window
import os
import aiofiles
async def save_frame(im, path):
from io import BytesIO
buffer = BytesIO()
im.save(buffer, format="png")
async with aiofiles.open(path, "wb") as file:
await file.write(buffer.getbuffer())
class Playback(BaseSample):
def __init__(self, task, scene_description, trajectory, save_images_path=None) -> None:
super().__init__()
self.set_world_settings(rendering_dt= 1 / 30, physics_dt=1/60)
self._articulation_controller = None
self.trajectory = trajectory
self.target_marker = None
self.mode = "play_state"
self.franka = None
self.control = None
self.control_raw = None
self._writer = None
self._render_products = []
self._save_images_path = save_images_path
if task == "sorting":
self.task = SortingTask(initial_scene_description=scene_description)
elif task =="stacking":
self.task = StackingTask(initial_scene_description=scene_description)
elif task == "lifting":
self.task = LiftingTask(initial_scene_description=scene_description)
elif task =="subset_stacking":
self.task = SubsetStackingTask(initial_scene_description=scene_description)
elif task =="reaching":
self.task = ReachingTask(initial_scene_description=scene_description)
else:
raise NotImplementedError("No playback for task " + task)
def setup_scene(self):
world = self.get_world()
world.add_task(self.task)
return
def world_cleanup(self):
self._clear_recorder()
async def setup_pre_reset(self):
world = self.get_world()
if world.physics_callback_exists("replay_scene"):
world.remove_physics_callback("replay_scene")
return
async def setup_post_load(self):
scene = self._world.scene
self.franka = scene.get_object(self.task.get_params()["robot_name"])
self.ghosts = [scene.get_object("ghost_franka0"),scene.get_object("ghost_franka1")]
self._object_ghosts = self.task.get_ghost_objects()
self.target_marker = viz_axis("/target_marker", (0,0,0.), (0,0,0,1.), (0.2, 0.2, 0.2))
self._articulation_controller = self.franka.get_articulation_controller()
self.realsense_vp = get_realsense_viewport(self.franka.camera.prim.GetPath())
configure_realsense_viewport(self.realsense_vp)
self.main_vp = get_active_viewport_window("Viewport")
configure_main_viewport(self.main_vp)
layout_picture_in_picture(self.main_vp, self.realsense_vp)
#self._camera_controls = ArcballCameraControls("/OmniverseKit_Persp", focus_delegate=get_focus)
self._camera_controls = SwappableViewControls("/OmniverseKit_Persp",self.main_vp, self.realsense_vp)
self._camera_controls.set_fixed_view()
self._camera_controls.camera.set_resolution((1280 // 2,720 // 2))
self.franka.camera.set_resolution((1280 // 2,720 // 2))
world = self.get_world()
world.play()
world.add_physics_callback("replay_scene", self._on_replay_scene_step)
if self._save_images_path:
self._init_recorder(self._save_images_path, [self._camera_controls.camera, self.franka.camera])
def _clear_recorder(self):
import omni.replicator.core as rep
rep.orchestrator.stop()
if self._writer:
self._writer.detach()
self._writer = None
import omni
stage = omni.usd.get_context().get_stage()
"""for rp in self._render_products:
stage.RemovePrim(rp)
self._render_products.clear()"""
rep.scripts.utils.viewport_manager.destroy_hydra_textures()
def _init_recorder(self, out_path, cameras) -> bool:
import omni.replicator.core as rep
# Init the writer
writer_params = {
"rgb": True
}
try:
self._writer = rep.BasicWriter(output_dir=out_path, **writer_params)
except Exception as e:
return False
# Render products for these cameras already exist (created when their viewports
# were configured), so nothing needs to be built here; their paths are attached
# to the writer below.
self._render_prods = []
# Attach the render products to the writer
try:
self._writer.attach([camera._render_product_path for camera in cameras])
#self._writer.attach(self._render_prods)
except Exception as e:
return False
rep.orchestrator.run()
return True
def _on_replay_scene_step(self, step_size):
from omni.kit.viewport.utility import get_active_viewport, capture_viewport_to_file
from PIL import Image
from io import BytesIO
import omni.renderer_capture
import asyncio
import time
current_step_i = self._world.current_time_step_index
capture_filename = f"{os.path.expanduser('~/out/')}test{current_step_i}.png"
"""async def wait_on_result():
await cap_obj.wait_for_result(completion_frames=30)
asyncio.ensure_future(wait_on_result())"""
if current_step_i < len(self.trajectory):
frame = self.trajectory[current_step_i]
self.target_marker.set_world_pose(*frame["robot_state"]["target_pose"])
self.control = frame["controls_state"]["filtered"]
self.control_raw = frame["controls_state"]["raw"]
if frame["ui_state"]["primary_camera"] != self._camera_controls.active_index:
self._camera_controls.swap()
if self.mode == "play_actions":
if current_step_i == 0:
self.task.set_object_poses(frame["scene_state"]["poses"])
self._articulation_controller.apply_action(
ArticulationAction(joint_positions=frame["robot_state"]["applied_joint_positions"])
)
else:
self.franka.set_joint_positions(frame["robot_state"]["joint_positions"])
self.task.set_object_poses(frame["scene_state"]["poses"])
ui_state = frame["ui_state"]
ghost_joint_pos = ui_state["robot_ghost_joint_positions"]
if not np.isnan(ghost_joint_pos[0]):
ghost = self.ghosts[0]
ghost.set_joint_positions(ghost_joint_pos)
ghost.show(gripper_only=True)
else:
ghost = self.ghosts[0]
ghost.hide()
ghost_obj_index = ui_state["object_ghost_index"]
if ghost_obj_index != -1:
ghost = list(self._object_ghosts.values())[ghost_obj_index]
ghost.show()
ghost.set_world_pose(*ui_state["object_ghost_pose"])
else:
for _, ghost in self._object_ghosts.items():
ghost.hide()
else:
self.get_world().pause()
self._clear_recorder()
| 8,277 |
Python
| 41.891191 | 147 | 0.627764 |
NVlabs/fast-explicit-teleop/srl/teleop/analysis/__init__.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from .analysis_extension import AnalysisExtension
from .playback import Playback
| 224 |
Python
| 31.142853 | 79 | 0.794643 |
NVlabs/fast-explicit-teleop/srl/teleop/analysis/ui.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from omni.ui import color as cl
from scipy.spatial.transform import Rotation
import omni.ui as ui
from omni.isaac.ui.ui_utils import add_separator
LABEL_WIDTH = 160
LABEL_WIDTH_LIGHT = 235
LABEL_HEIGHT = 18
HORIZONTAL_SPACING = 4
colors = [0xFF1515EA, 0xFF5FC054, 0xFFC5822A, 0xFFFF00FF, 0xFF00FFFF, 0xFFFFFF00, 0xFFFF77FF]
def joint_state_plot_builder(label="", data=[], num_joints=7, min=-1, max=1, tooltip=""):
"""Creates a stylized static XYZ plot
Args:
label (str, optional): Label to the left of the UI element. Defaults to "".
data (list(float), optional): Data to plot. Defaults to [].
min (int, optional): Minimum Y Value. Defaults to -1.
max (int, optional): Maximum Y Value. Defaults to "".
tooltip (str, optional): Tooltip to display over the Label.. Defaults to "".
Returns:
list(ui.Plot): list(x_plot, y_plot, z_plot)
"""
with ui.VStack(spacing=5):
with ui.HStack():
ui.Label(label, width=LABEL_WIDTH, alignment=ui.Alignment.LEFT_TOP, tooltip=tooltip)
plot_height = LABEL_HEIGHT * 5 + 13
plot_width = ui.Fraction(1)
with ui.ZStack():
ui.Rectangle(width=plot_width, height=plot_height)
plots = []
for i in range(num_joints):
plot = ui.Plot(
ui.Type.LINE,
min,
max,
*data[i],
value_stride=1,
width=plot_width,
height=plot_height,
style={"color": colors[i], "background_color": 0x0},
)
plots.append(plot)
def update_min(model):
for plot in plots:
plot.scale_min = model.as_float
def update_max(model):
for plot in plots:
plot.scale_max = model.as_float
ui.Spacer(width=5)
with ui.Frame(width=0):
with ui.VStack(spacing=5):
max_model = ui.FloatDrag(
name="Field", width=40, alignment=ui.Alignment.LEFT_BOTTOM, tooltip="Max"
).model
max_model.set_value(max)
min_model = ui.FloatDrag(
name="Field", width=40, alignment=ui.Alignment.LEFT_TOP, tooltip="Min"
).model
min_model.set_value(min)
min_model.add_value_changed_fn(update_min)
max_model.add_value_changed_fn(update_max)
ui.Spacer(width=20)
add_separator()
return plots
| 2,886 |
Python
| 34.207317 | 97 | 0.52876 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/experiment.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from srl.teleop.assistance.tasks.lifting import LiftingTask
from srl.teleop.assistance.tasks.subset_stacking import SubsetStackingTask
from srl.teleop.assistance.tasks.reaching import ReachingTask
from srl.teleop.assistance.tasks.sorting import SortingTask
from srl.teleop.assistance.tasks.stacking import StackingTask
import numpy as np
from itertools import permutations
SLOT_NAMES = ["3D Mouse Demo", "Control Demo", "Reaching", "Reaching Assist", "Stacking A Warmup", "Stacking A", "Multi-Stacking A", "Stacking B Warmup", "Stacking B", "Multi-Stacking B", "Stacking C Warmup", "Stacking C", "Multi-Stacking C"]
PARTICIPANT_ID = 0
TASK_BY_INDEX = [0, 1, 2, 3, 4, 5, 3, 4, 5, 3, 4, 5]
CONDITION_BY_INDEX = [0, 0, 1, 0,0,0, 1,1,1, 2,2,2]
CONDITION_ORDERS = list(permutations([0,1,2]))
LATIN_SQUARE = [[0,1,2],
[1,2,0],
[2,0,1]]
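# Counterbalancing: CONDITION_ORDERS enumerates all 3! = 6 orderings of the three
# assistance conditions, assigned round-robin by participant ID below. LATIN_SQUARE
# additionally balances which random scene seed each condition sees across participants.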
def get_ordering(participant_id):
return CONDITION_ORDERS[participant_id % len(CONDITION_ORDERS)]
def configure_for_condition_index(i, task_ui_models, participant_id):
task_i = TASK_BY_INDEX[i]
condition_i = CONDITION_BY_INDEX[i]
if i > 2:
# Counterbalance actual experimental tasks
condition_i = get_ordering(participant_id)[condition_i]
if task_i == 0:
task = LiftingTask(n_cuboids=1, rng=np.random.RandomState(0), max_duration=None)
elif task_i == 1:
task = ReachingTask()
elif task_i == 2:
task = ReachingTask(max_duration=None)
elif task_i == 3:
task = StackingTask(n_cuboids=2, rng=np.random.RandomState(participant_id + 1000 * condition_i), max_duration=None, repeat=False)
elif task_i == 4:
task = StackingTask(n_cuboids=2, rng=np.random.RandomState(participant_id + 1000 * (condition_i + 1)), max_duration=60 * 2, repeat=True)
elif task_i == 5:
task = SubsetStackingTask(rng=np.random.RandomState(LATIN_SQUARE[participant_id % 3][condition_i]))
elif task_i == 6:
task = SortingTask(rng=np.random.RandomState(LATIN_SQUARE[participant_id % 3][condition_i]))
else:
raise Exception("Unknown task index")
if condition_i == 0:
task_ui_models["Surrogates"].set_value(False)
task_ui_models["Suggest Grasps"].set_value(False)
task_ui_models["Suggest Placements"].set_value(False)
elif condition_i == 1:
task_ui_models["Surrogates"].set_value(False)
task_ui_models["Suggest Grasps"].set_value(True)
task_ui_models["Suggest Placements"].set_value(True)
elif condition_i == 2:
task_ui_models["Surrogates"].set_value(True)
task_ui_models["Suggest Grasps"].set_value(True)
task_ui_models["Suggest Placements"].set_value(True)
return task, condition_i
| 2,875 |
Python
| 43.246153 | 242 | 0.676522 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/scene.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from omni.ui_scene import scene as sc
from omni.ui import color as cl
from typing import List
import numpy as np
from scipy.spatial.transform import Rotation
import omni.ui as ui
from srl.teleop.assistance.proposals import InvalidReason
from .proposals import GroupedPoseProposalTable, PlanePlaneProposalTable
import matplotlib.cm as cm
from matplotlib.colors import LinearSegmentedColormap
import omni
import time
class ViewportScene():
def __init__(self, viewport_window: ui.Window, ext_id: str, use_scene_camera: bool=True) -> None:
self._scene_view = None
self._viewport_window = viewport_window
self._ext_id = ext_id
self.manipulator = None
self.use_scene_camera = use_scene_camera
with self._viewport_window.get_frame(ext_id):
if use_scene_camera:
# scene view (default camera-model)
self._scene_view = sc.SceneView()
# register the scene view to get projection and view updates
self._viewport_window.viewport_api.add_scene_view(self._scene_view)
else:
projection = [1e-1, 0, 0, 0]
projection += [0, 1e-1, 0, 0]
projection += [0, 0, 2e-2, 0]
projection += [0, 0, 1, 1]
view = sc.Matrix44.get_translation_matrix(8.5, -4.25, 0) * sc.Matrix44.get_rotation_matrix(-0.5,0.,0.)
self._scene_view = sc.SceneView(projection=projection, view=view)
def add_manipulator(self, manipulator_class: sc.Manipulator):
# add handlers into the scene view's scene
with self._scene_view.scene:
self.manipulator = manipulator_class()
def __del__(self):
self.destroy()
def destroy(self):
if self.manipulator:
self.manipulator.clear()
if self._scene_view:
# empty the scene view
self._scene_view.scene.clear()
# un-register the scene view
if self._viewport_window and self.use_scene_camera:
self._viewport_window.viewport_api.remove_scene_view(self._scene_view)
# remove references
self._viewport_window = None
self._scene_view = None
class AssistanceManipulator(sc.Manipulator):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._plane_table = None
self._grasp_table = None
self._placement_table = None
self._grasp_distribution = None
self._plane_distribution = None
self.cfg_frames_show = True
self.cfg_frames_color = [1.0, 1.0, 1.0, 1.0]
self.cfg_frames_size = 4
self.cfg_names_show = True
self.cfg_names_color = [1.0, 1.0, 0.0, 1.0]
self.cfg_names_size = 20
self.cfg_axes_show = True
self.cfg_axes_length = 0.1
self.cfg_axes_thickness = 4
self.cfg_arrows_show = True
self.cfg_arrows_color = [0.0, 1.0, 1.0, 1.0]
self.cfg_arrows_thickness = 4
self.cm = cm.hot
ncolors = 256
color_array = cm.hot(np.linspace(0.,1., ncolors))
# change alpha values
color_array[:,-1] = np.linspace(0.05,0.7,ncolors)
# create a colormap object
self.cm = LinearSegmentedColormap.from_list(name='hot_alpha',colors=color_array)
def on_build(self):
if not self._plane_table:
return
grasps = self._grasp_table
planes = self._plane_table
if self._plane_distribution is not None:
positions = planes.get_centroids_world()[planes._valid == InvalidReason.VALID.value]
sc.Points(positions.tolist(), colors=[cl(*self.cfg_frames_color)] * len(positions), sizes=[self.cfg_frames_size] * len(positions))
if self._grasp_distribution is not None:
start = time.time()
# This'll only exist if we're actively inferring
valid_mask = grasps._valid == InvalidReason.VALID.value
positions = grasps._poses_world[:, :3, 3][valid_mask]
if len(positions) == 0:
return
score_probs = np.exp(self._grasp_distribution[valid_mask])
score_probs /= np.max(np.abs(score_probs),axis=0)
colors = self.cm(score_probs)
#sc.Points(positions.tolist(), colors=[cl(*color) for color in colors], sizes=[self.cfg_frames_size] * len(positions))
for grasp, color in zip(grasps._poses_world[valid_mask], colors):
with sc.Transform(transform=sc.Matrix44(*grasp.T.flatten())):
sc.Line([0, 0, -0.04], [0, 0, -0.09], color=cl(*color), thickness=3)
sc.Line([0, -.04, -0.04], [0, 0.04, -0.04], color=cl(*color), thickness=3)
sc.Line([0, 0.04, -0.04], [0, 0.04, 0], color=cl(*color), thickness=3)
sc.Line([0, -0.04, -0.04], [0, -0.04, 0], color=cl(*color), thickness=3)
end = time.time()
#print(end - start)
return
# NOTE: everything below is unreachable (the method returns above) and references
# `names`, `positions`, and `quaternions`, which are not defined in this scope.
# draw names and axes
T = np.eye(4)
for name, position, quaternion in zip(names, positions, quaternions):
# names
T[:3,3] = position
if self.cfg_names_show:
with sc.Transform(transform=sc.Matrix44(*T.T.flatten())):
sc.Label(name, alignment=ui.Alignment.CENTER_TOP, color=cl(*self.cfg_names_color), size=self.cfg_names_size)
# axes
if self.cfg_axes_show:
T[:3,:3] = Rotation.from_quat(quaternion).as_matrix()
with sc.Transform(transform=sc.Matrix44(*T.T.flatten())):
k = self.cfg_axes_length
sc.Line([0, 0, 0], [k, 0, 0], color=cl("#ff0000"), thickness=self.cfg_axes_thickness)
sc.Line([0, 0, 0], [0, k, 0], color=cl("#00ff00"), thickness=self.cfg_axes_thickness)
sc.Line([0, 0, 0], [0, 0, k], color=cl("#0000ff"), thickness=self.cfg_axes_thickness)
def update(self, grasp_table: GroupedPoseProposalTable, placement_table: GroupedPoseProposalTable, plane_table: PlanePlaneProposalTable):
self._grasp_table = grasp_table
self._placement_table = placement_table
self._plane_table = plane_table
# Triggers rebuilding.
self.invalidate()
def set_grasp_distribution(self, distribution):
self._grasp_distribution = distribution
def reset(self):
self._grasp_table = self._placement_table = self._plane_table = None
self.invalidate()
| 6,690 |
Python
| 38.591716 | 142 | 0.589686 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/suggestions.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import numpy as np
from omni.isaac.core.utils.prims import is_prim_path_valid, get_prim_type_name, delete_prim, get_all_matching_child_prims
import math
from srl.teleop.assistance.shapenet_import import ShapeNetPrim
from srl.teleop.assistance.transforms import normalized, T2pq, pq2T
import scipy.spatial.transform
from pxr import UsdGeom
def get_cube_symmetry_rotations():
octahedral_group = scipy.spatial.transform.Rotation.create_group('O')
return octahedral_group.as_matrix()
def get_cylinder_symmetry_rotations(n_rotational_steps=20):
results = np.empty((1 + n_rotational_steps, 3, 3))
# X flip
results[0] = np.diag((1,-1,-1))
theta = np.linspace(0, 2 * math.pi, n_rotational_steps, endpoint=False)  # exclude the endpoint so 0 and 2*pi don't duplicate
results[1:, 0,0] = np.cos(theta)
results[1:, 0,1] = -np.sin(theta)
results[1:, 1,0] = np.sin(theta)
results[1:, 1,1] = np.cos(theta)
results[1:, 2,2] = 1
return results
CUBE_SYMMETRY_Rs = get_cube_symmetry_rotations()
CYLINDER_SYMMETRY_Rs = get_cylinder_symmetry_rotations()
def make_grasp_T(t, ay):
az = normalized(-t)
ax = np.cross(ay, az)
T = np.eye(4)
T[:3, 0] = ax
T[:3, 1] = ay
T[:3, 2] = az
T[:3, 3] = t
return T
def make_cuboid_grasp_Ts(block_pick_height):
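# 3 face axes x 2 in-plane finger axes x 2 approach signs x 2 wrist flips = 24 grasp frames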
R = np.eye(3)
t_i = 0
Ts = np.empty((24, 4, 4))
for i in range(3):
t = block_pick_height * R[:, i]
for j in range(2):
ay = R[:, (i + j + 1) % 3]
for sign_1 in [1, -1]:
for sign_2 in [1, -1]:
Ts[t_i] = make_grasp_T(sign_1 * t, sign_2 * ay)
t_i += 1
return Ts
def make_cylinder_grasp_Ts(r, h):
# The cylinder axis centered at (0,0,0) pointing up in +Z
# Some of these are redundant, and the ones that aren't don't lend themselves to stable placement...
as_cuboid_grasps = make_cuboid_grasp_Ts(np.array((r,r,h/2)))
# Point gripper z toward the grasp point, x toward negative world Z
rotational_steps = 20
side_candidates = np.empty((rotational_steps * 2, 4, 4))
for k in range(rotational_steps):
x = (2 * math.pi / rotational_steps) * k
point = np.array((r * np.cos(x), r * np.sin(x), 0))
ay = np.array((-np.sin(x), np.cos(x), 0))
side_candidates[k] = make_grasp_T(point, ay)
side_candidates[k + rotational_steps] = make_grasp_T(point, -ay)
top_candidates = np.empty((rotational_steps * 2, 4, 4))
for k in range(rotational_steps):
x = (2 * math.pi / rotational_steps) * k
point = np.array((0, 0, h / 2))
ay = np.array((np.cos(x), np.sin(x), 0))
top_candidates[k] = make_grasp_T(point, ay)
top_candidates[k + rotational_steps] = make_grasp_T(-point, ay)
return np.vstack((side_candidates, top_candidates))
def make_cone_grasp_Ts(r, h):
return []
def make_cuboid_cuboid_placement_Ts(to_place_size, to_align_with_size):
# Strategy: centroids aligned. Compute all possible pairs of orientations. Put to_place up against
# the side of to_align_with along the x axis
# See https://en.wikipedia.org/wiki/Octahedral_symmetry
Ts = []
for align_R in CUBE_SYMMETRY_Rs:
# We're transforming the sizes to determine the depth of the cube
# in the X direction. Sign doesn't matter.
v_align = np.abs(align_R.dot(to_align_with_size))
for place_R in CUBE_SYMMETRY_Rs:
v_place = np.abs(place_R.dot(to_place_size))
# We have the two cuboids in an arbitrary orientation. Now we stack them next to each other in X
T = np.identity(4)
# X displacement, with a little epsilon so the collision checker stays clear
T[0,3] = 0.001 + (v_place[0] + v_align[0]) / 2.0
# Orientation wrt to to_align_with. Sub out anchor frame and get just relative orientation
inv_align_R_4 = np.identity(4)
inv_align_R_4[:3,:3] = align_R.T
# How we should rotate the placement...
T[:3,:3] = place_R
# but in the alignment frame
T = inv_align_R_4.dot(T)
Ts.append(T)
return np.array(Ts)
def make_cylinder_cylinder_placement_Ts(to_place_h, anchor_h):
# Placements only for planar faces (+Z, -Z)
Ts = []
for align_R in CYLINDER_SYMMETRY_Rs:
for place_R in CYLINDER_SYMMETRY_Rs:
T = np.identity(4)
# Z displacement
T[2,3] = 0.001 + (to_place_h + anchor_h) / 2.0
# Orientation wrt to to_align_with. Sub out anchor frame and get just relative orientation
inv_align_R_4 = np.identity(4)
inv_align_R_4[:3,:3] = align_R.T
# How we should rotate the placement...
T[:3,:3] = place_R
# but in the alignment frame
T = inv_align_R_4.dot(T)
Ts.append(T)
return np.array(Ts)
def check_grasp_orientation_similarity(
world_grasp_T,
axis_x_filter=None,
axis_x_filter_thresh=0.1,
axis_y_filter=None,
axis_y_filter_thresh=0.1,
axis_z_filter=None,
axis_z_filter_thresh=0.1,
):
to_use_i = []
filters = np.zeros((3,3))
for i, filter in enumerate((axis_x_filter, axis_y_filter, axis_z_filter)):
if filter is None:
continue
to_use_i.append(i)
filters[i,:] = filter
thresh = np.array((axis_x_filter_thresh, axis_y_filter_thresh, axis_z_filter_thresh))
axes_to_check = world_grasp_T[:, :3, to_use_i]
# Get dot products between the axes of the grasps and the filter directions. Batch over the leading
# indices.
scores = 1.0 - np.einsum('...ij,...ji->...i', filters[to_use_i,:], axes_to_check)
# count num thresholds we are under,
threshes_satisfied = (scores < thresh[to_use_i,]).sum(1)
# Should be under all of them
return threshes_satisfied == len(to_use_i)
def generate_candidate_grasps(obj):
prim_type = get_prim_type_name(obj.prim_path)
as_prim = obj.prim
to_world_tf = pq2T(*obj.get_world_pose())
if isinstance(obj, ShapeNetPrim):
#return []
return obj.grasp_annotations
elif prim_type == "Cube":
size = obj.get_world_scale()
block_grasp_Ts = make_cuboid_grasp_Ts(size / 2 - .015)
#res = get_world_block_grasp_Ts(to_world_tf, block_grasp_Ts, axis_z_filter=np.array((0.,0.,-1.)))
return block_grasp_Ts
"""for T in res:
p,q = T2pq(T)
viz_axis(viz_prefix, p, q)"""
elif prim_type == "Cylinder":
height = obj.get_height()
radius = obj.get_radius()
return make_cylinder_grasp_Ts(radius - 0.01, height - 0.01)
elif prim_type == "Mesh":
mesh = dict()
mesh["points"] = mesh.GetPointsAttr().Get()
mesh["normals"] = mesh.GetNormalsAttr().Get()
mesh["vertex_counts"] = mesh.GetFaceVertexCountsAttr().Get()
mesh["vertex_indices"] = mesh.GetFaceVertexIndicesAttr().Get()
else:
# Ignore other objects for now
pass
return np.empty((0,4,4))
def generate_candidate_placements(to_place, to_align_with):
to_place_type = get_prim_type_name(to_place.prim_path)
to_place_prim = to_place.prim
align_T = pq2T(*to_align_with.get_world_pose())
place_T = pq2T(*to_place.get_world_pose())
to_place_type = get_prim_type_name(to_place.prim_path)
to_align_with_type = get_prim_type_name(to_align_with.prim_path)
if to_place_type == "Cube":
to_place_size = to_place.get_world_scale()
if to_align_with_type == "Cube":
to_align_with_size = to_align_with.get_world_scale()
return make_cuboid_cuboid_placement_Ts(to_place_size, to_align_with_size)
elif to_place_type == "Cylinder":
if to_align_with_type == "Cylinder":
return make_cylinder_cylinder_placement_Ts(to_place.get_height(), to_align_with.get_height())
elif to_align_with_type == "Cube":
pass
elif to_place_type == "Mesh":
pass
return np.empty((0,4,4))
| 8,138 |
Python
| 35.173333 | 121 | 0.603219 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/ghost_franka.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from typing import Optional, List
import numpy as np
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.core.utils.prims import get_prim_at_path, is_prim_path_valid
from pxr import Usd, UsdGeom, Gf, UsdPhysics, PhysxSchema, UsdShade, Sdf
import omni
from omni.isaac.core.materials.visual_material import VisualMaterial
from srl.teleop.assistance.camera_franka import CameraFranka
import os
import srl.teleop.assistance
def load_ghost_material(to_path="/Looks/GhostVolumetric"):
if not is_prim_path_valid(to_path):
success = omni.kit.commands.execute(
"CreateMdlMaterialPrim",
mtl_url=os.path.join(srl.teleop.assistance.DATA_DIR, "GhostVolumetric.mdl"),
mtl_name="voltest_02",
mtl_path=Sdf.Path(to_path),
)
shader = UsdShade.Shader(get_prim_at_path(f"{to_path}/Shader"))
material = UsdShade.Material(get_prim_at_path(to_path))
shader.CreateInput("absorption", Sdf.ValueTypeNames.Color3f).Set(Gf.Vec3f(0.8, 0.8, 0.8))
shader.CreateInput("scattering", Sdf.ValueTypeNames.Color3f).Set(Gf.Vec3f(0.5, 0.5, 0.5))
shader.CreateInput("transmission_color", Sdf.ValueTypeNames.Color3f).Set(
Gf.Vec3f(0.1, 1.0, 0.3)
)
shader.CreateInput("emission_color", Sdf.ValueTypeNames.Color3f).Set(
Gf.Vec3f(0.1, 1.0, 0.3)
)
shader.CreateInput("distance_scale", Sdf.ValueTypeNames.Float).Set(1.0)
shader.CreateInput("emissive_scale", Sdf.ValueTypeNames.Float).Set(300.0)
shader.CreateInput("transmission_color", Sdf.ValueTypeNames.Color3f).Set(
Gf.Vec3f(0.3, 1.0, 0.3)
)
else:
shader = UsdShade.Shader(get_prim_at_path(f"{to_path}/Shader"))
material = UsdShade.Material(get_prim_at_path(to_path))
material = VisualMaterial(
name="GhostVolumetric",
prim_path=to_path,
prim=get_prim_at_path(to_path),
shaders_list=[shader],
material=material,
)
material_inputs = {}
for input in material.shaders_list[0].GetInputs():
material_inputs[input.GetFullName()] = input
return material, material_inputs
class GhostFranka(CameraFranka):
"""[summary]
Args:
prim_path (str): [description]
name (str, optional): [description]. Defaults to "franka_robot".
usd_path (Optional[str], optional): [description]. Defaults to None.
position (Optional[np.ndarray], optional): [description]. Defaults to None.
orientation (Optional[np.ndarray], optional): [description]. Defaults to None.
end_effector_prim_name (Optional[str], optional): [description]. Defaults to None.
gripper_dof_names (Optional[List[str]], optional): [description]. Defaults to None.
gripper_open_position (Optional[np.ndarray], optional): [description]. Defaults to None.
gripper_closed_position (Optional[np.ndarray], optional): [description]. Defaults to None.
"""
def __init__(
self,
prim_path: str,
name: str = "franka_robot",
usd_path: Optional[str] = None,
position: Optional[np.ndarray] = None,
orientation: Optional[np.ndarray] = None,
end_effector_prim_name: Optional[str] = None,
gripper_dof_names: Optional[List[str]] = None,
gripper_open_position: Optional[np.ndarray] = None,
gripper_closed_position: Optional[np.ndarray] = None,
material_path="/Looks/GhostVolumetric"
) -> None:
super().__init__(prim_path, name, usd_path, position, orientation,end_effector_prim_name, gripper_dof_names, gripper_open_position, gripper_closed_position, collision_sensors=False, camera_sensor=False)
self.material, self.material_inputs = load_ghost_material(material_path)
self.material_inputs["inputs:transmission_color"].Set((1.5, 1.5, 1.5))
self.material_inputs["inputs:emission_color"].Set((1.25, 1.25, 1.25))
self.material_inputs["inputs:emissive_scale"].Set(300.)
self._imageable = UsdGeom.Imageable(self.prim)
self.apply_visual_material(self.material)
self.disable_collisions(remove=True)
self.hide()
self._current_color = None
self._current_opacity = None
# Populate simplifed meshes under the right links of the robot
if not is_prim_path_valid(prim_path + "/panda_hand/viz"):
self.viz_palm = add_reference_to_stage(usd_path=os.path.join(srl.teleop.assistance.DATA_DIR, "panda_hand_viz.usd"), prim_path=prim_path + "/panda_hand/viz")
self.viz_left_finger = add_reference_to_stage(usd_path=os.path.join(srl.teleop.assistance.DATA_DIR, "panda_leftfinger_viz.usd"), prim_path=prim_path + "/panda_leftfinger/viz")
self.viz_right_finger = add_reference_to_stage(usd_path=os.path.join(srl.teleop.assistance.DATA_DIR, "panda_rightfinger_viz.usd"), prim_path=prim_path + "/panda_rightfinger/viz")
else:
self.viz_palm = get_prim_at_path(prim_path + "/panda_hand/viz")
self.viz_left_finger = get_prim_at_path(prim_path + "/panda_leftfinger/viz")
self.viz_right_finger = get_prim_at_path(prim_path + "/panda_rightfinger/viz")
for p in [self.viz_left_finger, self.viz_right_finger, self.viz_palm]:
viz_mesh = get_prim_at_path(f"{p.GetPath()}/mesh")
def disable_collisions(self, remove=False):
for p in Usd.PrimRange(self.prim):
if p.HasAPI(UsdPhysics.CollisionAPI):
collision_api = UsdPhysics.CollisionAPI(p)
collision_api.GetCollisionEnabledAttr().Set(False)
if remove:
p.RemoveAPI(UsdPhysics.CollisionAPI)
@property
def visible(self):
return self._imageable.GetVisibilityAttr().Get() != "invisible"
def hide(self):
self._imageable.MakeInvisible()
def show(self, gripper_only=False):
if not gripper_only:
self._imageable.MakeVisible()
else:
for p in [self.viz_left_finger, self.viz_right_finger, self.viz_palm]:
UsdGeom.Imageable(p).MakeVisible()
def set_color(self, color, opacity=1.0):
if color == self._current_color and opacity == self._current_opacity:
# idempotent
return
transmission = 1.0 - opacity
def clip(value):
# Inputs seem to behave differently for 0 and close to 0 for some reason...
return Gf.Vec3f(*np.clip(value, 0.0001, 1.0))
# The colors you don't absorb will shine through.
# The color you emit shows in the absence of other colors
if color == "red":
self.material_inputs["inputs:absorption"].Set((transmission, 0, 0))
elif color == "yellow":
self.material_inputs["inputs:absorption"].Set(clip((.0, .0, transmission)))
elif color == "green":
self.material_inputs["inputs:absorption"].Set(clip((transmission, .0, transmission)))
elif color == "white":
self.material_inputs["inputs:absorption"].Set(clip((opacity, opacity, opacity)))
else:
return
self._current_color = color
self._current_opacity = opacity
| 7,588 |
Python
| 45.558282 | 214 | 0.635213 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/check_collision.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from typing import Union
from srl.teleop.assistance.transforms import T2pq_array
import warp as wp
import warp.render
import numpy as np
import time
from pxr import Usd, UsdGeom, UsdSkel, Gf
import trimesh
import quaternion
import carb
DEVICE = wp.get_preferred_device()
#DEVICE = 'cpu'
@wp.func
def cw_min(a: wp.vec3, b: wp.vec3):
return wp.vec3(wp.min(a[0], b[0]),
wp.min(a[1], b[1]),
wp.min(a[2], b[2]))
@wp.func
def cw_max(a: wp.vec3, b: wp.vec3):
return wp.vec3(wp.max(a[0], b[0]),
wp.max(a[1], b[1]),
wp.max(a[2], b[2]))
@wp.kernel
def intersect(query_mesh: wp.uint64,
query_mesh_scale: wp.float32,
query_xforms: wp.array(dtype=wp.transform),
fixed_mesh: wp.uint64,
result: wp.array(dtype=int, ndim=2)):
batch, face = wp.tid()
# one thread is launched per (transform, query-mesh face) pair; each face is
# tested against the fixed mesh's BVH
# transforms take query-mesh coordinates into fixed-mesh space
xform = query_xforms[batch]
# load query triangles points and transform to mesh_1's space
# Local scale is useful for checking whether the interior (roughly) of the object would overlap.
v0 = wp.transform_point(xform, wp.mesh_eval_position(query_mesh, face, 1.0, 0.0) * query_mesh_scale)
v1 = wp.transform_point(xform, wp.mesh_eval_position(query_mesh, face, 0.0, 1.0) * query_mesh_scale)
v2 = wp.transform_point(xform, wp.mesh_eval_position(query_mesh, face, 0.0, 0.0) * query_mesh_scale)
# compute bounds of the query triangle
lower = cw_min(cw_min(v0, v1), v2)
upper = cw_max(cw_max(v0, v1), v2)
query = wp.mesh_query_aabb(fixed_mesh, lower, upper)
result[batch][face] = 0
for f in query:
u0 = wp.mesh_eval_position(fixed_mesh, f, 1.0, 0.0)
u1 = wp.mesh_eval_position(fixed_mesh, f, 0.0, 1.0)
u2 = wp.mesh_eval_position(fixed_mesh, f, 0.0, 0.0)
# test for triangle intersection
i = wp.intersect_tri_tri(v0, v1, v2,
u0, u1, u2)
if i > 0:
result[batch][face] = 1
return
# use if you want to count all intersections
#wp.atomic_add(result, batch, i)
@wp.kernel
def grasp_contacts(
mesh_1: wp.uint64,
left_finger_pad_point: wp.vec3,
right_finger_pad_point: wp.vec3,
palm_point: wp.vec3,
xforms: wp.array(dtype=wp.transform),
result: wp.array(dtype=float, ndim=2),
points: wp.array(dtype=wp.vec3, ndim=2)):
batch = wp.tid()
# one thread is launched per transform; finger-pad and palm rays are cast from
# the gripper frame against mesh_1's BVH
# transforms take gripper coordinates into mesh_1 space
xform = xforms[batch]
# transform the ray origins into mesh_1's space
left_ray_origin = wp.transform_point(xform, left_finger_pad_point)
right_ray_origin = wp.transform_point(xform, right_finger_pad_point)
palm_ray_origin = wp.transform_point(xform, palm_point)
left_ray_dir = wp.transform_vector(xform, wp.vec3(0., -1., 0.))
right_ray_dir = wp.transform_vector(xform, wp.vec3(0., 1., 0.))
palm_ray_dir = wp.transform_vector(xform, wp.vec3(0., 0., 1.))
left_ray_t = float(0.)
left_ray_sign = float(0.)
u = float(0.)
v = float(0.0)
normal = wp.vec3()
face = int(0)
left_hit = wp.mesh_query_ray(mesh_1, left_ray_origin, left_ray_dir, .1, left_ray_t, u, v, left_ray_sign, normal, face)
right_ray_t = float(0.)
right_ray_sign = float(0.)
right_hit = wp.mesh_query_ray(mesh_1, right_ray_origin, right_ray_dir, .1, right_ray_t, u, v, right_ray_sign, normal, face)
palm_ray_t = float(100.)
palm_ray_sign = float(0.)
palm_hit = wp.mesh_query_ray(mesh_1, palm_ray_origin, palm_ray_dir, .04, palm_ray_t, u, v, palm_ray_sign, normal, face)
#points[batch][0] = left_ray_origin + left_ray_t * left_ray_dir
#points[batch][1] = right_ray_origin + right_ray_t * right_ray_dir
#points[batch][2] = palm_ray_origin + palm_ray_t * palm_ray_dir
result[batch][2] = palm_ray_t
if not (left_hit and right_hit):
# Usually _both_ rays hit. If either one misses, report both distances as zero
# so the outside code can assume as much.
result[batch][0] = 0.
result[batch][1] = 0.
else:
result[batch][0] = left_ray_t
result[batch][1] = right_ray_t
class WarpGeometeryScene:
def __init__(self):
self._warp_mesh_cache = {}
self._trimesh_cache = {}
def query(self, Ts, from_mesh, to_mesh, render=False, query_name=None, from_mesh_scale=1.0):
# Transforms take "from-mesh" coordinates into "to-mesh" coordinates
from_mesh = self._load_and_cache_geometry(from_mesh, "warp")
to_mesh = self._load_and_cache_geometry(to_mesh, "warp")
pq_array = T2pq_array(Ts)
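# warp transforms are (px, py, pz, qx, qy, qz, qw); T2pq_array returns w-first
# quaternions, hence the column reindexing below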
xforms = wp.array(pq_array[:, (0,1,2,4,5,6,3)], dtype=wp.transform, device=DEVICE)
with wp.ScopedTimer("intersect", active=False):
carb.profiler.begin(1, f"collision check (N={len(Ts)})", active=True)
query_num_faces = len(from_mesh.indices) // 3
shape = (len(xforms),query_num_faces)
array_results = wp.empty(shape=shape, dtype=int, device=DEVICE)
wp.launch(kernel=intersect, dim=shape, inputs=[from_mesh.id, from_mesh_scale, xforms, to_mesh.id, array_results], device=DEVICE)
wp.synchronize()
# Get num contacts per transform by summing over all faces
results = array_results.numpy()
if len(Ts) == 0:
# warp 0.5.1
results = np.empty(shape)
results = results.sum(1)
carb.profiler.end(1, True)
if render:
if query_name is None:
query_name = str(self._get_mesh_name(to_mesh)).split("/")[-1]
self.viz_query(results, xforms, from_mesh, to_mesh, query_name)
return results
def query_grasp_contacts(self, Ts, from_mesh, to_mesh, render=False, query_name=None):
# Transforms take "from-mesh" coordinates into "to-mesh" coordinates
carb.profiler.begin(1, "Prep meshes", active=True)
from_mesh = self._load_and_cache_geometry(from_mesh, "warp")
to_mesh = self._load_and_cache_geometry(to_mesh, "warp")
carb.profiler.end(1, True)
carb.profiler.begin(1, "Prep transforms", active=True)
carb.profiler.begin(1, "T2pq", active=True)
pq_array = T2pq_array(Ts)
carb.profiler.end(1, True)
xforms = wp.array(pq_array[:, (0,1,2,4,5,6,3)], dtype=wp.transform, device=DEVICE)
carb.profiler.end(1, True)
with wp.ScopedTimer("intersect_and_contact", active=False):
carb.profiler.begin(1, f"collision and contact measure (N={len(Ts)})", active=True)
query_num_faces = len(from_mesh.indices) // 3
shape = (len(xforms),query_num_faces)
contacts_shape = (len(xforms), 3)
contact_results = wp.empty(shape=contacts_shape, dtype=float, device=DEVICE)
points = wp.empty(shape=(len(xforms), 3), dtype=wp.vec3, device=DEVICE)
intersect_results = wp.empty(shape=shape, dtype=int, device=DEVICE)
wp.launch(kernel=intersect, dim=shape, inputs=[from_mesh.id, 1.0, xforms, to_mesh.id, intersect_results], device=DEVICE)
wp.launch(kernel=grasp_contacts, dim=(len(xforms),), inputs=[to_mesh.id, (0.0, 0.04, 0.005), (0.0, -0.04, 0.005), (0.0,0.0,-0.025), xforms, contact_results, points], device=DEVICE)
wp.synchronize()
# Get num contacts per transform by summing over all faces
intersections = intersect_results.numpy()
contacts = contact_results.numpy()
if len(Ts) == 0:
# warp 0.5.1
intersections = np.empty(shape)
contacts = np.empty(shape)
intersections = intersections.sum(1)
carb.profiler.end(1, True)
if render:
if query_name is None:
query_name = str(self._get_mesh_name(to_mesh)).split("/")[-1]
self.viz_query(intersections, xforms, from_mesh, to_mesh, query_name, contacts=points.numpy())
return intersections, contacts
def viz_query(self, collisions, xforms, from_mesh, to_mesh, target_name, contacts=None):
if len(xforms) == 0:
return
renderer = wp.render.UsdRenderer(f"/tmp/collision_viz/{target_name}-{time.time()}.usd", upaxis="z")
#renderer.render_ground()
with wp.ScopedTimer("render", active=True):
renderer.begin_frame(0.0)
to_mesh_points = to_mesh.points.numpy()
to_mesh_indices = to_mesh.indices.numpy()
from_mesh_points = from_mesh.points.numpy()
from_mesh_indices = from_mesh.indices.numpy()
to_extents = np.max(to_mesh_points, axis=0) - np.min(to_mesh_points, axis=0)
spacing_x = to_extents[0] + .3
spacing_y = to_extents[1] + .3
row_size = int(np.sqrt(len(xforms)))
for i, xform in enumerate(xforms.numpy()):
x_offset = (i % row_size) * spacing_x
y_offset = (i // row_size) * spacing_y
renderer.render_mesh(f"to_{target_name}_{i}", points=to_mesh_points, indices=to_mesh_indices, pos=wp.vec3(x_offset, y_offset, 0))
p, q = xform[:3], xform[3:]
renderer.render_mesh(f"frommesh_{i}", points=from_mesh_points, indices=from_mesh_indices, pos=wp.vec3(p[0] + x_offset, p[1] + y_offset, p[2]), rot=q)
if contacts is not None:
for j, contact in enumerate(contacts[i]):
renderer.render_sphere(f"contact_{i}_{j}", pos=wp.vec3(contact[0] + x_offset, contact[1] + y_offset, contact[2]), rot=q, radius=.01)
# if pair intersects then draw a small box above the pair
if collisions[i] > 0:
renderer.render_box(f"result_{i}", pos=wp.vec3(x_offset, y_offset, .15), rot=wp.quat_identity(), extents=(0.01, 0.01, 0.02))
renderer.end_frame()
renderer.save()
def get_support_surfaces(self, geom):
as_trimesh = self._load_and_cache_geometry(geom, "trimesh")
facet_centroids = np.empty((len(as_trimesh.facets), 3))
for i, (facet, total_area) in enumerate(zip(as_trimesh.facets, as_trimesh.facets_area)):
weighted_centroid = 0
for tri_index in facet:
weighted_centroid += as_trimesh.area_faces[tri_index] * as_trimesh.triangles_center[tri_index]
facet_centroids[i] = weighted_centroid / total_area
if len(facet_centroids) == 0:
return facet_centroids, np.empty((0,3)), as_trimesh.facets, as_trimesh.facets_area, as_trimesh.facets_boundary
return facet_centroids, as_trimesh.facets_normal, as_trimesh.facets, as_trimesh.facets_area, as_trimesh.facets_boundary
def combine_geometries_to_mesh(self, geoms, xforms) -> wp.Mesh:
tri = self.combine_geometries_to_trimesh(geoms, xforms)
mesh = warp_from_trimesh(tri)
return mesh
def combine_geometries_to_trimesh(self, geoms, xforms) -> trimesh.Trimesh:
assert len(geoms) == len(xforms)
trimeshes = [self._load_and_cache_geometry(geom, target="trimesh").copy(include_cache=True).apply_transform(xform) for geom, xform in zip(geoms, xforms)]
tri = trimesh.util.concatenate(trimeshes)
return tri
def _load_and_cache_geometry(self, obj, target='warp') -> Union[wp.Mesh, trimesh.Trimesh]:
if target == 'warp':
if isinstance(obj, wp.Mesh):
return obj
cached = self._warp_mesh_cache.get(obj.GetPath(), None)
if cached is not None:
return cached
else:
# Assume that the object is a usd geom
tri = self._load_and_cache_geometry(obj, target='trimesh')
processed = warp_from_trimesh(tri)
self._warp_mesh_cache[obj.GetPath()] = processed
return processed
elif target == "trimesh":
if isinstance(obj, trimesh.Trimesh):
return obj
cached = self._trimesh_cache.get(obj.GetPath(), None)
if cached is not None:
return cached
else:
# Assume that the object is a usd geom
tri = geom_to_trimesh(obj)
self._trimesh_cache[obj.GetPath()] = tri
return tri
        else:
            raise ValueError(f"Unknown geometry cache target: {target}")
def _get_mesh_name(self, mesh):
return list(self._warp_mesh_cache.keys())[list(self._warp_mesh_cache.values()).index(mesh)]
def warp_from_trimesh(trimesh: trimesh.Trimesh):
mesh = wp.Mesh(
points=wp.array(trimesh.vertices, dtype=wp.vec3, device=DEVICE),
indices=wp.array(trimesh.faces.flatten(), dtype=int, device=DEVICE))
return mesh
def get_support_surfaces_trimesh(mesh: trimesh.Trimesh, for_normal=None, threshold=None):
# No caching at the moment so don't put this in any loops
facet_centroids = []
    if for_normal is not None:
scores = mesh.facets_normal.dot(for_normal)
support_mask = scores < threshold
else:
        support_mask = np.ones(len(mesh.facets), dtype=bool)
facets = []
for facet, total_area, is_support in zip(mesh.facets, mesh.facets_area, support_mask):
if not is_support:
continue
facets.append(facet)
weighted_centroid = 0
for tri_index in facet:
weighted_centroid += mesh.area_faces[tri_index] * mesh.triangles_center[tri_index]
facet_centroids.append(weighted_centroid / total_area)
return facets, mesh.facets_area[support_mask], np.array(facet_centroids), mesh.facets_normal[support_mask]
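# A minimal usage sketch of get_support_surfaces_trimesh(), assuming only the
# trimesh and numpy imports already present in this module. The box size and
# threshold are illustrative values, not part of the original API.
def _demo_support_surfaces_trimesh():
    box = trimesh.creation.box(extents=(0.1, 0.1, 0.1))
    # Keep facets whose normals score below the threshold against +z,
    # i.e. the downward-facing face of the box.
    facets, areas, centroids, normals = get_support_surfaces_trimesh(
        box, for_normal=np.array([0.0, 0.0, 1.0]), threshold=-0.95)
    return facets, areas, centroids, normals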
def geom_to_trimesh(geom):
if isinstance(geom, UsdGeom.Mesh):
trimesh = load_trimesh_from_usdgeom(geom)
elif isinstance(geom, UsdGeom.Cube):
trimesh = get_trimesh_for_cube(geom)
elif isinstance(geom, UsdGeom.Cylinder):
trimesh = get_trimesh_for_cylinder(geom)
elif isinstance(geom, UsdGeom.Cone):
trimesh = get_trimesh_for_cone(geom)
elif isinstance(geom, UsdGeom.Sphere):
trimesh = get_trimesh_for_sphere(geom)
else:
raise Exception("No mesh representation for obj" + str(geom))
return trimesh
def get_trimesh_for_cube(cube: UsdGeom.Cube):
transform = cube.GetLocalTransformation()
translate, rotation, scale = UsdSkel.DecomposeTransform(transform)
transform = Gf.Matrix4d(Gf.Vec4d(scale[0], scale[1], scale[2], 1))
size = cube.GetSizeAttr().Get()
baked_trimesh = trimesh.creation.box(extents=(size, size, size))
baked_trimesh.apply_transform(transform)
return baked_trimesh
def get_trimesh_for_cylinder(cylinder: UsdGeom.Cylinder):
transform = cylinder.GetLocalTransformation()
translate, rotation, scale = UsdSkel.DecomposeTransform(transform)
transform = Gf.Matrix4d(Gf.Vec4d(scale[0], scale[1], scale[2], 1))
baked_trimesh = trimesh.creation.cylinder(radius=cylinder.GetRadiusAttr().Get(), height=cylinder.GetHeightAttr().Get())
baked_trimesh.apply_transform(transform)
return baked_trimesh
def get_trimesh_for_cone(cone: UsdGeom.Cone):
baked_trimesh = trimesh.creation.cone(radius=cone.GetRadiusAttr().Get(), height=cone.GetHeightAttr().Get())
baked_trimesh.apply_transform(trimesh.transformations.translation_matrix([0,0,-cone.GetHeightAttr().Get() / 2]))
return baked_trimesh
def get_trimesh_for_sphere(sphere: UsdGeom.Sphere):
    transform = sphere.GetLocalTransformation()
    baked_trimesh = trimesh.creation.icosphere(radius=sphere.GetRadiusAttr().Get())
    baked_trimesh.apply_transform(transform)
    return baked_trimesh
def load_trimesh_from_usdgeom(mesh: UsdGeom.Mesh):
transform = mesh.GetLocalTransformation()
baked_trimesh = trimesh.Trimesh(vertices=mesh.GetPointsAttr().Get(), faces=np.array(mesh.GetFaceVertexIndicesAttr().Get()).reshape(-1,3))
baked_trimesh.apply_transform(transform)
return baked_trimesh
| 16,494 |
Python
| 42.637566 | 192 | 0.619619 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/motion_commander.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import os
from typing import Optional, Union
import numpy as np
import omni.isaac.cortex.math_util as math_util
import omni.isaac.motion_generation.interface_config_loader as icl
import quaternion
from omni.isaac.core.objects import VisualSphere
from omni.isaac.core.prims.xform_prim import XFormPrim
from omni.isaac.core.utils.extensions import get_extension_path_from_name
from omni.isaac.core.utils.math import normalized
from omni.isaac.core.utils.prims import (
get_prim_at_path,
is_prim_path_valid,
)
from omni.isaac.core.utils.stage import add_reference_to_stage
from omni.isaac.cortex.cortex_object import CortexObject
from omni.isaac.cortex.math_util import to_stage_units, matrix_to_quat, unpack_T, invert_T
from omni.isaac.motion_generation import MotionPolicyController, ArticulationMotionPolicy, RmpFlowSmoothed, PathPlannerVisualizer
from omni.isaac.motion_generation.lula import RRT
from pxr import Gf, UsdGeom, Usd, Sdf
import srl.teleop.assistance
from srl.teleop.assistance.transforms import integrate_twist
def build_motion_commander(physics_dt, robot, obstacles):
""" Build the motion commander object.
Creates an RmpFlowSmoothed motion policy to govern the motion generation using the
RMPflowCortex motion policy config. This policy is a wrapped version of RmpFlowSmoothed which
measures jerk and both dynamically adjusts the system's speed if a large jerk is predicted,
and truncates small/medium sized jerks.
Also, adds the target prim, adds end-effector prim to the hand prim returned by
get_robot_hand_prim_path(robot), and adds the provided obstacles to the underlying policy.
Params:
- physics_dt: The time delta used by physics in seconds. Default: 1./60 seconds.
- robot: The robot object. Supported robots are currently Franka and UR10.
- obstacles: A dictionary of obstacles to be added to the underlying motion policy.
"""
"""motion_policy = RmpFlowSmoothed(
**icl.load_supported_motion_policy_config("Franka", "RMPflow", policy_config_dir=get_extension_path_from_name("srl.teleop") + "/data/rmpflow")
)"""
motion_policy = RmpFlowSmoothed(
**icl.load_supported_motion_policy_config("Franka", "RMPflowCortex")
)
# Setup the robot commander and replace its (xform) target prim with a visible version.
motion_policy_controller = MotionPolicyController(
name="rmpflow_controller",
articulation_motion_policy=ArticulationMotionPolicy(
robot_articulation=robot, motion_policy=motion_policy
),
)
# Lula config files for supported robots are stored in the motion_generation extension under
# "/path_planner_configs" and "motion_policy_configs"
mg_extension_path = get_extension_path_from_name("omni.isaac.motion_generation")
rrt_config_dir = os.path.join(mg_extension_path, "path_planner_configs")
rmp_config_dir = os.path.join(mg_extension_path, "motion_policy_configs")
# Initialize an RRT object
rrt = RRT(
robot_description_path = rmp_config_dir + "/franka/rmpflow/robot_descriptor.yaml",
urdf_path = rmp_config_dir + "/franka/lula_franka_gen.urdf",
rrt_config_path = rrt_config_dir + "/franka/rrt/franka_planner_config.yaml",
end_effector_frame_name = "right_gripper"
)
target_prim = make_target_prim("/motion_controller_target")
commander = MotionCommander(robot, motion_policy_controller, rrt, target_prim)
hand_prim_path = robot.prim_path + "/panda_hand"
add_end_effector_prim_to_robot(commander, hand_prim_path, "eff")
for obs in obstacles.values():
commander.add_obstacle(obs)
return commander
def make_target_prim(prim_path="/cortex/belief/motion_controller_target"):
""" Create the prim to be used as the motion controller target and add it to the stage.
Creates an axis marker.
"""
target_prim = add_reference_to_stage(usd_path=os.path.join(srl.teleop.assistance.DATA_DIR, "axis.usda"), prim_path=prim_path)
target_prim.CreateAttribute("primvars:doNotCastShadows", Sdf.ValueTypeNames.Bool).Set(True)
xformable = XFormPrim(target_prim.GetPath(), "motion_controller_target")
xformable.set_local_scale((.4,.4,.4))
return CortexObject(xformable)
def add_end_effector_prim_to_robot(motion_commander, hand_prim_path, eff_prim_name):
""" Add an end-effector prim as a child of the specified hand prim.
In general, a motion policy consuming commands from the motion commander may not use an
end-effector explicitly represented as a prim in the underlying robot USD. This method measures
the location of the underlying policy's end-effector, computes the relative transform between
the specified hand prim and that end-effector, and adds an explicit end-effector prim as a child
of the hand prim to represent the end-effector in USD.
This call uses MotionCommander.calc_policy_eff_pose_rel_to_hand(hand_prim_path) to calculate
where the end-effector transform used by the underlying motion policy is relative to the
specified hand prim.
The end-effector prim is added to the path <hand_prim_path>/<eff_prim_name>
"""
eff_prim_path = hand_prim_path + "/" + eff_prim_name
# Only add the prim if it doesn't already exist.
if not is_prim_path_valid(eff_prim_path):
print("No end effector detected. Adding one.")
eff_prim = XFormPrim(eff_prim_path, "eff_transform")
eff_prim_viz = VisualSphere(eff_prim_path + "/viz", "eff_viz", radius=0.003)
eff_prim_viz.prim.CreateAttribute("primvars:doNotCastShadows", Sdf.ValueTypeNames.Bool).Set(True)
eff_prim = eff_prim.prim
else:
eff_prim = get_prim_at_path(eff_prim_path)
pose = calc_policy_eff_pose_rel_to_hand(motion_commander, hand_prim_path)
p = to_stage_units(pose[0])
q = pose[1]
eff_prim.GetAttribute("xformOp:translate").Set(Gf.Vec3d(*p.tolist()))
eff_prim.GetAttribute("xformOp:orient").Set(Gf.Quatd(*q.tolist()))
#eff_prim.GetAttribute("xformOp:scale").Set(Gf.Vec3d(.1,.1,.1))
def calc_policy_eff_pose_rel_to_hand(commander, ref_prim_path):
""" Calculates the pose of the controlled end-effector in coordinates of the reference prim
in the named path.
The underlying motion policy uses an end-effector that's not necessarily available in the
franka robot. It's that control end-effector pose that's returned by the forward kinematics
(fk) methods below. This method gets that control end-effector pose relative to a given prim
(such as the hand frame) so, for instance, a new prim can be added relative to that frame
for reference elsewhere.
"""
ref_T = get_prim_world_T_meters(ref_prim_path)
#print("hand_prim_T_meter:\n", ref_T)
#note
eff_T = commander.get_fk_T()
#print("eff_T from mg:\n", eff_T)
eff_T_rel2ref = invert_T(ref_T).dot(eff_T)
R, p = unpack_T(eff_T_rel2ref)
q = matrix_to_quat(R)
return p, q
class ApproachParams(object):
""" Parameters describing how to approach a target (in position).
    The direction is a 3D vector pointing in the direction of approach. Its magnitude defines the
    max offset by which the intermediate approach target will be shifted from the position target.
    The std dev defines the length scale of a radial basis (Gaussian) weight function that
    determines what fraction of the shift we take. The radial basis function is defined on the
    orthogonal distance to the line defined by the target and the direction vector.
    Intuitively, the normalized direction of the direction vector defines which direction to
    approach from, and its magnitude defines how far back we want the end-effector to come in from.
    The std dev defines how tightly the end-effector approaches along that line. A small std dev is
    tight around that approach line, a large std dev is looser. A good value is often between 1 and
    3 cm.
See calc_shifted_approach_target() for the specific implementation of how these parameters are
used.
"""
def __init__(self, direction, std_dev):
self.direction = direction
self.std_dev = std_dev
def __str__(self):
return "{direction: %s, std_dev %s}" % (str(self.approach), str(self.std_dev))
class MotionCommand:
""" A motion command includes the motion API parameters: a target pose (required), optional
approach parameters, and an optional posture configuration.
The target pose is a full position and orientation target. The approach params define how the
    end-effector should approach that target, and the posture config defines how the system should
    resolve redundancy and generally posture the arm on approach.
"""
    def __init__(self, target_position: Optional[np.ndarray], target_orientation: Optional[quaternion.quaternion] = None, approach_params=None, posture_config=None):
self.target_position = target_position
self.target_orientation = target_orientation
self.approach_params = approach_params
self.posture_config = posture_config
@property
def has_approach_params(self):
return self.approach_params is not None
@property
def has_posture_config(self):
return self.posture_config is not None
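# A minimal sketch of composing the command types above, assuming the numpy and
# quaternion imports already present in this module. The poses and the 10 cm
# approach with a 2 cm std dev are illustrative values only.
def _demo_motion_command():
    # Approach from above: the direction vector points along the motion of
    # the descending end-effector.
    approach = ApproachParams(direction=np.array([0.0, 0.0, -0.10]), std_dev=0.02)
    target_q = quaternion.from_rotation_matrix(np.identity(3))
    return MotionCommand(
        target_position=np.array([0.4, 0.0, 0.25]),
        target_orientation=target_q,
        approach_params=approach)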
class VelocityMotionCommand:
def __init__(self, target_linear_velocity: np.ndarray, target_angular_velocity: np.ndarray, frame_trans=np.identity(3), frame_rot=np.identity(3)):
self.target_linear_velocity = target_linear_velocity
self.target_angular_velocity = target_angular_velocity
self.frame_trans = frame_trans
self.frame_rot = frame_rot
class PlannedMoveCommand:
def __init__(self, cspace_goal: Optional[np.ndarray]=None, end_effector_goal: Optional[np.ndarray]=None):
self.cspace_target = cspace_goal
self.end_effector_goal = end_effector_goal
def __eq__(self, obj):
if not isinstance(obj, PlannedMoveCommand):
return False
if self.cspace_target is not None and obj.cspace_target is not None:
return np.allclose(self.cspace_target, obj.cspace_target)
else:
return False
class SmoothedCommand(object):
""" Represents a smoothed command.
The API includes:
- reset(): Clear the current smoothed target data.
- update(): Updating the data given a new target.
A command consists of a position target, an optional rotation matrix target, and a posture
config. The smoothed command is stored in members x (position), R (rotation matrix), q (posture
config), and can be accessed from there. On first update of any given component, the component
    is set directly to the value provided. On subsequent updates the current value is averaged
with the new value, creating an exponentially weighted average of values received. If a
particular component is never received (e.g. the posture config, or the rotation matrix) the
corresponding member is never initialized and remains None.
Rotation recursive averaging is done by averaging the matrices themselves then projecting using
math_util.proj_R(), which converts the (invalid) rotation matrix to a quaternion, normalizes,
then converts back to a matrix.
If use_distance_based_smoothing_regulation is set to True (default) the degree of smoothing
diminishes to a minimum value of 0.5 as the system approaches the target. This feature is
    optimized for discrete jumps in targets. When a large jump is detected, the smoothing increases
    to the interpolation_alpha provided on initialization, but then decreases to the minimum value
as it nears the target. Note that the distance between rotation matrices factors into the
distance to target.
"""
def __init__(self, interpolation_alpha=0.95, use_distance_based_smoothing_regulation=True):
""" Initialize to use interpolation_alpha as the alpha blender. Larger values mean higher
smoothing. interpolation_alpha should be between 0 and 1; a good default (for use with 60hz
updates) is given by SmoothedCommand_a.
"""
self.x = None
self.R = None
self.q = None
self.init_interpolation_alpha = interpolation_alpha
self.use_distance_based_smoothing_regulation = use_distance_based_smoothing_regulation
self.reset()
def reset(self):
""" Reset the smoother back to its initial state.
"""
self.x = None
self.R = None
self.q = None
self.interpolation_alpha = self.init_interpolation_alpha
def update(self, target_p, target_R, posture_config, eff_x, eff_R):
""" Update the smoothed target given the current command (target, posture_config) and the
current end-effector frame (eff_{x,R}).
Params:
        - target_p: The position component of the command target.
        - target_R: The rotational component (rotation matrix) of the command target. None is valid.
- posture_config: The posture configuration for this command. None is valid.
- eff_x: The position component of the current end-effector frame.
- eff_R: The rotational component of the current end-effector frame.
"""
x_curr = target_p
R_curr = None
if target_R is not None:
R_curr = target_R
q_curr = None
if posture_config is not None:
q_curr = np.array(posture_config)
if self.x is None:
self.x = eff_x
if self.R is None:
self.R = eff_R
if self.q is None:
self.q = q_curr
# Clear the R if there's no rotation command. But don't do the same for the posture config.
# Always keep around the previous posture config.
if R_curr is None:
self.R = None
if self.use_distance_based_smoothing_regulation:
d = np.linalg.norm([eff_x - x_curr])
if self.R is not None:
d2 = np.linalg.norm([eff_R - self.R]) * 1.0
d = max(d, d2)
std_dev = 0.05
scalar = 1.0 - np.exp(-0.5 * (d / std_dev) ** 2)
alpha_min = 0.5
a = scalar * self.interpolation_alpha + (1.0 - scalar) * alpha_min
else:
a = self.interpolation_alpha
self.x = a * self.x + (1.0 - a) * x_curr
if self.R is not None and R_curr is not None:
self.R = math_util.proj_R(a * self.R + (1.0 - a) * R_curr)
if self.q is not None and q_curr is not None:
self.q = a * self.q + (1.0 - a) * q_curr
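# A minimal sketch of the exponential averaging performed by
# SmoothedCommand.update(). Distance-based regulation is disabled so the blend
# factor stays at the illustrative alpha of 0.9.
def _demo_smoothed_command():
    sc = SmoothedCommand(interpolation_alpha=0.9,
                         use_distance_based_smoothing_regulation=False)
    eff_x, eff_R = np.zeros(3), np.identity(3)
    target_p, target_R = np.array([1.0, 0.0, 0.0]), np.identity(3)
    for _ in range(3):
        sc.update(target_p, target_R, None, eff_x, eff_R)
    # The first update seeds x at eff_x; each pass then moves 10% of the way
    # toward the target, so x[0] is now 1 - 0.9**3 = 0.271.
    return sc.x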
def calc_shifted_approach_target(target_T, eff_T, approach_params):
""" Calculates how the target should be shifted to implement the approach given the current
end-effector position.
    - target_T: Final target pose as a 4x4 homogeneous transform.
    - eff_T: Current end-effector pose as a 4x4 homogeneous transform.
    - approach_params: The approach parameters.
"""
target_R, target_p = math_util.unpack_T(target_T)
eff_R, eff_p = math_util.unpack_T(eff_T)
direction = approach_params.direction
std_dev = approach_params.std_dev
v = eff_p - target_p
an = normalized(direction)
norm = np.linalg.norm
dist = norm(v - np.dot(v, an) * an)
dist += 0.5 * norm(target_R - eff_R) / 3
alpha = 1.0 - np.exp(-0.5 * dist * dist / (std_dev * std_dev))
shifted_target_p = target_p - alpha * direction
return shifted_target_p
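# A minimal numeric sketch of calc_shifted_approach_target(), assuming the
# math_util import already present in this module. The poses and approach
# parameters are illustrative values only.
def _demo_shifted_approach_target():
    target_T = math_util.pack_Rp(np.identity(3), np.array([0.4, 0.0, 0.2]))
    # End-effector 5 cm off the vertical approach line through the target:
    # alpha is close to 1, so the returned target sits nearly 10 cm above
    # the true target, pulling the end-effector back onto the approach line.
    eff_T = math_util.pack_Rp(np.identity(3), np.array([0.45, 0.0, 0.4]))
    params = ApproachParams(direction=np.array([0.0, 0.0, -0.10]), std_dev=0.02)
    return calc_shifted_approach_target(target_T, eff_T, params)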
def get_prim_world_T_meters(prim_path):
""" Computes and returns the world transform of the prim at the provided prim path in units of
meters.
"""
prim = get_prim_at_path(prim_path)
prim_tf = UsdGeom.Xformable(prim).ComputeLocalToWorldTransform(Usd.TimeCode.Default())
transform = Gf.Transform()
transform.SetMatrix(prim_tf)
position = transform.GetTranslation()
orientation = transform.GetRotation().GetQuat()
p = np.array(position)
R = np.array(Gf.Matrix3d(orientation).GetTranspose())
T = math_util.pack_Rp(R, math_util.to_meters(p))
return T
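# A minimal sketch of the 4x4 homogeneous convention returned by
# get_prim_world_T_meters(): rotation in the upper-left 3x3 block, translation
# in the last column. The pose values are illustrative only.
def _demo_pack_T_convention():
    R = np.identity(3)
    p = np.array([0.1, 0.2, 0.3])
    T = math_util.pack_Rp(R, p)
    assert np.allclose(T[:3, :3], R) and np.allclose(T[:3, 3], p)
    return T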
class MotionCommander:
""" The motion commander provides an abstraction of motion for the cortex wherein a lower-level
policy implements the motion commands defined by MotionCommand objects.
    This class adds an end-effector prim to the robot's hand and creates a target prim for setting
targets. The target prim can be set to a target manually via a call to set_target() or it can be
controlled using a gizmo through the OV viewport.
Independent of what the stage units currently are, this class provides an SI interface. Commands
are specified in units of meters and forward kinematics is returned in units of meters.
"""
def __init__(self, robot, motion_controller, rrt, target_prim):
self.robot = robot
self.motion_controller = motion_controller
self.smoothed_command = SmoothedCommand()
self.rrt = rrt
# Use the PathPlannerVisualizer wrapper to generate a trajectory of ArticulationActions
self.path_planner_visualizer = PathPlannerVisualizer(robot,rrt)
self.robot_prim = get_prim_at_path(self.amp.get_robot_articulation().prim_path)
self.target_prim = None
self.register_target_prim(target_prim)
self.is_target_position_only = False
self.last_command = None
def set_target_position_only(self):
self.is_target_position_only = True
def set_target_full_pose(self):
self.is_target_position_only = False
def register_target_prim(self, target_prim):
""" Register the specified target prim with this commander. This prim will both visualize
the commands being sent to the motion commander, and it can be used to manually control the
robot using the OV viewport's gizmo.
"""
self.target_prim = CortexObject(target_prim) # Target prim will be in units of meters.
self.set_command(MotionCommand(*self.get_fk_pq()))
def calc_policy_eff_pose_rel_to_hand(self, ref_prim_path):
""" Calculates the pose of the controlled end-effector in coordinates of the reference prim
in the named path.
The underlying motion policy uses an end-effector that's not necessarily available in the
franka robot. It's that control end-effector pose that's returned by the forward kinematics
(fk) methods below. This method gets that control end-effector pose relative to a given prim
(such as the hand frame) so, for instance, a new prim can be added relative to that frame
for reference elsewhere.
"""
ref_T = get_prim_world_T_meters(ref_prim_path)
print("hand_prim_T_meter:\n", ref_T)
eff_T = self.get_fk_T()
print("eff_T from mg:\n", eff_T)
eff_T_rel2ref = math_util.invert_T(ref_T).dot(eff_T)
R, p = math_util.unpack_T(eff_T_rel2ref)
q = math_util.matrix_to_quat(R)
return p, q
def reset(self):
""" Reset this motion controller. This method ensures that any internal integrators of the
motion policy are reset, as is the smoothed command.
"""
self.motion_policy.reset()
self.smoothed_command.reset()
@property
def amp(self):
""" Accessor for articulation motion policy from the motion controller.
"""
return self.motion_controller.get_articulation_motion_policy()
@property
def motion_policy(self):
""" The motion policy used to command the robot.
"""
return self.motion_controller.get_articulation_motion_policy().get_motion_policy()
@property
def aji(self):
""" Active joint indices. These are the indices into the full C-space configuration vector
of the joints which are actively controlled.
"""
return self.amp.get_active_joints_subset().get_joint_subset_indices()
def get_end_effector_pose(self, config=None):
""" Returns the control end-effector pose in units of meters (the end-effector used by
motion gen).
Motion generation returns the end-effector pose in stage units. We convert it to meters
here. Returns the result in the same (<position>, <rotation_matrix>) tuple form as motion
generation.
If config is None (default), it uses the current applied action (i.e. current integration
state of the underlying motion policy which the robot is trying to follow). By using the
applied action (rather than measured simulation state) the behavior is robust and consistent
regardless of simulated PD control nuances. Otherwise, if config is set, calculates the
forward kinematics for the provided joint config. config should be the full C-space
configuration of the robot.
"""
if config is None:
# No active joints config was specified, so fill it in with the current applied action.
action = self.robot.get_applied_action()
config = np.array(action.joint_positions)
active_joints_config = config[self.aji]
p, R = self.motion_policy.get_end_effector_pose(active_joints_config)
p = math_util.to_meters(p)
return p, R
def get_eef_T(self):
"""
        Return the true, current end-effector pose, using the latest joint angle measurements.
"""
return self.get_fk_T(self.robot.get_joint_positions()[:-2])
def get_fk_T(self, config=None):
""" Returns the forward kinematic transform to the control frame as a 4x4 homogeneous
        matrix. Uses the currently applied joint position goal, which may differ from the measured
        joint positions in cases where the controller is oscillating.
"""
p, R = self.get_end_effector_pose(config)
return math_util.pack_Rp(R, p)
def get_fk_pq(self, config=None):
""" Returns the forward kinematic transform to the control frame as a
(<position>,<quaternion>) pair.
"""
p, R = self.get_end_effector_pose(config)
return p, quaternion.from_rotation_matrix(R)
def get_fk_p(self, config=None):
""" Returns the position components of the forward kinematics transform to the end-effector
control frame.
"""
p, _ = self.get_end_effector_pose(config)
return p
def get_fk_R(self, config=None):
""" Returns the rotation matrix components of the forward kinematics transform to the
end-effector control frame.
"""
_, R = self.get_end_effector_pose(config)
return R
def set_command(self, command: Union[MotionCommand, VelocityMotionCommand]):
""" Set the active command to the specified value. The command is smoothed before passing it
into the underlying policy to ensure it doesn't change too quickly.
If the command does not have a rotational target, the end-effector's current rotation is
used in its place.
Note the posture configure should be a full C-space configuration for the robot.
"""
eff_T = self.get_fk_T()
eff_p = eff_T[:3, 3]
eff_R = eff_T[:3, :3]
if isinstance(command, VelocityMotionCommand):
screw_T = integrate_twist(3 * command.frame_trans @ command.target_linear_velocity, 12 * command.frame_rot @ command.target_angular_velocity, 2)
target_posture = None
self.smoothed_command.interpolation_alpha = .6
new_T = eff_T @ screw_T
self.smoothed_command.update(new_T[:3,3], new_T[:3,:3], None, eff_p, eff_R)
elif isinstance(command, MotionCommand):
target_p, target_q = command.target_position, command.target_orientation
if target_q is None:
target_q = quaternion.from_rotation_matrix(eff_R)
if command.has_approach_params:
target_T = math_util.pack_Rp(quaternion.as_rotation_matrix(target_q), target_p)
target_p = calc_shifted_approach_target(target_T, eff_T, command.approach_params)
self.smoothed_command.interpolation_alpha = .95
self.smoothed_command.update(target_p, quaternion.as_rotation_matrix(target_q), command.posture_config, eff_p, eff_R)
elif isinstance(command, PlannedMoveCommand):
need_replan = True
if isinstance(self.last_command, PlannedMoveCommand):
if self.last_command == command:
need_replan = False
if need_replan:
self.rrt.set_cspace_target(command.cspace_target)
self.plan = self.path_planner_visualizer.compute_plan_as_articulation_actions(max_cspace_dist = .01)
if self.plan:
next_action = self.plan[0]
p, q = self.get_fk_pq(config=next_action.joint_positions)
self.smoothed_command.interpolation_alpha = 0
self.smoothed_command.update(p, quaternion.as_rotation_matrix(q), None, eff_p, eff_R)
target_p = self.smoothed_command.x
target_R = self.smoothed_command.R
target_T = math_util.pack_Rp(target_R, target_p)
target_posture = self.smoothed_command.q
self.target_prim.set_world_pose(position=target_p, orientation=math_util.matrix_to_quat(target_R))
if target_posture is not None:
self.set_posture_config(target_posture)
self.last_command = command
def set_posture_config(self, posture_config):
""" Set the posture configuration of the underlying motion policy.
The posture configure should be a full C-space configuration for the robot.
"""
policy = self.motion_policy._policy
policy.set_cspace_attractor(posture_config)
def _sync_end_effector_target_to_motion_policy(self):
""" Set the underlying motion generator's target to the pose in the target prim.
Note that the world prim is a CortexObject which is always in units of meters. The motion
generator uses stage units, so we have to convert.
"""
target_translation, target_orientation = self.target_prim.get_world_pose()
if self.is_target_position_only:
self.motion_policy.set_end_effector_target(math_util.to_stage_units(target_translation))
p, _ = self.target_prim.get_world_pose()
q = self.get_fk_pq().q
self.target_prim.set_world_pose(p, q)
else:
self.motion_policy.set_end_effector_target(math_util.to_stage_units(target_translation), target_orientation)
def get_action(self, dt):
""" Get the next action from the underlying motion policy. Returns the result as an
ArticulationAction object.
"""
self.amp.physics_dt = dt
self._sync_end_effector_target_to_motion_policy()
self.motion_policy.update_world()
action = self.amp.get_next_articulation_action()
if isinstance(self.last_command, PlannedMoveCommand):
if self.plan:
action = self.plan.pop(0)
return action
def step(self, dt):
""" Convenience method for both getting the current action and applying it to the
underlying robot's articulation controller.
"""
action = self.get_action(dt)
self.robot.get_articulation_controller().apply_action(action)
def add_obstacle(self, obs):
""" Add the provided obstacle to the underlying motion policy so they will be avoided.
The obstacles must be core primitive types. See omni.isaac.core/omni/isaac/core/objects for
options.
See also omni.isaac.motion_generation/omni/isaac/motion_generation/world_interface.py:
WorldInterface.add_obstacle(...)
"""
self.motion_policy.add_obstacle(obs)
def disable_obstacle(self, obj):
""" Distable the given object as an obstacle in the underlying motion policy.
Disabling can be done repeatedly safely. The object can either be a core api object or a
cortex object.
"""
try:
# Handle cortex objects -- extract the underlying core api object.
if hasattr(obj, "obj"):
obj = obj.obj
self.motion_policy.disable_obstacle(obj)
except Exception as e:
err_substr = "Attempted to disable an already-disabled obstacle"
if err_substr in str(e):
print("<lula error caught and ignored (obj already disabled)>")
else:
raise e
def enable_obstacle(self, obj):
""" Enable the given object as an obstacle in the underlying motion policy.
Enabling can be done repeatedly safely. The object can either be a core api object or a
cortex object.
"""
try:
# Handle cortex objects -- extract the underlying core api object.
if hasattr(obj, "obj"):
obj = obj.obj
self.motion_policy.enable_obstacle(obj)
except Exception as e:
err_substr = "Attempted to enable an already-enabled obstacle"
if err_substr in str(e):
print("<lula error caught and ignored (obj already enabled)>")
else:
raise e
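# A minimal sketch of the expected call pattern around MotionCommander inside a
# simulation loop. `robot` and `obstacles` are assumed to be provided by the
# hosting application; the 1/60 s physics dt and target pose are illustrative.
def _demo_commander_loop(robot, obstacles):
    commander = build_motion_commander(1.0 / 60.0, robot, obstacles)
    commander.set_command(MotionCommand(target_position=np.array([0.4, 0.0, 0.25])))
    for _ in range(120):
        # step() both queries the policy and applies the resulting action.
        commander.step(1.0 / 60.0)
    return commander.get_fk_pq()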
| 29,384 |
Python
| 42.340708 | 161 | 0.672441 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/camera_controls.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import omni
from omni.isaac.sensor import Camera
import math
from srl.teleop.assistance.transforms import rotate_vec_by_quat
from omni.isaac.core.utils.viewports import set_camera_view
import time
import quaternion
class ArcballCameraControls:
def __init__(self, camera_path, focus_delegate) -> None:
self.camera_path = camera_path
self.last_free_camera_view = None
self._last_holdable_control = None
self._hold_stamp = time.time()
self._hold_duration = 0
self.camera = Camera(self.camera_path, name="persp")
self.focus_delegate = focus_delegate
def update(self, control_input):
if control_input in {"ROTATE RIGHT", "ROTATE LEFT", "PITCH DOWN", "PITCH UP", "ZOOM IN", "ZOOM OUT"}:
now = time.time()
if self._last_holdable_control != control_input or now > self._hold_stamp + 0.2:
# Interpret as a new press
self._hold_duration = 0
elif now > self._hold_stamp:
self._hold_duration += 1
self._hold_stamp = now
self._last_holdable_control = control_input
focus_point = self.focus_delegate()
if control_input == "ROTATE RIGHT" or control_input == "ROTATE LEFT":
sign = 1
if control_input == "ROTATE LEFT":
sign = -1
self._rotate_camera_eye_by_quat(
quaternion.from_euler_angles(0,0,sign * .02 * min(math.log(math.e + self._hold_duration), 3)),
focus_point)
elif control_input == "PITCH UP" or control_input == "PITCH DOWN":
sign = 1
if control_input == "PITCH DOWN":
sign = -1
self._rotate_camera_eye_by_quat(
quaternion.from_euler_angles(0,sign * .02 * min(math.log(math.e + self._hold_duration), 3),0),
focus_point)
elif control_input == "ZOOM IN" or control_input == "ZOOM OUT":
sign = 1
if control_input == "ZOOM OUT":
sign = -1
current_cam_pose = self.camera.get_world_pose()
set_camera_view(
eye=current_cam_pose[0] + (sign * .02 * min(math.log(math.e + self._hold_duration), 3)) * (focus_point - current_cam_pose[0]),
target=focus_point,
camera_prim_path=self.camera_path
)
def _rotate_camera_eye_by_quat(self, quat: quaternion.quaternion, focus):
current_cam_pose = self.camera.get_world_pose()
set_camera_view(
eye=rotate_vec_by_quat(current_cam_pose[0], quat),
target=focus,
camera_prim_path=self.camera_path
)
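# A minimal sketch of the hold-duration gain used by the controls above: held
# inputs ramp from 1.0 toward a cap of 3.0 on a log curve, so the camera speeds
# up quickly at first and then saturates. The helper name is illustrative.
def _demo_hold_gain(hold_duration: int) -> float:
    return min(math.log(math.e + hold_duration), 3.0)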
class SwappableViewControls:
def __init__(self, camera_path, main_viewport, secondary_viewport, on_flip=lambda x: x):
self.main_viewport = main_viewport
self.secondary_viewport = secondary_viewport
self.camera_path = camera_path
# Outside expects us to have a handle to a controllable camera.
self.camera = Camera(self.camera_path, name="persp")
#self.camera.pause()
self._hold_stamp = time.time()
self._hold_duration = 0
self.on_flip = on_flip
def update(self, control_input):
if control_input == 0:
return
now = time.time()
if now > self._hold_stamp + 0.2:
# Interpret as a new press
self._hold_duration = 0
else:
self._hold_duration += 1
self._hold_stamp = now
if self._hold_duration > 0:
return
self.swap()
def swap(self):
prev_main_camera = self.main_viewport.viewport_api.get_active_camera()
prev_secondary_camera = self.secondary_viewport.viewport_api.get_active_camera()
self.main_viewport.viewport_api.set_active_camera(prev_secondary_camera)
self.secondary_viewport.viewport_api.set_active_camera(prev_main_camera)
self.on_flip(prev_secondary_camera == self.camera_path)
@property
def active_index(self):
return 0 if self.camera_path == self.main_viewport.viewport_api.get_active_camera() else 1
def set_fixed_view(self):
omni.kit.commands.execute("UnlockSpecs", spec_paths=[self.camera.prim_path])
#set_camera_view((-.35, -1.16, 1.29), (.35, 0, 0), self.camera_path, self.main_viewport.viewport_api)
set_camera_view((1.79, 0, 1.35), (.25, 0, 0), self.camera_path, self.main_viewport.viewport_api)
def lock_fixed(self):
omni.kit.commands.execute("LockSpecs", spec_paths=[self.camera.prim_path])
| 4,743 |
Python
| 40.252174 | 142 | 0.598777 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/__init__.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from srl.teleop.assistance.assistance import Assistance
from srl.teleop.assistance.assistance_extension import AssistanceExtension
import os
# Conveniences to other module directories via relative paths
EXT_DIR = os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(__file__)), "../../../"))
DATA_DIR = os.path.join(EXT_DIR, "data")
__all__ = [
# global paths
"EXT_DIR",
"DATA_DIR",
]
| 551 |
Python
| 28.05263 | 96 | 0.720508 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/assistance.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import datetime
import weakref
from srl.teleop.assistance.camera_controls import ArcballCameraControls, SwappableViewControls
from srl.teleop.assistance.logging import CONTROLS_STATE_DTYPE, ROBOT_STATE_DTYPE, UI_STATE_DTYPE
from srl.teleop.assistance.profiling import profile
from srl.teleop.assistance.tasks.reaching import ReachingTask
from srl.teleop.base_sample import BaseSample
from .behavior.scene import ContextTools, SceneContext
from srl.teleop.assistance.behavior.network import build_control_behavior, build_suggestion_display_behavior, build_suggestion_selection_behavior
from srl.teleop.assistance.check_collision import WarpGeometeryScene
from srl.teleop.assistance.proposals import FixedTargetProposal, build_proposal_tables
from srl.spacemouse.spacemouse_extension import get_global_spacemouse
from omni.isaac.core.world import World
from omni.isaac.core.prims.xform_prim import XFormPrim
import numpy as np
import omni
import carb
import time
import quaternion
from omni.kit.viewport.utility import get_active_viewport_window
from srl.teleop.assistance.transforms import invert_T, pack_Rp
from srl.teleop.assistance.viewport import configure_main_viewport, configure_realsense_viewport, disable_viewport_interaction, get_realsense_viewport, layout_picture_in_picture
from srl.teleop.assistance.viz import viz_laser_rooted_at
from srl.teleop.assistance.motion_commander import build_motion_commander, add_end_effector_prim_to_robot
from srl.teleop.assistance.ui import AssistanceMode, ControlFrame, strfdelta
from pxr import UsdGeom, PhysxSchema
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.debug_draw import _debug_draw
class Assistance(BaseSample):
def __init__(self, task, viewport_manipulator) -> None:
super().__init__()
self.set_world_settings(rendering_dt= 1/30, physics_dt=1/60)
self._timeline = omni.timeline.get_timeline_interface()
self._stage = None
self.scene_context = None
self.control_behavior = None
self.suggestion_selection_behavior = None
self.suggestion_display_behavior = None
self.models = None
self.start_stamp = None
self.last_stamp = time.time()
self._camera_controls = None
self._draw = _debug_draw.acquire_debug_draw_interface()
self._task = task
self.viewport_manipulator = viewport_manipulator
self.viewport_disable_handles = None
def setup_scene(self):
"""
Called by super when the world and stage are setup
"""
# Turn off scientific notation to make debug prints scannable
np.set_printoptions(suppress=True)
world = self.get_world()
world.add_task(self._task)
self._stage = omni.usd.get_context().get_stage()
def physics_step(self, step):
if self._world.is_stopped():
return
if self._task.is_done():
self._world.stop()
return
carb.profiler.begin(1, "physics step", active=True)
if self.start_stamp is None:
self.start_stamp = time.time()
# Force everyone to redraw anything they want shown each frame
self._draw.clear_lines()
self._draw.clear_points()
# Make sure we've let the simulation settle a few steps before updating the eff prim. Otherwise
# the hand prim starts in a strange place which disagrees with joint states
if self._world.current_time_step_index > 10:
hand_prim_path = self.franka.prim_path + "/panda_hand"
# FIXME: This gets called for the first time when the commander is built, but at that point
# the hand prim position is wrong relative to the controller's FK frame. We call it again here
# to put the eff prim in the right place.
add_end_effector_prim_to_robot(self.commander, hand_prim_path, "eff")
spacemouse = get_global_spacemouse()
if spacemouse and not self.control_behavior:
self.configure_behaviors()
elif not spacemouse:
self.control_behavior = None
self.suggestion_selection_behavior = None
self.suggestion_display_behavior = None
with profile("scene_context.monitors", True):
for mon in self.scene_context.monitors:
mon(self.scene_context)
if self.control_behavior:
#HACK: For basic assistance familiarization in study
if isinstance(self._task, ReachingTask) and self.models["suggest_grasps"].as_bool:
if self.control_behavior.context.button_command[2]:
self.selection_behavior.context.fixed_proposal = FixedTargetProposal(self._task._current_target_T)
with profile("control_behavior.monitors", True):
for mon in self.control_behavior.context.monitors:
mon(self.control_behavior.context)
with profile("control_behavior.step", True):
self.control_behavior.step()
with profile("selection.monitors", True):
for mon in self.selection_behavior.context.monitors:
mon(self.selection_behavior.context)
with profile("selection.step", True):
self.selection_behavior.step()
with profile("suggestion_display_behavior.monitors", True):
for mon in self.suggestion_display_behavior.context.monitors:
mon(self.suggestion_display_behavior.context)
with profile("suggestion_display_behavior.step", True):
self.suggestion_display_behavior.step()
action = self.commander.get_action(World.instance().get_physics_dt())
self.franka.get_articulation_controller().apply_action(action)
if self.models is not None and self.models["use_laser"].as_bool:
viz_laser_rooted_at(f"{self.franka.prim_path}/panda_hand/guide", pack_Rp(np.identity(3), np.array((0, 0, .07))))
orig_style = self.models["left_label"][1].style
if hasattr(self._task, "time_remaining") and self._task.time_remaining:
to_display = datetime.timedelta(seconds=self._task.time_remaining)
self.models["left_label"][0].text = strfdelta(to_display, '%M:%S')
if to_display.total_seconds() < 60:
orig_style["background_color"] = 0x330000FF
else:
orig_style["background_color"] = 0x33000000
else:
to_display = datetime.timedelta(seconds=time.time() - self.start_stamp)
self.models["left_label"][0].text = strfdelta(to_display, '%M:%S')
orig_style["background_color"] = 0x33000000
self.models["left_label"][1].set_style(orig_style)
carb.profiler.end(1, True)
async def setup_post_reset(self):
self.commander.reset()
omni.usd.get_context().get_selection().set_selected_prim_paths([], True)
def world_cleanup(self):
self._world.remove_physics_callback("sim_step")
if self.viewport_disable_handles:
self.viewport_disable_handles = None
return
async def setup_pre_reset(self):
return
async def setup_post_load(self):
scene = self._world.scene
self.ghosts = [scene.get_object("ghost_franka0"),scene.get_object("ghost_franka1")]
self.franka = scene.get_object("franka")
await self._world.play_async()
if self.franka is None:
carb.log_error("Grasp Suggestion load failed trying to retrieve Franka from scene. Make sure you have"
"cleared the stage completely before attempted to load.")
assert False
self.realsense_vp = get_realsense_viewport(self.franka.camera.prim.GetPath())
configure_realsense_viewport(self.realsense_vp)
self.main_vp = get_active_viewport_window("Viewport")
configure_main_viewport(self.main_vp)
self.viewport_disable_handles = disable_viewport_interaction(self.main_vp), disable_viewport_interaction(self.realsense_vp)
self.models["control_frame"].get_item_value_model().set_value(2)
layout_picture_in_picture(self.main_vp, self.realsense_vp)
def get_focus():
point = self.commander.get_fk_p()
point[2] = 0
return point
#self._camera_controls = ArcballCameraControls("/OmniverseKit_Persp", focus_delegate=get_focus)
def on_flip(main_is_original):
if main_is_original:
self.models["control_frame"].get_item_value_model().set_value(2)
else:
self.models["control_frame"].get_item_value_model().set_value(0)
self._camera_controls = SwappableViewControls("/OmniverseKit_Persp",self.main_vp, self.realsense_vp, on_flip=on_flip)
self._camera_controls.set_fixed_view()
self._objects = self._task.get_task_objects()
self._scene_objects = self._task.get_scene_objects()
self._object_ghosts = self._task.get_ghost_objects()
#self._executor = concurrent.futures.ThreadPoolExecutor(max_workers=len(self.ghosts))
# NOTE: motioncommander requires the articulation view to already exist, which it isn't before setup_post_load
self.commander = build_motion_commander(self.get_world().get_physics_dt(), self.franka, {})
self.eff_prim = XFormPrim(self.franka.prim_path + "/panda_hand/eff")
self.target_prim = XFormPrim("/motion_controller_target")
await self._world.play_async()
self._camera_controls.lock_fixed()
# Generate all possible suggestions we could have based on object geometry
ee_T = self.commander.get_eef_T()
inv_ee_T = invert_T(ee_T)
part_Ts = self.franka.get_gripper_collision_Ts()
ee_to_part_Ts = [inv_ee_T.dot(part_T) for part_T in part_Ts]
self.ee_to_part_Ts = ee_to_part_Ts
self.collision_checker = WarpGeometeryScene()
self.gripper_collision_mesh = self.collision_checker.combine_geometries_to_mesh(self.franka.get_gripper_collision_meshes(), self.ee_to_part_Ts)
with profile("filter_proposal_tables"):
self.grasp_table, self.placement_table, self.plane_table = build_proposal_tables(self.collision_checker, list(self._objects.values()), list(self._scene_objects.values()), self.gripper_collision_mesh)
#self.viewport_manipulator.update(self.grasp_table, self.placement_table, self.plane_table)
self.scene_context = SceneContext(ContextTools(self._world, self.viewport_manipulator, self._objects, self._scene_objects, {}, self._object_ghosts, self.franka, self.ghosts, self.commander, self.grasp_table, self.placement_table, self.plane_table, self.collision_checker, self.gripper_collision_mesh), self.models["suggest_grasps"].as_bool, self.models["suggest_placements"].as_bool)
self._world.add_physics_callback("sim_step", callback_fn=self.physics_step)
omni.usd.get_context().get_selection().set_selected_prim_paths([], True)
def configure_behaviors(self):
assistance_mode = AssistanceMode(self.models["assistance_mode"].get_item_value_model().as_int)
control_frame = ControlFrame(self.models["control_frame"].get_item_value_model().as_int)
self.control_behavior = build_control_behavior(weakref.proxy(self.scene_context.tools), get_global_spacemouse(), control_frame, weakref.proxy(self.scene_context), assistance_mode, weakref.proxy(self._camera_controls).update, self.models["avoid_obstacles"].as_bool)
self.selection_behavior = build_suggestion_selection_behavior(weakref.proxy(self.scene_context.tools), weakref.proxy(self.scene_context), weakref.proxy(self.control_behavior.context), self.models["use_surrogates"].as_bool, self.models["snapping"].as_bool)
self.control_behavior.context.selection_context = weakref.proxy(self.selection_behavior.context)
self.suggestion_display_behavior = build_suggestion_display_behavior(weakref.proxy(self.scene_context.tools), weakref.proxy(self.scene_context), weakref.proxy(self.control_behavior.context), weakref.proxy(self.selection_behavior.context), self.models["center_label"])
def register_ui_models(self, models):
self.models = models
def overlay_opacity_change(model):
value = model.get_value_as_float()
self.suggestion_display_behavior.context.overlay_opacity = value
def control_frame_change(model,_):
if self.control_behavior:
self.control_behavior.context.control_frame = ControlFrame(model.get_item_value_model().as_int)
def assistance_mode_change(model, _):
if self.control_behavior:
self.control_behavior.context.assistance_mode = AssistanceMode(model.get_item_value_model().as_int)
def should_assist_change(model):
self.scene_context.should_suggest_placements = self.models["suggest_placements"].as_bool
self.scene_context.should_suggest_grasps = self.models["suggest_grasps"].as_bool
if self.selection_behavior:
self.selection_behavior.context.use_surrogates = self.models["use_surrogates"].as_bool
self.selection_behavior.context.use_snapping = self.models["snapping"].as_bool
self.models["overlay_opacity"][0].add_value_changed_fn(overlay_opacity_change)
self.models["control_frame"].add_item_changed_fn(control_frame_change)
self.models["assistance_mode"].add_item_changed_fn(assistance_mode_change)
self.models["suggest_grasps"].add_value_changed_fn(should_assist_change)
self.models["suggest_placements"].add_value_changed_fn(should_assist_change)
self.models["use_surrogates"].add_value_changed_fn(should_assist_change)
self.models["snapping"].add_value_changed_fn(should_assist_change)
async def _on_ui_value_change(self, name, value):
if name == "suggest_grasps":
self.scene_context.should_suggest_grasps = value
elif name == "suggest_placements":
self.scene_context.should_suggest_placements = value
elif name == "avoid_obstacles":
if self.control_behavior:
self.control_behavior.context.avoid_obstacles = value
elif name == "use_laser":
imageable = UsdGeom.Imageable(get_prim_at_path(f"{self.franka.prim_path}/panda_hand/guide"))
if not value:
imageable.MakeInvisible()
else:
imageable.MakeVisible()
elif name == "use_surrogates":
if self.selection_behavior:
self.selection_behavior.context.use_surrogates = value
else:
print("unhandled ui event", name, value)
def _on_logging_event(self, val):
world = self.get_world()
data_logger = world.get_data_logger()
if not world.get_data_logger().is_started():
data_logger.add_data_frame_logging_func(self.frame_logging_func)
if val:
data_logger.start()
else:
data_logger.pause()
return
def frame_logging_func(self, tasks, scene):
if self.suggestion_display_behavior.context is None:
return {}
# return always a dict
applied_action = self.franka.get_applied_action()
spacemouse = get_global_spacemouse()
trans, rot, buttons = (0,0,0), (0,0,0), 0
trans_raw, rot_raw, buttons_raw = (0,0,0), (0,0,0), 0
if spacemouse:
stamp, trans, rot, buttons = spacemouse.get_controller_state()
stamp, trans_raw, rot_raw, buttons_raw = spacemouse._control
p,q = self.commander.get_fk_pq()
target_p, target_q = self.commander.target_prim.get_world_pose()
data = {}
robot_state = np.empty((1,), dtype=ROBOT_STATE_DTYPE)
robot_state['eef_pose']["position"] = p
robot_state['eef_pose']["orientation"] = quaternion.as_float_array(q)
robot_state['target_pose']["position"] = target_p
robot_state['target_pose']["orientation"] = target_q
#frame['eef_vel_lin'] = self.franka.gripper.get_linear_velocity()
#frame['eef_vel_ang'] = self.franka.gripper.get_angular_velocity()
twist = self.scene_context.ee_vel_tracker.get_twist()
if twist is None:
twist = np.zeros(6)
robot_state['eef_vel_lin'] = twist[:3]
robot_state['eef_vel_ang'] = twist[3:]
robot_state['joint_positions'] = self.franka.get_joint_positions()
robot_state['joint_velocities'] = self.franka.get_joint_velocities()
robot_state['applied_joint_positions'] = applied_action.joint_positions
robot_state['applied_joint_velocities'] = applied_action.joint_velocities
ui_state = np.empty((1,), dtype=UI_STATE_DTYPE)
cam_p, cam_q = self._camera_controls.camera.get_world_pose()
ui_state['primary_camera'] = self._camera_controls.active_index
ui_state['camera_pose']['position'] = cam_p
ui_state['camera_pose']['orientation'] = cam_q
ghost_i, (ghost_p, ghost_q) = self.suggestion_display_behavior.context.get_current_object_ghost_index_and_pose()
ui_state['object_ghost_pose']['position'] = ghost_p
ui_state['object_ghost_pose']['orientation'] = ghost_q
ui_state['object_ghost_index'] = ghost_i
ui_state["robot_ghost_joint_positions"] = self.suggestion_display_behavior.context.get_current_robot_ghost_joint_positions()
ui_state["ghost_is_snapped"] = self.selection_behavior.context.suggestion_is_snap
controls_state = np.empty((1,), dtype=CONTROLS_STATE_DTYPE)
controls_state["filtered"] = trans, rot, buttons
controls_state["raw"] = trans_raw, rot_raw, buttons_raw
data["robot_state"] = robot_state
data["controls_state"] = controls_state
data["scene_state"] = self._task.get_observations()
data["ui_state"] = ui_state
return data
| 18,162 |
Python
| 51.799418 | 391 | 0.665345 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/viz.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import os
import omni
import srl.teleop
from srl.teleop.assistance.transforms import T2pq, make_rotation_matrix, pq2T, invert_T, normalized
from omni.isaac.core.utils.stage import get_current_stage, add_reference_to_stage
from omni.isaac.core.utils.prims import is_prim_path_valid, get_prim_at_path
from omni.isaac.core.prims.xform_prim import XFormPrim
from omni.isaac.core.objects.cone import VisualCone
from omni.isaac.core.objects.cylinder import VisualCylinder
from omni.isaac.core.objects.sphere import VisualSphere
from pxr import Usd, UsdGeom, Sdf, UsdLux
import quaternion
import numpy as np
import math
from pxr import Sdf, Usd, UsdGeom, Gf
from omni.isaac.debug_draw import _debug_draw
def ray_cast(position: np.ndarray, orientation: np.ndarray, offset: np.ndarray,
             max_dist: float = 100.0, viz: bool = False):
"""Projects a raycast forward along x axis with specified offset
If a hit is found within the maximum distance, then the object's prim path and distance to it is returned.
Otherwise, a None and 10000 is returned.
Args:
position (np.array): origin's position for ray cast
orientation (np.array): origin's orientation for ray cast
offset (np.array): offset for ray cast
max_dist (float, optional): maximum distance to test for collisions in stage units. Defaults to 100.0.
Returns:
typing.Tuple[typing.Union[None, str], float]: path to geometry that was hit and hit distance, returns None, 10000 if no hit occurred
"""
# based on omni.isaac.core.utils.collisions.ray_cast
if viz:
draw = _debug_draw.acquire_debug_draw_interface()
draw.clear_lines()
input_tr = Gf.Matrix4f()
input_tr.SetTranslate(Gf.Vec3f(*position.tolist()))
input_tr.SetRotateOnly(Gf.Quatf(*orientation.tolist()))
offset_transform = Gf.Matrix4f()
offset_transform.SetTranslate(Gf.Vec3f(*offset.tolist()))
raycast_tf = offset_transform * input_tr
trans = raycast_tf.ExtractTranslation()
direction = raycast_tf.ExtractRotation().TransformDir((1, 0, 0))
origin = (trans[0], trans[1], trans[2])
ray_dir = (direction[0], direction[1], direction[2])
if viz:
draw.draw_lines([np.array(trans)], [np.array(trans) + np.array(direction) * max_dist], [np.array((1,0,0, 1))], [1])
hit = omni.physx.get_physx_scene_query_interface().raycast_closest(origin, ray_dir, max_dist)
if hit["hit"]:
usdGeom = UsdGeom.Mesh.Get(get_current_stage(), hit["rigidBody"])
distance = hit["distance"]
return usdGeom.GetPath().pathString, distance
return None, 10000.0
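# A minimal usage sketch of ray_cast(), assuming a running Isaac Sim stage with
# physics initialized. The pose values are illustrative; the identity
# quaternion (w, x, y, z) aims the ray along +x.
def _demo_ray_cast():
    path, distance = ray_cast(
        position=np.array([0.0, 0.0, 0.3]),
        orientation=np.array([1.0, 0.0, 0.0, 0.0]),
        offset=np.zeros(3),
        max_dist=2.0)
    if path is None:
        return None  # nothing hit within 2 m along the ray
    return path, distance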
def viz_axis(parent_path, position, orientation, scale=(1,1,1)):
prim_path = omni.usd.get_stage_next_free_path(get_current_stage(), parent_path, False)
prim = add_reference_to_stage(usd_path=os.path.join(srl.teleop.assistance.DATA_DIR, "axis.usda"), prim_path=prim_path)
prim = XFormPrim(str(prim.GetPath()), position=position, orientation=orientation)
prim.prim.SetInstanceable(True)
prim.set_local_scale(scale)
return prim
def viz_axis_named_T(name: str, T: np.ndarray, scale=(1,1,1)):
p, q = T2pq(T, as_float_array=True)
viz_axis_named(name,p, q, scale)
def viz_axis_named_Rp(name: str, R: np.ndarray, p: np.ndarray, scale=(1,1,1)):
q = quaternion.from_rotation_matrix(R)
viz_axis_named(name, p, quaternion.as_float_array(q), scale)
def viz_axis_named_Ts(name: str, Ts: np.ndarray, scale=(1,1,1)):
path = f"/Viz/{name}"
proto_path = "/Viz/axis_proto"
if not is_prim_path_valid(proto_path):
proto = add_reference_to_stage(usd_path=os.path.join(srl.teleop.assistance.DATA_DIR, "axis.usda"), prim_path=proto_path)
#UsdGeom.Imageable(proto).MakeInvisible()
p, q = T2pq(Ts)
QF = quaternion.as_float_array(q)
if is_prim_path_valid(path):
axes_prim = UsdGeom.PointInstancer(get_prim_at_path(path))
axes_prim.GetPositionsAttr().Set(p)
axes_prim.GetOrientationsAttr().Set(QF[:, (1,2,3,0)])
axes_prim.GetScalesAttr().Set([scale] * len(p))
else:
axes_prim = UsdGeom.PointInstancer.Define(get_current_stage(), path)
axes_prim.CreatePositionsAttr(p)
axes_prim.CreateOrientationsAttr(QF[:, (1,2,3,0)])
axes_prim.CreateProtoIndicesAttr([0] * len(p))
axes_prim.CreatePrototypesRel().SetTargets([proto_path])
axes_prim.CreateScalesAttr([scale] * len(p))
def viz_axis_named(name: str, position: np.ndarray, orientation: np.ndarray, scale=(1,1,1)):
path = f"/Viz/{name}"
if is_prim_path_valid(path):
axis_prim = XFormPrim(path)
else:
axis_prim = add_reference_to_stage(usd_path=os.path.join(srl.teleop.assistance.DATA_DIR, "axis.usda"), prim_path=path)
axis_prim = XFormPrim(str(axis_prim.GetPath()))
axis_prim.prim.SetInstanceable(True)
axis_prim.set_world_pose(position, orientation)
axis_prim.set_local_scale(scale)
return axis_prim
def viz_point_named(name: str, point, scale=(1,1,1)):
path = f"/Viz/{name}"
prim = VisualSphere(path, name, radius=scale[0] * .05 / 8)
prim.prim.CreateAttribute("primvars:doNotCastShadows", Sdf.ValueTypeNames.Bool).Set(True)
prim.set_world_pose(position=point)
def viz_points_named(name: str, points: np.ndarray, scale=(1,1,1), max_instances=None):
path = f"/Viz/{name}"
proto_path = "/Viz/sphere_proto"
p = points
assert len(points.shape) == 2 and points.shape[-1] == 3
if not is_prim_path_valid(proto_path):
proto = VisualSphere(proto_path, "sphere_Proto", radius=.05 / 8)
proto.prim.CreateAttribute("primvars:doNotCastShadows", Sdf.ValueTypeNames.Bool).Set(True)
if max_instances is None:
max_instances = len(points)
else:
p = np.resize(points, (max_instances, 3))
visible = np.arange(0, max_instances)
invisible = visible[len(points):]
if is_prim_path_valid(path):
axes_prim = UsdGeom.PointInstancer(get_prim_at_path(path))
axes_prim.GetPositionsAttr().Set(p)
#axes_prim.GetScalesAttr().Set([scale] * max_instances)
axes_prim.GetInvisibleIdsAttr().Set(invisible)
else:
axes_prim = UsdGeom.PointInstancer.Define(get_current_stage(), path)
axes_prim.CreatePositionsAttr(p)
axes_prim.CreateProtoIndicesAttr([0] * len(p))
axes_prim.CreatePrototypesRel().SetTargets([proto_path])
axes_prim.CreateScalesAttr([scale] * max_instances)
axes_prim.CreateInvisibleIdsAttr(invisible)
def viz_dirs_named_Ts(name, Ts, scale=(1,1,1), max_instances=None):
path = f"/Viz/{name}"
proto_path = "/Viz/cone_proto"
if not is_prim_path_valid(proto_path):
proto = VisualCone(proto_path, "cone_proto", height=0.05, radius=.05 / 8)
proto.prim.CreateAttribute("primvars:doNotCastShadows", Sdf.ValueTypeNames.Bool).Set(True)
p, q = T2pq(Ts)
QF = quaternion.as_float_array(q)
if max_instances is None:
max_instances = len(Ts)
else:
p = np.resize(p, (max_instances, 3))
QF = np.resize(QF, (max_instances, 4))
visible = np.arange(0, max_instances)
invisible = visible[len(Ts):]
if is_prim_path_valid(path):
axes_prim = UsdGeom.PointInstancer(get_prim_at_path(path))
axes_prim.GetPositionsAttr().Set(p)
axes_prim.GetOrientationsAttr().Set(QF[:, (1,2,3,0)])
#axes_prim.GetScalesAttr().Set([scale] * max_instances)
axes_prim.GetInvisibleIdsAttr().Set(invisible)
else:
axes_prim = UsdGeom.PointInstancer.Define(get_current_stage(), path)
axes_prim.CreatePositionsAttr(p)
axes_prim.CreateOrientationsAttr(QF[:, (1,2,3,0)])
axes_prim.CreateProtoIndicesAttr([0] * len(p))
axes_prim.CreatePrototypesRel().SetTargets([proto_path])
axes_prim.CreateScalesAttr([scale] * max_instances)
axes_prim.CreateInvisibleIdsAttr(invisible)
def viz_delta(name, from_prim, to_prim, radius=0.001):
path = f"/Viz/delta/{name}"
if not is_prim_path_valid(path):
prim = VisualCylinder(path, f"delta{name}", height=0, radius=radius)
prim.prim.CreateAttribute("primvars:doNotCastShadows", Sdf.ValueTypeNames.Bool).Set(True)
else:
prim = VisualCylinder(path, f"delta{name}", height=0, radius=radius)
from_pq = from_prim.get_world_pose()
from_p, from_q = from_pq[0], quaternion.from_float_array(from_pq[1])
from_T = pq2T(*from_pq)
to_T = pq2T(*to_prim.get_world_pose())
direction = to_T[:3,3] - from_T[:3,3]
prim.set_height(np.linalg.norm(direction))
ori = quaternion.from_rotation_matrix(make_rotation_matrix(normalized(direction), (1,0,0)))
prim.set_world_pose(from_p + (direction / 2), quaternion.as_float_array(ori))
def viz_delta_rooted_at(name, root_path, to_prim, radius=0.0005):
path = f"{root_path}/{name}"
prim = XFormPrim(path)
marker_prim = VisualCylinder(path + "/marker", f"delta{name}", height=0, radius=radius)
marker_prim.geom.GetAxisAttr().Set("Z")
from_prim = XFormPrim(root_path)
from_pq = from_prim.get_world_pose()
from_T = pq2T(*from_pq)
to_T = pq2T(*to_prim.get_world_pose())
diff = invert_T(from_T) @ to_T
direction = diff[:3,3]
ori = quaternion.from_rotation_matrix(make_rotation_matrix((direction), (1,0,0)))
prim.set_local_pose((0,0,0), quaternion.as_float_array(ori))
dist = np.linalg.norm(direction)
marker_prim.set_height(dist)
marker_prim.set_local_pose((0,0, dist / 2), (1,0,0,0))
def viz_laser_rooted_at(root_path, T):
beam_path = f"{root_path}/beam"
hit_path = f"{root_path}/hit"
if not is_prim_path_valid(root_path):
root = XFormPrim(root_path)
p, q = T2pq(T)
# Rotate to point Y in direction of X. No axis attr on CylinderLight
q = q * quaternion.from_euler_angles(np.array((0,-math.pi / 2,0)))
root.set_local_pose(p, quaternion.as_float_array(q))
beam = UsdLux.CylinderLight.Define(get_current_stage(), beam_path)
beam.AddTranslateOp()
beam.CreateColorAttr((1.,.1,.1))
beam.CreateIntensityAttr(50000.)
beam.CreateRadiusAttr(0.00075)
beam.CreateLengthAttr(0.0)
raw_beam = get_prim_at_path(beam_path)
raw_beam.CreateAttribute("visibleInPrimaryRay", Sdf.ValueTypeNames.Bool, True).Set(True)
hit = UsdLux.SphereLight.Define(get_current_stage(), hit_path)
hit.CreateColorAttr((1.,.8,.8))
hit.CreateIntensityAttr(300.)
hit.CreateRadiusAttr(0.0025)
hit.CreateExposureAttr(2.0)
hit.CreateDiffuseAttr(0.1)
hit.CreateSpecularAttr(0.9)
hit.AddTranslateOp()
raw_hit = get_prim_at_path(hit_path)
raw_hit.CreateAttribute("visibleInPrimaryRay", Sdf.ValueTypeNames.Bool, True).Set(True)
else:
root = XFormPrim(root_path)
beam = UsdLux.CylinderLight(get_prim_at_path(beam_path))
hit = UsdLux.SphereLight(get_prim_at_path(hit_path))
p,q = root.get_world_pose()
_, dist = ray_cast(p, q, np.zeros(3), 100)
beam.GetLengthAttr().Set(dist)
beam.GetOrderedXformOps()[0].Set((dist / 2.0, 0, 0))
hit.GetOrderedXformOps()[0].Set((dist, 0, 0))
| 11,334 |
Python
| 41.453183 | 140 | 0.667461 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/camera_franka.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import time
from typing import Optional, List
import numpy as np
from omni.isaac.manipulators.grippers.parallel_gripper import ParallelGripper
from omni.isaac.core.prims.xform_prim import XFormPrim
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import get_stage_units, get_current_stage, add_reference_to_stage
from omni.isaac.core.utils.prims import get_prim_at_path, is_prim_path_valid, delete_prim, find_matching_prim_paths
from omni.isaac.core.prims.rigid_prim_view import RigidContactView, RigidPrimView
from omni.isaac.core.objects import VisualCuboid
from omni.isaac.franka import Franka
from pxr import Sdf, UsdGeom, Gf
import omni
import omni.kit
import quaternion
from srl.teleop.assistance.profiling import profile
from srl.teleop.assistance.transforms import T2pq, pq2T, rotate_vec_by_quat
from omni.isaac.sensor import Camera, ContactSensor
FINGER_CONTACT_OFFSET = np.array((0,0,.045))
class GripperContentsDebouncer:
def __init__(self) -> None:
self.last_contents_path = None
self.last_contents_timestamp = None
self.to_report = None
self.to_report_stamp = None
self.last_update = time.time()
def update(self, content_path):
now = time.time()
self.last_update = now
if self.last_contents_path == content_path:
self.last_contents_timestamp = now
        elif self.last_contents_timestamp is None or now - self.last_contents_timestamp > 0.4:
#print("change to " + str(content_path))
self.last_contents_path = content_path
self.last_contents_timestamp = now
else:
pass
#print("ignoring change to " + str(content_path))
return self.last_contents_path
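# Illustrative sketch, not part of the original module: flickers shorter than
# the 0.4s debounce window are suppressed. `_demo_debouncer` is a hypothetical
# helper name.
def _demo_debouncer():
    debouncer = GripperContentsDebouncer()
    debouncer.update("/World/cube")
    # An immediate momentary dropout is ignored; the old path is still reported
    assert debouncer.update(None) == "/World/cube"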
class CameraFranka(Franka):
HOME_CONFIG = np.array([-0.01561307, -1.2717055 , -0.02706644, -2.859138, -0.01377442, 2.0233166, 0.7314064])
"""[summary]
Args:
prim_path (str): [description]
name (str, optional): [description]. Defaults to "franka_robot".
usd_path (Optional[str], optional): [description]. Defaults to None.
position (Optional[np.ndarray], optional): [description]. Defaults to None.
orientation (Optional[np.ndarray], optional): [description]. Defaults to None.
end_effector_prim_name (Optional[str], optional): [description]. Defaults to None.
gripper_dof_names (Optional[List[str]], optional): [description]. Defaults to None.
gripper_open_position (Optional[np.ndarray], optional): [description]. Defaults to None.
gripper_closed_position (Optional[np.ndarray], optional): [description]. Defaults to None.
"""
def __init__(
self,
prim_path: str,
name: str = "franka_robot",
usd_path: Optional[str] = None,
position: Optional[np.ndarray] = None,
orientation: Optional[np.ndarray] = None,
end_effector_prim_name: Optional[str] = None,
gripper_dof_names: Optional[List[str]] = None,
gripper_open_position: Optional[np.ndarray] = None,
gripper_closed_position: Optional[np.ndarray] = None,
deltas: Optional[np.ndarray] = None,
collision_sensors=True,
contact_paths=None,
camera_sensor=True
) -> None:
        # Resolve the assets root unconditionally: it is also needed below for the
        # detailed link geometry and the wrist camera reference.
        assets_root_path = get_assets_root_path()
        if usd_path is None:
            usd_path = assets_root_path + "/Isaac/Robots/Franka/franka.usd"
super().__init__(prim_path, name, usd_path, position, orientation,end_effector_prim_name, gripper_dof_names, gripper_open_position, gripper_closed_position)
stage = get_current_stage()
prim = stage.GetPrimAtPath(prim_path + "/panda_link0/geometry")
prim.GetReferences().ClearReferences()
prim.GetReferences().AddReference(assets_root_path + "/Isaac/Robots/Franka/DetailedProps/panda_link0.usd")
realsense_path = self.prim_path + "/panda_hand/geometry/realsense"
alt_fingers_realsense_path = f"{self.prim_path}/panda_hand/geometry/realsense/realsense_camera"
self._default_camera_transform = ((0.00,0.049,0.053), (.5,-.5,-.5,-.5))
if camera_sensor:
if not is_prim_path_valid(realsense_path):
realsense = UsdGeom.Xformable(add_reference_to_stage(assets_root_path + "/Isaac/Robots/Franka/DetailedProps/realsense.usd",realsense_path))
realsense.AddRotateXYZOp().Set((180.,180.,90.))
self._camera = Camera(alt_fingers_realsense_path)
self._camera.set_horizontal_aperture(200)
self._camera.set_focal_length(48.0)
self._camera.set_clipping_range(0.001, 10000000.0)
self._camera.set_local_pose(*self._default_camera_transform)
self._camera.set_resolution((1280,720))
#self._camera.pause()
else:
self._camera = Camera(alt_fingers_realsense_path)
else:
self._camera = None
self._physx_query_interface = omni.physx.get_physx_scene_query_interface()
self._gripper_contents_debouncer = GripperContentsDebouncer()
if self._end_effector_prim_name is None:
self._end_effector_prim_path = prim_path + "/panda_rightfinger"
if gripper_dof_names is None:
gripper_dof_names = ["panda_finger_joint1", "panda_finger_joint2"]
if gripper_open_position is None:
gripper_open_position = np.array([0.05, 0.05]) / get_stage_units()
if gripper_closed_position is None:
gripper_closed_position = np.array([0.0, 0.0])
if gripper_dof_names is not None:
if deltas is None:
deltas = np.array([0.05, 0.05]) / get_stage_units()
self._gripper = ParallelGripper(
end_effector_prim_path=self._end_effector_prim_path,
joint_prim_names=gripper_dof_names,
joint_opened_positions=gripper_open_position,
joint_closed_positions=gripper_closed_position,
action_deltas=deltas,
)
if not is_prim_path_valid(self.prim_path + "/panda_hand/leftfinger_collider"):
left_cube = UsdGeom.Cube.Define(get_current_stage(), self.prim_path + "/panda_hand/leftfinger_collider")
left_cube.AddTranslateOp().Set((0.0, 0.0525, 0.09))
left_cube.AddScaleOp().Set((0.01, 0.013, 0.025))
UsdGeom.Imageable(left_cube).MakeInvisible()
right_cube = UsdGeom.Cube.Define(get_current_stage(), self.prim_path + "/panda_hand/rightfinger_collider")
right_cube.AddTranslateOp().Set((0.0, -0.0525, 0.09))
right_cube.AddScaleOp().Set((0.01, 0.013, 0.025))
UsdGeom.Imageable(right_cube).MakeInvisible()
gripper_cube = UsdGeom.Cube.Define(get_current_stage(), self.prim_path + "/panda_hand/hand_collider")
gripper_cube.AddTranslateOp().Set((0.025, 0.0, 0.016))
gripper_cube.AddScaleOp().Set((0.045, 0.1, 0.05))
UsdGeom.Imageable(gripper_cube).MakeInvisible()
else:
left_cube = get_prim_at_path(self.prim_path + "/panda_hand/leftfinger_collider")
right_cube = get_prim_at_path(self.prim_path + "/panda_hand/rightfinger_collider")
gripper_cube = get_prim_at_path(self.prim_path + "/panda_hand/hand_collider")
self._gripper_collision_meshes = [gripper_cube, left_cube, right_cube]
self._gripper_collision_views = [XFormPrim(f"{part.GetPath()}") for part in self._gripper_collision_meshes]
self._palm_prim = XFormPrim(self.prim_path + "/panda_hand")
self.contact_sensors = []
self.contact_views = []
self.contact_path_filter = None
if collision_sensors:
if contact_paths:
for part in ["panda_leftfinger", "panda_rightfinger"]:
self.contact_views.append(RigidContactView(f"{prim_path}/{part}", contact_paths, name=f"{part}_rigid_contact_view"))
else:
if is_prim_path_valid(prim_path + "/panda_leftfinger/contact_sensor"):
delete_prim(prim_path + "/panda_leftfinger/contact_sensor")
delete_prim(prim_path + "/panda_rightfinger/contact_sensor")
left = ContactSensor(prim_path + "/panda_leftfinger/contact_sensor", "left_finger_contact_sensor", translation=FINGER_CONTACT_OFFSET, radius=.03)
right = ContactSensor(prim_path + "/panda_rightfinger/contact_sensor", "right_finger_contact_sensor", translation=FINGER_CONTACT_OFFSET, radius=.03)
left.add_raw_contact_data_to_frame()
right.add_raw_contact_data_to_frame()
self.contact_sensors = [left, right]
self.reset_camera_position()
@property
def camera(self) -> Camera:
"""[summary]
Returns:
RigidPrim: [description]
"""
return self._camera
def initialize(self, physics_sim_view=None) -> None:
"""[summary]
"""
for sensor in self.contact_sensors:
sensor.initialize(physics_sim_view)
for view in self.contact_views:
view.initialize(physics_sim_view)
super().initialize(physics_sim_view)
if self.camera:
# Prevent scrolling or clicking from moving the wrist camera
omni.kit.commands.execute("LockSpecs", spec_paths=[self.camera.prim_path])
return
def post_reset(self) -> None:
"""[summary]
"""
super().post_reset()
self.reset_camera_position()
return
def reset_camera_position(self) -> None:
if self.camera:
self.camera.set_local_pose(*self._default_camera_transform)
def set_contact_path_filter(self, path_filter):
self.contact_path_filter = path_filter
def check_gripper_contents(self, threshold=None) -> Optional[str]:
"""Get the path of a prim that is colliding with the gripper's palm and/or either finger
        Args:
            threshold (float, optional): minimum summed finger contact force required
                before reporting gripper contents. Defaults to None (no threshold).
        Returns:
            Optional[str]: path of the prim held by the gripper, or None if nothing is grasped
        """
if len(self.contact_views) > 0:
forces = np.zeros(2)
finger_contact_ids = np.full(2, -1)
for i, view in enumerate(self.contact_views):
reading = np.squeeze(view.get_contact_force_matrix())
per_obj_norm = np.linalg.norm(reading, axis=-1)
highest_j = np.argmax(per_obj_norm)
forces[i] = per_obj_norm[highest_j]
finger_contact_ids[i] = highest_j
#print(finger_contact_paths, finger_contact_forces, finger_contact_times, overlapping)
if sum(forces != 0) == 2 and finger_contact_ids[0] == finger_contact_ids[1]:
# Optionally ensure that we're applying at least a certain amount of force
if threshold is not None and sum(forces) < threshold:
return None
return self.contact_path_filter[finger_contact_ids[0]]
return None
finger_contact_forces = []
finger_contact_paths = []
finger_contact_times = []
def check_non_robot_overlap():
paths = []
true_path = None
x_offset = (self.gripper.get_joint_positions()[0] - self.gripper.get_joint_positions()[1]) / 2
aperture = max(self.gripper.get_joint_positions()[0] + self.gripper.get_joint_positions()[1] - 0.01, 0)
if aperture == 0.0:
return None
def report_hit(hit):
nonlocal true_path
nonlocal paths
path = hit.rigid_body
if self.prim_path in path:
return True
paths.append(path)
if self.contact_path_filter is not None and self.contact_path_filter(path):
true_path = path
return False
return True # return True to continue the query
gripper_mesh = self._palm_prim
#left_mesh, right_mesh = self._gripper_collision_meshes[1], self._gripper_collision_meshes[2]
position, orientation = gripper_mesh.get_world_pose()[0], gripper_mesh.get_world_pose()[1]
position += rotate_vec_by_quat(np.array((0.,x_offset, .0895)), quaternion.from_float_array(orientation))
scale = (0.02, aperture ,0.045)
#cube = VisualCuboid("/viz/overlap", position=position, orientation=orientation,scale=scale)
numHits = self._physx_query_interface.overlap_box(np.array(scale) / 2, position, orientation, report_hit, False)
return true_path
overlapping = check_non_robot_overlap()
for sensor in self.contact_sensors:
reading = sensor.get_current_frame()
if len(reading["contacts"]) == 0:
continue
contact = reading["contacts"][0]
body0 = contact["body0"]
body1 = contact["body1"]
# Make sure we're getting the body that _isn't_ the robot
if self.prim_path not in body0.lower():
to_report = body0
elif self.prim_path not in body1.lower():
to_report = body1
else:
# Might happen if self collision is enabled?
assert False
finger_contact_forces.append(reading["force"])
finger_contact_paths.append(to_report)
finger_contact_times.append(reading["time"])
reading["contacts"].clear()
finger_contact_forces = tuple(finger_contact_forces)
#print(finger_contact_paths, finger_contact_forces, finger_contact_times, overlapping)
if len(finger_contact_forces) == 2:
# Optionally ensure that we're applying at least a certain amount of force
if threshold is not None and sum(finger_contact_forces) < threshold:
return None
if overlapping != finger_contact_paths[0]:
pass #print("gripper contents mismatch")
return overlapping
elif len(finger_contact_forces) == 1:
# Object isn't grasped unless both fingers are in contact, but sometimes the sensor is not correct
# so we just trust the overlap query
return overlapping
else:
return None
@property
def gripper_contents(self):
if time.time() - self._gripper_contents_debouncer.last_update > 0.01:
return self._gripper_contents_debouncer.update(self.check_gripper_contents(threshold=0.0001))
else:
return self._gripper_contents_debouncer.last_contents_path
def get_gripper_collision_meshes(self):
return self._gripper_collision_meshes
def get_gripper_collision_Ts(self):
self._gripper_collision_transforms = [pq2T(*view.get_world_pose()) for view in self._gripper_collision_views]
return self._gripper_collision_transforms
| 15,255 |
Python
| 45.941538 | 164 | 0.615011 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/transforms.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import numpy as np
import math
from pxr import Gf
import quaternion
from quaternion import quaternion as quat
from numpy.linalg import norm
import copy
import traceback
from omni.isaac.core.utils.rotations import quat_to_rot_matrix, matrix_to_euler_angles, euler_angles_to_quat
from typing import Tuple, List, Optional
from omni.isaac.core.prims.rigid_prim import RigidPrim
from scipy.spatial.transform import Rotation
def orthogonalize(R: np.ndarray, prioritize=(0,1,2)) -> np.ndarray:
reverse_mapping = tuple(prioritize.index(i) for i in range(3))
# QR decomp will preserve the first axis. The priority
# arg lets the caller decide what they want to preserve.
ordered = R[:, prioritize]
ortho_R, r = np.linalg.qr(ordered)
# Sign of the upper-triangular component diagonals indicate
# whether the sign of the original axes were flipped. The
# result is still orthogonal, but we
# choose to flip them all back so that we have a unique
# solution that respects the input signs.
if r[0,0] < 0:
ortho_R[:, 0] *= -1
if r[1,1] < 0:
ortho_R[:, 1] *= -1
if r[2,2] < 0:
ortho_R[:, 2] *= -1
reordered = ortho_R[:, reverse_mapping]
return reordered
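# Illustrative sketch, not part of the original module: re-orthonormalize a
# slightly drifted rotation matrix while preserving its z-axis first in the
# QR priority order. `_demo_orthogonalize` is a hypothetical helper name.
def _demo_orthogonalize():
    R_drifted = np.eye(3) + 1e-3 * np.random.default_rng(0).normal(size=(3, 3))
    R_fixed = orthogonalize(R_drifted, prioritize=(2, 0, 1))
    # The result satisfies R^T R = I up to numerical precision
    assert np.allclose(R_fixed.T @ R_fixed, np.eye(3), atol=1e-8)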
def unpack_T(T) -> Tuple[np.ndarray, np.ndarray]:
""" Returns the rotation matrix and translation separately
Returns (R, p)
"""
return T[..., :3, :3], T[..., :3, 3]
def unpack_R(R) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
""" Returns the individual axes of the rotation matrix.
"""
return R[...,:3, 0], R[...,:3, 1], R[...,:3, 2]
def pack_R(ax, ay, az, as_homogeneous=False):
""" Returns a rotation matrix with the supplied axis columns.
R = [ax, ay, az]
"""
ax_v = np.atleast_2d(ax)
ay_v = np.atleast_2d(ay)
az_v = np.atleast_2d(az)
assert ax_v.shape[0] == ay_v.shape[0] == az_v.shape[0]
if as_homogeneous:
R = np.empty((ax_v.shape[0], 4, 4))
R[:] = np.eye(4)
else:
R = np.empty((ax_v.shape[0], 3, 3))
R[:] = np.eye(3)
R[...,:3, 0] = ax
R[...,:3, 1] = ay
R[...,:3, 2] = az
return np.squeeze(R)
def pack_Rp(R: np.ndarray, p: np.ndarray) -> np.ndarray:
""" Packs the provided rotation matrix (R) and position (p) into a homogeneous transform
matrix.
"""
# np.atleast_3d puts the extra dimension at the back but we need it at the front
Rv = np.atleast_2d(R)
Rb = Rv.view()
if Rv.ndim == 2:
Rb = Rv[None, :, :]
# The user can pass in a single R for many P, or a single P for many R. We'll size the output for
# the expected result of broadcasting.
pb = np.atleast_2d(p)
num_results = max(Rb.shape[0], pb.shape[0])
T = np.tile(np.eye(4)[None,...], (num_results, 1,1))
T[..., :3, :3] = Rb
T[..., :3, 3] = pb
if Rv.ndim == 2:
return T.squeeze()
else:
return T
def invert_T(T: np.ndarray):
""" Inverts the provided transform matrix using the explicit formula leveraging the
orthogonality of R and the sparsity of the transform.
Specifically, denote T = h(R, t) where h(.,.) is a function mapping the rotation R and
translation t to a homogeneous matrix defined by those parameters. Then
inv(T) = inv(h(R,t)) = h(R', -R't).
"""
R, t = unpack_T(T)
R_trans = np.swapaxes(R, -1, -2)
return pack_Rp(R_trans, np.squeeze(-R_trans @ t[..., None]))
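# Illustrative sketch, not part of the original module: the closed-form inverse
# agrees with a general matrix inverse. `_demo_invert_T` is a hypothetical
# helper name.
def _demo_invert_T():
    rng = np.random.default_rng(0)
    R = quaternion.as_rotation_matrix(quaternion.from_rotation_vector(rng.normal(size=3)))
    T = pack_Rp(R, rng.normal(size=3))
    assert np.allclose(invert_T(T), np.linalg.inv(T))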
def T2pq(T: np.ndarray, as_float_array=False) -> Tuple[np.ndarray, quat]:
""" Converts a 4d homogeneous matrix to a position-quaternion representation.
"""
R, p = unpack_T(T)
q = quaternion.from_rotation_matrix(R)
if as_float_array:
q = quaternion.as_float_array(q)
return p, q
def T2pq_array(T: np.ndarray) -> np.ndarray:
"""
Converts 4d homogeneous matrices to position-quaternion representation and stores them
in a (N,7) array. Rotation components of the transforms are assumed to already be orthonormal
"""
result = np.empty((len(T), 7), dtype=float)
R, result[:, :3] = unpack_T(T)
result[:, 3:] = quaternion.as_float_array(quaternion.from_rotation_matrix(R, nonorthogonal=False))
return result
def pq2T(p: np.ndarray, q: np.ndarray):
""" Converts a pose given as (<position>,<quaternion>) to a 4x4 homogeneous transform matrix.
"""
q_view = q
if q_view.dtype != "quaternion":
q_view = quaternion.from_float_array(q)
return pack_Rp(quaternion.as_rotation_matrix(q_view), p)
def euler2R(angles: np.array):
return pq2T((0,0,0), euler_angles_to_quat(angles))
def np_to_gfquat(q: np.array) -> Gf.Quatd:
qf = q.astype(float)
return Gf.Quatf(qf[0], Gf.Vec3f(qf[1], qf[2], qf[3]))
def rotate_vec_by_quat(v: np.ndarray, q: quat) -> np.ndarray:
q_view = quaternion.as_float_array(q)
u = q_view[1:]
s = q_view[0]
return 2.0 * np.dot(u, v) * u + (s*s - np.dot(u, u)) * v + 2.0 * s * np.cross(u, v)
def quat_vector_part(q):
"""Create an array of vector parts from an array of quaternions.
Parameters
----------
q : quaternion array_like
Array of quaternions.
Returns
-------
v : array
Float array of shape `q.shape + (3,)`
"""
q = np.asarray(q, dtype=np.quaternion)
return quaternion.as_float_array(q)[..., 1:]
def transform_dist(T1: np.ndarray, T2: np.ndarray, R_weight: float):
# eq 7 from 10.1007/978-3-319-33714-2_10
# Here the R distance is based on the magnitude of the geodesic, calculated directly via the trace
# If the translational distance is 0, the maximum distance is 2 * R_weight * sqrt(2/3). Set R_weight based on the size of the rigid bodies
# you are measuring between. So, around .15 is reasonable for a gripper
T1_v = T1.view()
T2_v = T2.view()
if len(T1.shape) == 2:
T1_v = T1[None,:]
if len(T2.shape) == 2:
T2_v = T2[None, :]
R1_inv = np.swapaxes(T1_v[...,:3,:3], -1, -2)
R2 = T2_v[...,:3,:3]
dists = np.linalg.norm(T2_v[..., :3, 3] - T1_v[...,:3,3], axis=-1) ** 2 + (2 * R_weight ** 2 * (1 - (np.trace(R1_inv @ R2, axis1=-1, axis2=-2) / 3)))
np.sqrt(dists, dists, where=dists>0)
return np.squeeze(dists)
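# Illustrative sketch, not part of the original module: for a pure rotation the
# distance saturates at 2 * R_weight * sqrt(2/3), per the note above.
# `_demo_transform_dist` is a hypothetical helper name.
def _demo_transform_dist():
    T1 = np.eye(4)
    T2 = euler2R(np.array((0., 0., np.pi)))  # 180 degree yaw, no translation
    d = transform_dist(T1, T2, R_weight=0.15)
    assert np.isclose(d, 2 * 0.15 * np.sqrt(2. / 3.))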
def quat_angle(q1: np.ndarray, q2: np.ndarray):
# Angle of rotation to get from one orientation to another
return np.arccos(2. * np.inner(q1, q2) ** 2 - 1)
def matrix_to_quat(mat: np.ndarray) -> np.ndarray:
""" Converts the provided rotation matrix into a quaternion in (w, x, y, z) order.
"""
return quaternion.as_float_array(quaternion.from_rotation_matrix(mat))
def matrix_to_euler_angles(mat: np.ndarray) -> np.ndarray:
"""Convert rotation matrix to Euler XYZ angles.
Args:
mat (np.ndarray): A 3x3 rotation matrix.
Returns:
np.ndarray: Euler XYZ angles (in radians).
"""
cy = np.sqrt(mat[0, 0] * mat[0, 0] + mat[1, 0] * mat[1, 0])
singular = cy < 0.00001
if not singular:
roll = math.atan2(mat[2, 1], mat[2, 2])
pitch = math.atan2(-mat[2, 0], cy)
yaw = math.atan2(mat[1, 0], mat[0, 0])
else:
roll = math.atan2(-mat[1, 2], mat[1, 1])
pitch = math.atan2(-mat[2, 0], cy)
yaw = 0
return np.array([roll, pitch, yaw])
def slerp_quat(quaternion_0: quat, quaternion_1: quat, alpha: float) -> quat:
return quaternion.slerp(quaternion_0, quaternion_1, 0, 1, alpha)
def normalize(v, axis=-1):
l2 = np.atleast_1d(norm(v, axis=axis))
l2[l2==0] = 1
return np.squeeze(v / np.expand_dims(l2, axis))
def normalized(v, axis=-1):
if v is None:
return None
return normalize(copy.deepcopy(v), axis=axis)
def proj_orth(v1, v2, normalize_res=False, eps=1e-5):
""" Projects v1 orthogonal to v2. If v2 is zero (within eps), v1 is returned
unchanged. If normalize_res is true, normalizes the result before returning.
"""
v1v = np.atleast_2d(v1)
v2_norm = np.atleast_1d(np.linalg.norm(v2, axis=-1))
unproj_mask = v2_norm < eps
v2n = v2 / np.expand_dims(v2_norm,axis=-1)
res = v1v - np.expand_dims(np.einsum('ij,ij->i',np.atleast_2d(v1), np.atleast_2d(v2n)), axis=-1) * v2n
res[unproj_mask] = v1v[unproj_mask]
res = np.squeeze(res)
if normalize_res:
return normalized(res)
else:
return res
def make_rotation_matrix(az_dominant: np.array, ax_suggestion: np.array):
""" Constructs a rotation matrix with the z-axis given by az_dominant (normalized), and the
    x-axis given by an orthogonally projected version of ax_suggestion. The y-axis is formed via the
right hand rule.
"""
az_v = np.atleast_1d(az_dominant)
ax_v = np.atleast_1d(ax_suggestion)
az_norm = normalized(az_v)
ax_proj = proj_orth(ax_v, az_norm, normalize_res=True)
ay = np.cross(az_norm, ax_proj)
return pack_R(ax_proj, ay, az_norm)
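# Illustrative sketch, not part of the original module: build a frame with z
# pointing world-down while keeping x along world-x.
# `_demo_make_rotation_matrix` is a hypothetical helper name.
def _demo_make_rotation_matrix():
    R = make_rotation_matrix(np.array((0., 0., -1.)), np.array((1., 0., 0.)))
    ax, ay, az = unpack_R(R)
    assert np.allclose(az, (0., 0., -1.)) and np.allclose(ax, (1., 0., 0.))
    assert np.allclose(np.cross(az, ax), ay)  # right-handed frame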
def axes_to_mat(axis_x, axis_z, dominant_axis="z"):
if dominant_axis == "z":
axis_x = proj_orth(axis_x, axis_z)
elif dominant_axis == "x":
axis_z = proj_orth(axis_z, axis_x)
elif dominant_axis is None:
pass
else:
raise RuntimeError("Unrecognized dominant_axis: %s" % dominant_axis)
axis_x = axis_x / norm(axis_x)
axis_z = axis_z / norm(axis_z)
axis_y = np.cross(axis_z, axis_x)
R = np.zeros((3, 3))
R[0:3, 0] = axis_x
R[0:3, 1] = axis_y
R[0:3, 2] = axis_z
return R
# Projects T to align with the provided direction vector v.
def proj_to_align(R, v):
max_entry = max(enumerate([np.abs(np.dot(R[0:3, i], v)) for i in range(3)]), key=lambda entry: entry[1])
return axes_to_mat(R[0:3, (max_entry[0] + 1) % 3], v)
def shortest_arc(normal_1: np.ndarray, normal_2: np.ndarray) -> quat:
# Are the normals already parallel?
normal_dot = normal_1.dot(normal_2)
if normal_dot > .99999:
# Same direction -> identity quat
return quaternion.quaternion(1,0,0,0)
elif normal_dot < -.999999:
# Exactly opposing -> 180 about arbitrary axis
return quaternion.quaternion(0,0,1,0)
else:
# Shortest arc between the vectors
a = np.cross(normal_1, normal_2)
# w is simple because we have unit normals: sqrt(norm(v1)**2 * norm(v2)**2) -> 1
return quaternion.quaternion(1 + normal_dot, *a).normalized()
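# Illustrative sketch, not part of the original module: the returned quaternion
# carries the first unit normal onto the second. `_demo_shortest_arc` is a
# hypothetical helper name.
def _demo_shortest_arc():
    n1 = np.array((1., 0., 0.))
    n2 = normalized(np.array((0., 1., 1.)))
    q = shortest_arc(n1, n2)
    assert np.allclose(quaternion.rotate_vectors(q, n1), n2)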
def transform_point(p: np.ndarray, T: np.ndarray) -> np.ndarray:
return (T @ np.array((*p, 1)))[:3]
def R_to_rot_vector(R: np.ndarray) -> np.ndarray:
theta = R_to_angle(R)
with np.errstate(invalid='ignore', divide='ignore'):
# undefined if theta is 0 but we handle that in the following line
aa = theta /(2 * np.sin(theta))*np.array([R[...,2,1]-R[...,1,2], R[...,0,2]-R[...,2,0], R[...,1,0]-R[...,0,1]])
return np.where(~np.isnan(theta) & (theta != 0.0), aa, 0).T
def R_to_angle(R: np.ndarray) -> np.ndarray:
return np.arccos(np.clip((np.trace(R, axis1=-1, axis2=-2) - 1) / 2.,-1, 1))
def random_vector_in_spherical_cap(theta, dir, n, rng=None) -> np.ndarray:
result = np.empty((n,3))
if rng is None:
rng = np.random.default_rng()
result[:, 2] = rng.uniform(size=n, low=np.cos(theta), high=1.)
phi = np.random.rand(n) * 2 * math.pi
result[:, 0] = np.sqrt(1-result[:,2]**2)*np.cos(phi)
result[:, 1] = np.sqrt(1-result[:,2]**2)*np.sin(phi)
if np.allclose(dir, (0,0,1)):
return result
rot = shortest_arc(np.array((0,0,1)), dir)
return quaternion.rotate_vectors(rot, result)
def cone_vectors(theta, phi_steps):
"""
Generate unit vectors along the surface of the cone with aperture theta pointing toward -Z, taking
phi_steps stops along the circle
"""
theta_v = np.atleast_1d(theta)
result = np.empty((len(theta_v), phi_steps, 3), dtype=float)
phi = np.linspace(0, math.pi * 2, phi_steps, endpoint=False)
# These are spherical coordinates
result[:,:,0] = np.sin(theta_v)[:,None] * np.cos(phi)
result[:,:,1] = np.sin(theta_v)[:,None] * np.sin(phi)
result[:,:,2] = np.cos(theta_v)[:,None]
return result.squeeze()
class FrameVelocityEstimator:
def __init__(self, dt):
self.T_prev = None
self.T_diff = None
self.last_dt = None
self.dt = dt
@property
def is_available(self):
return self.T_diff is not None
def update(self, T, dt=None):
if self.T_prev is not None:
self.T_diff = (invert_T(self.T_prev) @ T)
self.T_prev = T
self.last_dt = dt
def get_twist(self, small_angle=False) -> Optional[np.ndarray]:
if self.T_diff is None:
return None
dt = self.last_dt if self.last_dt is not None else self.dt
diff = np.reshape(self.T_diff, (-1, 4,4))
out = np.zeros((diff.shape[0], 6))
out[:, :3] = self.T_diff[...,:3,3]
if small_angle:
# If the angle is small, the difference matrix is very close to I + an infinitesimal rotation.
# This is good up to about theta=0.1
out[:, 3] = self.T_diff[..., 2,1]
out[:, 4] = self.T_diff[..., 0,2]
out[:, 5] = self.T_diff[..., 1,0]
else:
out[:, 3:] = R_to_rot_vector(self.T_diff[...,:3, :3])
return np.squeeze(out / dt)
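# Illustrative sketch, not part of the original module: two poses one tick
# apart yield a twist in units per second. `_demo_velocity_estimator` is a
# hypothetical helper name.
def _demo_velocity_estimator():
    est = FrameVelocityEstimator(dt=1. / 60.)
    est.update(np.eye(4))
    est.update(pq2T(np.array((0.01, 0., 0.)), np.array((1., 0., 0., 0.))))
    twist = est.get_twist()
    assert np.allclose(twist[:3], (0.6, 0., 0.))  # 1 cm per 1/60 s tick -> 0.6 m/s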
def get_obj_poses(objects: List[RigidPrim]) -> np.ndarray:
N = len(objects)
positions = np.empty((N, 3))
quats = np.empty((N, 4))
for i, obj in enumerate(objects):
p, q = obj.get_world_pose()
positions[i, :] = p
quats[i, :] = q
return pq2T(positions, quaternion.from_float_array(quats))
def integrate_twist(v: np.ndarray, w: np.ndarray, time=1):
"""
Find the matrix exponential of the 6 element twist, parameterized by
by time. Integrates the application of this twist over time.
"""
v = np.atleast_1d(v)
theta = np.linalg.norm(w)
if theta == 0:
return np.array([[1, 0, 0, v[0] * time],
[0, 1, 0, v[1] * time],
[0, 0, 1, v[2] * time],
[0, 0, 0, 1]])
else:
w_n = normalized(w)
theta *= time
# theta = time / theta
skew_w = np.array([[0, -w_n[2], w_n[1]],
[w_n[2], 0, -w_n[0]],
[-w_n[1], w_n[0], 0]])
skew_w_2 = skew_w @ skew_w
# Rodrigues' formula, forward exponential map (modern robotics 3.51)
R = np.eye(3) + (np.sin(theta) * skew_w) + ((1-np.cos(theta)) * skew_w_2)
        # Modern Robotics 3.88, but we scale the amount we move down the screw axis
        # by the magnitude of the rotation
p = ((np.eye(3) * theta) + (1 - np.cos(theta)) * skew_w + (theta - np.sin(theta)) * (skew_w_2)) @ (v / np.linalg.norm(w))
return np.array([[R[0,0], R[0,1], R[0,2], p[0]],
[R[1,0], R[1,1], R[1,2], p[1]],
[R[2,0], R[2,1], R[2,2], p[2]],
[0, 0, 0, 1]])
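# Illustrative sketch, not part of the original module: integrating a pure
# angular twist for one second matches the exponential of the rotation vector.
# `_demo_integrate_twist` is a hypothetical helper name.
def _demo_integrate_twist():
    w = np.array((0., 0., np.pi / 2))
    T = integrate_twist(np.zeros(3), w, time=1)
    expected_R = quaternion.as_rotation_matrix(quaternion.from_rotation_vector(w))
    assert np.allclose(T[:3, :3], expected_R) and np.allclose(T[:3, 3], 0.)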
def integrate_twist_stepwise(v: np.ndarray, w: np.ndarray, until_time: float, n_steps: int) -> np.ndarray:
""" Integrate the twist (v,w), providing 1 + until_time * n_steps points, beginning with (0,0,0)
"""
step = 1 / n_steps
result = np.empty((1 + int(until_time * n_steps), 3))
result[0] = (0,0,0)
R = quaternion.as_rotation_matrix(quaternion.from_rotation_vector(w * step))
for i in range(1, len(result)):
result[i] = (R @ result[i-1]) + v * step
return result
def homogeneous_to_twist(Ts):
diff = np.reshape(Ts, (-1, 4,4))
out = np.zeros((diff.shape[0], 6))
out[:, :3] = Ts[...,:3,3]
out[:, 3:] = R_to_rot_vector(Ts[...,:3, :3])
return np.squeeze(out)
def lognormalize(x):
# Calculate log of all components exponentiated
a = np.logaddexp.reduce(x)
if a == float('-inf'):
# Return unchanged dist for all 0s
return x.copy()
# "Divide" all values by the max
return x - a
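# Illustrative sketch, not part of the original module: normalize
# log-probabilities without leaving log space. `_demo_lognormalize` is a
# hypothetical helper name.
def _demo_lognormalize():
    log_p = np.log(np.array((0.2, 0.3, 0.5))) + 42.0  # arbitrary unnormalized offset
    assert np.isclose(np.exp(lognormalize(log_p)).sum(), 1.0)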
| 16,151 |
Python
| 32.861635 | 153 | 0.590737 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/ghost_object.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from typing import Optional
import numpy as np
from omni.isaac.core.utils.prims import get_prim_at_path, is_prim_path_valid
from pxr import Usd, UsdGeom, UsdPhysics, UsdShade, Sdf
from omni.isaac.core.prims.xform_prim import XFormPrim
import omni
from typing import Sequence
from pxr import Gf
from omni.physxcommands import UnapplyAPISchemaCommand
from srl.teleop.assistance.ghost_franka import load_ghost_material
def make_ghost(from_object_at_path, ghost_path, ghost_name, material_path="/Looks/GhostVolumetric"):
if is_prim_path_valid(ghost_path):
return
result = omni.kit.commands.execute(
"CopyPrimCommand", path_from=from_object_at_path, path_to=ghost_path, duplicate_layers=False, combine_layers=False
)
return GhostObject(ghost_path, ghost_name, material_path=material_path)
class GhostObject(XFormPrim):
def __init__(self, prim_path: str, name: str = "xform_prim", position: Optional[Sequence[float]] = None, translation: Optional[Sequence[float]] = None, orientation: Optional[Sequence[float]] = None, scale: Optional[Sequence[float]] = None, visible: Optional[bool] = False, material_path="/Looks/GhostVolumetric") -> None:
super().__init__(prim_path, name, position, translation, orientation, scale, visible)
self.material, self.material_inputs = load_ghost_material(material_path)
self.material_inputs["inputs:transmission_color"].Set((1.5, 1.5, 1.5))
self.material_inputs["inputs:emission_color"].Set((1.25, 1.25, 1.25))
self.material_inputs["inputs:emissive_scale"].Set(300.)
self._current_color = None
self._current_opacity = None
self._imageable = UsdGeom.Imageable(self.prim)
self.apply_visual_material(self.material)
self.remove_physics()
# Shadows give better depth cues, but have strange artifacts (z-fighting, and slow pop in)
#self.prim.CreateAttribute("primvars:doNotCastShadows", Sdf.ValueTypeNames.Bool).Set(True)
def disable_collisions(self):
# Disable colliders
for p in Usd.PrimRange(self.prim):
if p.HasAPI(UsdPhysics.CollisionAPI):
collision_api = UsdPhysics.CollisionAPI(p)
collision_api.GetCollisionEnabledAttr().Set(False)
if p.HasAPI(UsdPhysics.RigidBodyAPI):
physx_api = UsdPhysics.RigidBodyAPI(p)
physx_api.CreateRigidBodyEnabledAttr(False)
physx_api.GetRigidBodyEnabledAttr().Set(False)
def remove_physics(self):
UnapplyAPISchemaCommand(UsdPhysics.CollisionAPI, self.prim).do()
UnapplyAPISchemaCommand(UsdPhysics.RigidBodyAPI, self.prim).do()
@property
def visible(self):
return self._imageable.GetVisibilityAttr().Get() != "invisible"
def hide(self):
self._imageable.MakeInvisible()
def show(self):
self._imageable.MakeVisible()
def set_color(self, color, opacity=1.0):
if color == self._current_color and opacity == self._current_opacity:
# idempotent
return
transmission = 1.0 - opacity
def clip(value):
# Inputs seem to behave differently for 0 and close to 0 for some reason...
return Gf.Vec3f(*np.clip(value, 0.0001, 1.0))
# The colors you don't absorb will shine through.
# The color you emit shows in the absence of other colors
if color == "red":
self.material_inputs["inputs:absorption"].Set((transmission, 0, 0))
elif color == "yellow":
self.material_inputs["inputs:absorption"].Set((transmission, transmission, 0))
elif color == "green":
self.material_inputs["inputs:absorption"].Set((0, transmission, 0))
elif color == "white":
self.material_inputs["inputs:absorption"].Set(clip((opacity, opacity, opacity)))
else:
return
self._current_color = color
self._current_opacity = opacity
| 4,158 |
Python
| 42.778947 | 325 | 0.664502 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/swing_twist.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import numpy as np
from quaternion import quaternion as quat
from typing import Tuple
from .transforms import quat_vector_part
def swing_twist_decomposition(q: quat, axis: np.ndarray) -> Tuple[quat, quat]:
dir = quat_vector_part(q)
dot_dir_axis = dir.dot(axis)
projected = dot_dir_axis / np.linalg.norm(axis) * axis
    twist = quat(q.w, projected[0], projected[1], projected[2])
if dot_dir_axis < 0.0:
twist *= -1
twist /= twist.norm()
swing = q * twist.conjugate()
swing /= swing.norm()
return swing, twist
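# Illustrative sketch, not part of the original module: the decomposition is
# exact, i.e. swing * twist reconstructs the input rotation.
# `_demo_swing_twist` is a hypothetical helper name.
def _demo_swing_twist():
    q = quat(*np.random.default_rng(0).normal(size=4)).normalized()
    swing, twist = swing_twist_decomposition(q, np.array((1., 0., 0.)))
    recomposed = swing * twist
    assert np.isclose(abs((recomposed * q.conjugate()).w), 1.0)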
class SwingTwistLeash:
def __init__(self, trans_limit, rot_limit) -> None:
self.set_limits(trans_limit, rot_limit)
def set_limits(self, trans: float, rot: float):
# Radians
self.rot_limit = rot
self.trans_limit = trans
self.max_swing_mag = (1. - np.cos(self.rot_limit)) / 2
self.max_swing_mag2 = np.sin(.5 * self.rot_limit)
self.max_swing_w = np.sqrt(1.0 - self.max_swing_mag)
def apply(self, anchor_p: np.ndarray, anchor_q: quat, new_p: np.ndarray, new_q: quat):
# And now we'll apply limits to keep the target within a certain delta from the current gripper pose
limited_p = new_p
pos_diff = np.array(new_p - anchor_p)
pos_diff_norm = np.linalg.norm(pos_diff)
pos_dir = pos_diff / pos_diff_norm
if pos_diff_norm > self.trans_limit:
# Project the desired position target onto the surface of the sphere with the limit radius
limited_p = anchor_p + (pos_dir * self.trans_limit)
# Orientation limits
limited_q = new_q
# Just the relative rotation from current orientation to the proposed new target
r_delta = new_q * anchor_q.conjugate()
# Get the part of the rotation that twists about (1,0,0) and the part that swings that axis
swing, twist = swing_twist_decomposition(r_delta, np.array((1,0,0)))
swing_vec = quat_vector_part(swing)
swing_magnitude = np.linalg.norm(swing_vec)
# Cone constraint: limit swing
if (swing_magnitude > self.max_swing_mag):
limited_swing_vec = swing_vec / swing_magnitude * self.max_swing_mag
w_sign = -1 if swing.w < 0 else 1
swing = quat(w_sign * self.max_swing_w, *limited_swing_vec)
limited_q = swing * twist * anchor_q
return limited_p, limited_q
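# Illustrative sketch, not part of the original module: clamp a target pose to
# stay within 5 cm and 30 degrees of an anchor. `_demo_leash` is a hypothetical
# helper name.
def _demo_leash():
    leash = SwingTwistLeash(trans_limit=0.05, rot_limit=np.deg2rad(30))
    anchor_p, anchor_q = np.zeros(3), quat(1., 0., 0., 0.)
    target_p = np.array((1., 0., 0.))  # well outside the 5 cm leash
    limited_p, limited_q = leash.apply(anchor_p, anchor_q, target_p, anchor_q)
    assert np.isclose(np.linalg.norm(limited_p - anchor_p), 0.05)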
| 2,559 |
Python
| 38.384615 | 109 | 0.63306 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/assistance_extension.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from srl.teleop.assistance import Assistance
from omni.isaac.ui.ui_utils import cb_builder, dropdown_builder, btn_builder, combo_floatfield_slider_builder, state_btn_builder
from omni.kit.viewport.utility import get_active_viewport_window
import omni
from srl.teleop.assistance.experiment import PARTICIPANT_ID, SLOT_NAMES, configure_for_condition_index, get_ordering
from srl.teleop.assistance.logging import is_folder
from srl.teleop.assistance.spacemouse_demo import SpaceMouseManipulator
from srl.teleop.assistance.ui import ASSISTANCE_MODES, CONTROL_FRAMES, add_overlay, str_builder, multi_btn_builder
from srl.teleop.assistance.scene import ViewportScene
from srl.spacemouse.spacemouse_extension import get_global_spacemouse, get_global_spacemouse_extension
import os
from omni.isaac.ui.ui_utils import setup_ui_headers, get_style
import numpy as np
import carb
from omni.isaac.core.utils.viewports import set_camera_view
import omni.ui as ui
from omni.kit.menu.utils import add_menu_items, remove_menu_items, MenuItemDescription
import weakref
import omni.ext
import asyncio
from omni.isaac.core import World
from omni.kit.quicklayout import QuickLayout
from .logging import save_log
from functools import partial
import time
class AssistanceExtension(omni.ext.IExt):
def on_startup(self, ext_id: str):
        # Importing profiling installs the lightweight carb.profiler monkey-patch (see profiling.py)
from . import profiling
self._ext_id = ext_id
menu_items = [MenuItemDescription(name="Teleop Assistance", onclick_fn=lambda a=weakref.proxy(self): a._menu_callback())]
self._menu_items = menu_items
add_menu_items(self._menu_items, "SRL")
self._settings = carb.settings.get_settings()
self._viewport = get_active_viewport_window("Viewport")
self.timeline = omni.timeline.get_timeline_interface()
self.task_ui_elements = {}
self._world_buttons = {}
self._plots = {}
self.build_ui(name="Teleop Assistance",
title="Teleop Assistance",
doc_link="",
overview="Provides assistance during human operated pick and place",
file_path=os.path.abspath(__file__),
number_of_extra_frames=3,
window_width=350,)
frame = self.get_frame(index=0)
self.build_assistance_ui(frame)
self.logging_ui = {}
frame = self.get_frame(index=1)
self.build_data_logging_ui(frame)
self.center_label, self.left_label = add_overlay(self._viewport, ext_id)
self._viewport_scene = ViewportScene(self._viewport, ext_id)
self._assistance_system = None
self._plotting_event_subscription = None
def get_frame(self, index):
if index >= len(self._extra_frames):
            raise Exception("only {} extra frames were created".format(len(self._extra_frames)))
return self._extra_frames[index]
def _menu_callback(self):
self._window.visible = not self._window.visible
return
def shutdown_cleanup(self):
if self.center_label:
self.center_label[0].destroy()
self.center_label[1].destroy()
self.center_label = None
if self.left_label:
self.left_label[0].destroy()
self.left_label[1].destroy()
self.left_label = None
# destroy scene
if self._viewport_scene:
self._viewport_scene.destroy()
self._viewport_scene = None
def _on_logging_button_event(self, val):
self._assistance_system._on_logging_event(val)
self.logging_ui["Save Data"].enabled = True
return
def _on_save_data_button_event(self):
world = World.instance()
data_logger = world.get_data_logger()
frames = data_logger._data_frames
current_task_name = list(world.get_current_tasks())[0]
current_task = world.get_current_tasks()[current_task_name]
#user_name = self.logging_ui["User"].get_value_as_string()
user_name = str(PARTICIPANT_ID)
timestamp = time.time()
timestamp_str = time.strftime("%Y%m%d-%H%M%S")
log_path = self.logging_ui["Output Directory"].get_value_as_string()
log_name = f"{user_name}-{current_task_name}-{self.condition_i}-{timestamp_str}"
log_path = f"{log_path}/{log_name}.hdf5"
metadata = {"collected_timestamp": timestamp, "task": current_task_name, "user": user_name, "condition_id": self.condition_i, "experiment_i": self.world_i}
metadata = {**metadata, **current_task.get_params()}
def done_saving():
data_logger.reset()
# If we're saving at a shutdown point, UI and self will vanish
if hasattr(self, "logging_ui") and self.logging_ui:
self.logging_ui["Save Data"].enabled = False
self.logging_ui["Start Logging"].text = "START"
carb.log_info("Saved " + log_path)
self._viewport._post_toast_message("Saved log", "test")
asyncio.ensure_future(
save_log(log_path, frames, metadata, done=done_saving)
)
def _on_option_button_event(self, name, value):
asyncio.ensure_future(
self._assistance_system._on_ui_value_change(name, value)
)
def post_reset_button_event(self):
self.logging_ui["Start Logging"].enabled = True
self.logging_ui["Save Data"].enabled = False
def post_load_button_event(self):
self.logging_ui["Start Logging"].enabled = True
self.logging_ui["Save Data"].enabled = False
def _on_load_world(self, world_index):
self._enable_all_buttons(False, False)
if self._viewport_scene:
self._viewport_scene.destroy()
self._viewport_scene = ViewportScene(self._viewport, self._ext_id, use_scene_camera=False)
# This will close the current stage and stop the world, causing any logs to be saved
omni.usd.get_context().new_stage()
task, condition_i = configure_for_condition_index(world_index, self.task_ui_elements, PARTICIPANT_ID)
self.condition_i = condition_i
self.world_i = world_index
self._assistance_system = Assistance(task, None)
self._assistance_system.viewport_scene = self._viewport_scene
self._assistance_system.register_ui_models({
"control_frame": self.task_ui_elements["Control Frame"],
"overlay_opacity": self.task_ui_elements["Overlay Opacity"],
"assistance_mode": self.task_ui_elements["Assistance Mode"],
"avoid_obstacles": self.task_ui_elements["Avoid Obstacles"],
"suggest_grasps": self.task_ui_elements["Suggest Grasps"],
"suggest_placements": self.task_ui_elements["Suggest Placements"],
"snapping": self.task_ui_elements["Snapping"],
"use_laser": self.task_ui_elements["Laser"],
"use_surrogates": self.task_ui_elements["Surrogates"],
"center_label": self.center_label,
"left_label": self.left_label
})
async def _on_load_world_async():
found_mouse = await get_global_spacemouse_extension().discover_mouse()
if not found_mouse:
self._enable_all_buttons(True, True)
carb.log_error("Can't connect to spacemouse")
return
await self._assistance_system.load_world_async()
await omni.kit.app.get_app().next_update_async()
#self._viewport_scene.add_manipulator(lambda: SpaceMouseManipulator(grid=False))
self._assistance_system._world.add_stage_callback("stage_event_1", self.on_stage_event)
self._enable_all_buttons(True, True)
self.post_load_button_event()
self._assistance_system._world.add_timeline_callback("stop_reset_event", self._reset_on_stop_event)
self.timeline.play()
self._assistance_system._on_logging_event(True)
asyncio.ensure_future(_on_load_world_async())
"""if not self._plotting_event_subscription:
self._plotting_event_subscription = (
omni.kit.app.get_app().get_update_event_stream().create_subscription_to_pop(self._on_plotting_step)
)"""
return
def _on_load_spacemouse_demo(self):
from srl.teleop.assistance import DATA_DIR
if self._viewport_scene:
self._viewport_scene.destroy()
self._viewport_scene = ViewportScene(self._viewport, self._ext_id, use_scene_camera=True)
# This will close the current stage and stop the world, causing any logs to be saved
omni.usd.get_context().new_stage()
QuickLayout.load_file(f"{DATA_DIR}/experiment_layout.json", False)
async def _load_async():
set_camera_view((-1., -3, 3), (0.,0.,0.))
found_mouse = await get_global_spacemouse_extension().discover_mouse()
if not found_mouse:
carb.log_error("Can't connect to spacemouse")
return
await omni.kit.app.get_app().next_update_async()
self._viewport_scene.add_manipulator(SpaceMouseManipulator)
self._enable_all_buttons(True, True)
self.post_load_button_event()
self._plotting_event_subscription = (
omni.kit.app.get_app().get_update_event_stream().create_subscription_to_pop(self._on_plotting_step)
)
asyncio.ensure_future(_load_async())
def _on_plotting_step(self, step):
device = get_global_spacemouse()
if self._viewport_scene.manipulator:
self._viewport_scene.manipulator.update(device.get_controller_state())
def _on_reset(self):
async def _on_reset_async():
await self._assistance_system.reset_async()
await omni.kit.app.get_app().next_update_async()
self.post_reset_button_event()
asyncio.ensure_future(_on_reset_async())
return
def _on_stop(self):
async def _on_stop_async():
world = World.instance()
world.stop()
asyncio.ensure_future(_on_stop_async())
return
def _enable_all_buttons(self, load_flag, other_flag):
for btn in self._world_buttons["Load World"]:
btn.enabled=load_flag
for btn_name, btn in self._world_buttons.items():
if isinstance(btn, omni.ui._ui.Button):
btn.enabled = other_flag
self._world_buttons["Stop"].enabled = other_flag
def on_shutdown(self):
self._extra_frames = []
if self._assistance_system is None:
print("self._assistance_system is none. Not sure if this is a problem")
if self._assistance_system is not None and self._assistance_system._world is not None:
self._assistance_system._world_cleanup()
if self._menu_items is not None:
self._window_cleanup()
if self._world_buttons is not None:
self._enable_all_buttons(True, False)
self.shutdown_cleanup()
return
def _window_cleanup(self):
remove_menu_items(self._menu_items, "SRL")
self._window = None
self._menu_items = None
self._world_buttons = None
return
def on_stage_event(self, event):
# event_type = omni.usd.StageEventType(event.type)
if event.type == int(omni.usd.StageEventType.CLOSED):
# If the stage is closed before on_startup has run, all of our fields will be undefined
if World.instance() is not None and hasattr(self, "_assistance_system") and self._assistance_system:
self._assistance_system._world_cleanup()
self._assistance_system._world.clear_instance()
self._assistance_system = None
# There's no World now, so in any case the user can load anew!
if hasattr(self, "_world_buttons"):
self._enable_all_buttons(True, False)
return
def _reset_on_stop_event(self, e):
if e.type == int(omni.timeline.TimelineEventType.STOP):
if self._assistance_system:
self._enable_all_buttons(True, False)
self._on_save_data_button_event()
# NOTE(3-8-22): Trying to close the world here produces segfaults
return
def build_ui(self, name, title, doc_link, overview, file_path, number_of_extra_frames, window_width):
self._window = omni.ui.Window(
name, width=window_width, height=0, visible=True, dockPreference=ui.DockPreference.RIGHT_TOP
)
self._window.deferred_dock_in("Stage", ui.DockPolicy.TARGET_WINDOW_IS_ACTIVE)
self._extra_frames = []
with self._window.frame:
with ui.VStack(spacing=5, height=0):
setup_ui_headers(self._ext_id, file_path, title, doc_link, overview)
self._controls_frame = ui.CollapsableFrame(
title="World Controls",
width=ui.Fraction(1),
height=0,
collapsed=False,
style=get_style(),
horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_AS_NEEDED,
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON,
)
with self._controls_frame:
with ui.VStack(style=get_style(), spacing=5, height=0):
ui.Label(f"You are participant {PARTICIPANT_ID}", width=ui.Fraction(1), alignment=ui.Alignment.CENTER, tooltip="Use this ID whenever prompted")
ui.Spacer(height=5)
dict = {
"label": "Load World",
"type": "button",
"text": SLOT_NAMES,
"tooltip": ["Load World and Task" for _ in range(len(SLOT_NAMES) + 1)],
"on_clicked_fn": [self._on_load_spacemouse_demo] + [partial(self._on_load_world,i) for i in range(len(SLOT_NAMES) - 1)],
}
self._world_buttons["Load World"] = multi_btn_builder(**dict)
for btn in self._world_buttons["Load World"]:
btn.enabled=True
dict = {
"label": "Stop",
"type": "button",
"text": "Stop",
"tooltip": "Reset robot and environment",
"on_clicked_fn": self._on_stop,
}
self._world_buttons["Stop"] = btn_builder(**dict)
self._world_buttons["Stop"].enabled = False
dict = {
"label": "Reset",
"type": "button",
"text": "Reset",
"tooltip": "Reset robot and environment",
"on_clicked_fn": self._on_reset,
}
self._world_buttons["Reset"] = btn_builder(**dict)
self._world_buttons["Reset"].enabled = False
ui.Spacer(height=10)
ui.Label(f"Version 6430.{''.join(map(str,get_ordering(PARTICIPANT_ID)))}", width=ui.Fraction(1), alignment=ui.Alignment.CENTER, tooltip="")
with ui.VStack(style=get_style(), spacing=5, height=0):
for i in range(number_of_extra_frames):
self._extra_frames.append(
ui.CollapsableFrame(
title="",
width=ui.Fraction(0.33),
height=0,
visible=False,
collapsed=True,
style=get_style(),
horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_AS_NEEDED,
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON,
)
)
def build_assistance_ui(self, frame):
with frame:
with ui.VStack(spacing=5):
frame.title = "Settings"
frame.visible = True
dict = {
"label": "Control Frame",
"tooltip": "The coordinate system used to map control input to robot motion",
#"on_clicked_fn": self._on_control_frame_event,
"default_val": 2,
"items": CONTROL_FRAMES
}
self.task_ui_elements["Control Frame"] = dropdown_builder(**dict)
dict = {
"label": "Assistance Overlay Opacity",
"tooltip": ["How opaque the overlaid suggestions should be", ""],
"default_val": .2,
"min": 0.0,
"max": 1.0
}
self.task_ui_elements["Overlay Opacity"] = combo_floatfield_slider_builder(**dict)
dict = {
"label": "Assistance Mode",
"tooltip": "The format of assistance provided",
#"on_clicked_fn": self._on_assistance_mode_event,
"items": ASSISTANCE_MODES
}
self.task_ui_elements["Assistance Mode"] = dropdown_builder(**dict)
dict = {
"label": "Use Surrogates",
"tooltip": "Whether to use interactive surrogates to select suggestions",
"default_val": False,
"on_clicked_fn": partial(self._on_option_button_event, "use_surrogates"),
}
self.task_ui_elements["Surrogates"] = cb_builder(**dict)
dict = {
"label": "Avoid Obstacles",
"tooltip": "Avoid Obstacles",
"default_val": False,
"on_clicked_fn": partial(self._on_option_button_event, "avoid_obstacles"),
}
self.task_ui_elements["Avoid Obstacles"] = cb_builder(**dict)
dict = {
"label": "Suggest Grasps",
"tooltip": "Whether to suggest grasps",
"default_val": True,
"on_clicked_fn": partial(self._on_option_button_event, "suggest_grasps"),
}
self.task_ui_elements["Suggest Grasps"] = cb_builder(**dict)
dict = {
"label": "Suggest Placements",
"tooltip": "Whether to suggest placements",
"default_val": True,
"on_clicked_fn": partial(self._on_option_button_event, "suggest_placements"),
}
self.task_ui_elements["Suggest Placements"] = cb_builder(**dict)
dict = {
"label": "Snapping",
"tooltip": "Whether to snap suggestions",
"default_val": True,
"on_clicked_fn": partial(self._on_option_button_event, "snapping"),
}
self.task_ui_elements["Snapping"] = cb_builder(**dict)
dict = {
"label": "Laser",
"tooltip": "Enable a laser pointer attached to the gripper",
"default_val": False,
"on_clicked_fn": partial(self._on_option_button_event, "use_laser"),
}
self.task_ui_elements["Laser"] = cb_builder(**dict)
return
def build_data_logging_ui(self, frame):
with frame:
with ui.VStack(spacing=5):
frame.title = "Data Logging"
frame.visible = True
dict = {
"label": "Output Directory",
"type": "stringfield",
"default_val": os.path.expanduser('~/Documents/trajectories'),
"tooltip": "Output Directory",
"on_clicked_fn": None,
"use_folder_picker": True,
"item_filter_fn": is_folder,
"read_only": False,
}
self.logging_ui["Output Directory"] = str_builder(**dict)
dict = {
"label": "User",
"type": "stringfield",
"default_val": "unspecified",
"tooltip": "Name of operator",
"on_clicked_fn": None,
"use_folder_picker": False,
"read_only": False,
}
self.logging_ui["User"] = str_builder(**dict)
dict = {
"label": "Start Logging",
"type": "button",
"a_text": "START",
"b_text": "PAUSE",
"tooltip": "Start Logging",
"on_clicked_fn": self._on_logging_button_event,
}
self.logging_ui["Start Logging"] = state_btn_builder(**dict)
self.logging_ui["Start Logging"].enabled = False
dict = {
"label": "Save Data",
"type": "button",
"text": "Save Data",
"tooltip": "Save Data",
"on_clicked_fn": self._on_save_data_button_event,
}
self.logging_ui["Save Data"] = btn_builder(**dict)
self.logging_ui["Save Data"].enabled = False
return
| 22,036 |
Python
| 43.609312 | 167 | 0.548058 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/profiling.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from time import perf_counter
import carb.profiler
profile_table = {}
name_stack = []
class profile:
def __init__(self, name="", active=True) -> None:
self.name = name
self.active = active
pass
def __enter__(self):
self.time = perf_counter()
carb.profiler.begin(1, self.name, active=self.active)
return self
def __exit__(self, type, value, traceback):
self.time = perf_counter() - self.time
self.readout = f'{self.name} Time: {self.time * 1000:.2f} milliseconds'
carb.profiler.end(1, self.active)
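# Illustrative sketch, not part of the original module: time a block and print
# the captured readout. `_demo_profile` is a hypothetical helper name.
def _demo_profile():
    with profile("demo_block") as timer:
        sum(range(100_000))
    print(timer.readout)  # e.g. "demo_block Time: 1.23 milliseconds"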
def is_profiler_active():
# Flip this to True if you want profiling information to print out
return False
def begin(mask, name, stack_offset=0, active=False):
if not is_profiler_active() or not active:
return
profile_table[name] = perf_counter()
name_stack.append(name)
def end(mask, active=False):
if not is_profiler_active() or not active:
return
start_stack_depth = len(name_stack)
if start_stack_depth == 0:
return
name = name_stack.pop()
print(" " * (start_stack_depth - 1) + f"{name}: {(perf_counter() - profile_table[name]) * 1000:.2f}ms")
del profile_table[name]
# TBR
carb.profiler.begin = begin
carb.profiler.end = end
| 1,432 |
Python
| 26.557692 | 108 | 0.638966 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/shapenet_import.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from collections import defaultdict
import omni.client
import omni.kit
import omni.usd
import asyncio
import os
from pxr import UsdGeom, Gf, Tf, Usd, UsdShade, UsdPhysics
import random
from omni.isaac.core.utils.stage import add_reference_to_stage, get_current_stage
from omni.isaac.core.utils.prims import is_prim_path_valid, get_prim_type_name, delete_prim, get_all_matching_child_prims, get_prim_at_path
from omni.isaac.core.prims.xform_prim import XFormPrim
from omni.isaac.core.prims.rigid_prim import RigidPrim
from typing import Optional, Sequence
import numpy as np
from srl.teleop.assistance.transforms import invert_T
from omni.physx.scripts.utils import setColliderSubtree
from os import listdir
from os.path import isfile, join
ACRONYM_BY_CAT = None
ACRONYM_ROOT = os.environ["HOME"] + '/data/acronym/grasps'
def load_acronym_index():
if not os.stat(ACRONYM_ROOT):
return None
acronym_paths = [f for f in listdir(ACRONYM_ROOT) if isfile(join(ACRONYM_ROOT, f))]
acronym_tuples = [f[:f.rfind(".")].split("_") for f in acronym_paths]
by_cat = defaultdict(lambda: defaultdict(list))
for i, (cat, obj, scale) in enumerate(acronym_tuples):
by_cat[cat][obj].append((float(scale), acronym_paths[i]))
return by_cat
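# Returned structure sketch (category/hash names below are hypothetical, for illustration):
#   by_cat["Mug"]["abc123"] -> [(0.05, "Mug_abc123_0.05.h5"), ...]
# i.e. category -> object hash -> list of (scale, grasp-file name) tuples.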
def file_exists_on_omni(file_path):
result, _ = omni.client.stat(file_path)
if result == omni.client.Result.OK:
return True
return False
async def create_folder_on_omni(folder_path):
if not file_exists_on_omni(folder_path):
result = await omni.client.create_folder_async(folder_path)
return result == omni.client.Result.OK
async def convert(in_file, out_file):
    # Import lazily so the Kit asset-converter extension is only required when converting.
    import omni.kit.asset_converter as assetimport
    # Folders must be created first through omni's usd_ext, or it won't be able to
    # create the files in them during the current session.
out_folder = out_file[0 : out_file.rfind("/") + 1]
# only call create_folder_on_omni if it's connected to an omni server
if out_file.startswith("omniverse://"):
await create_folder_on_omni(out_folder + "materials")
def progress_callback(progress, total_steps):
pass
converter_context = omni.kit.asset_converter.AssetConverterContext()
# setup converter and flags
converter_context.as_shapenet = True
converter_context.single_mesh = True
instance = omni.kit.asset_converter.get_instance()
task = instance.create_converter_task(in_file, out_file, progress_callback, converter_context)
success = True
while True:
success = await task.wait_until_finished()
if not success:
await asyncio.sleep(0.1)
else:
break
return success
class ShapeNetPrim(RigidPrim):
def __init__(self, prim_path: str, metadata, name: str = "rigid_prim", position: Optional[Sequence[float]] = None, translation: Optional[Sequence[float]] = None, orientation: Optional[Sequence[float]] = None, scale: Optional[Sequence[float]] = None, visible: Optional[bool] = None, mass: Optional[float] = None, density: Optional[float] = None, linear_velocity: Optional[np.ndarray] = None, angular_velocity: Optional[np.ndarray] = None) -> None:
super().__init__(prim_path, name, position, translation, orientation, scale, visible, mass, density, linear_velocity, angular_velocity)
unit = metadata["unit"]
self.materials = []
self.material_inputs = {}
self.shaders = []
self.shader_inputs = {}
self._geometry_prims = []
for p in Usd.PrimRange(self.prim):
prim_type = get_prim_type_name(p.GetPath())
if p.GetPath() != self.prim_path and prim_type == "Xform":
as_xform = XFormPrim(p.GetPath())
as_xform.set_local_scale((unit, unit, unit))
self._geometery_xform = as_xform
self._geometry_prims = p.GetChildren()
self._geometry_prims = [UsdGeom.Mesh(raw) for raw in self._geometry_prims]
elif prim_type == "Material":
as_material = UsdShade.Material(p)
self.materials.append(as_material)
elif prim_type == "Shader":
as_shader = UsdShade.Shader(p)
inputs = as_shader.GetInputs()
self.shaders.append(as_shader)
self.shader_inputs[p.GetPath()] = {}
for input in inputs:
self.shader_inputs[p.GetPath()][input.GetFullName()] = input
self.add_colliders()
# Merge component meshes
all_points = []
all_indices = []
all_counts = []
index_offset = 0
for component in self._geometry_prims:
points = component.GetPointsAttr().Get()
indices = component.GetFaceVertexIndicesAttr().Get()
counts = component.GetFaceVertexCountsAttr().Get()
offset_indices = [x + index_offset for x in indices]
all_points.extend(points)
all_indices.extend(offset_indices)
all_counts.extend(counts)
index_offset = index_offset + len(points)
self.collision_geom = UsdGeom.Mesh.Define(get_current_stage(), prim_path + "/merged")
scale = self.collision_geom.AddXformOp(UsdGeom.XformOp.TypeScale, UsdGeom.XformOp.PrecisionDouble, "")
scale.Set(Gf.Vec3d(unit, unit, unit))
self.collision_geom.CreatePointsAttr(all_points)
self.collision_geom.CreateFaceVertexIndicesAttr(all_indices)
self.collision_geom.CreateFaceVertexCountsAttr(all_counts)
UsdGeom.Imageable(self.collision_geom).MakeInvisible()
self.make_visible()
def make_visible(self):
        # Restore full opacity on every shader (converted assets can import transparent)
for shader in self.shaders:
opacity_input = self.shader_inputs[shader.GetPath()].get("inputs:opacity_constant", None)
if opacity_input:
opacity_input.Set(1.0)
def add_colliders(self, approximationShape="convexDecomposition"):
        # Apply colliders (convex decomposition by default) to every mesh under this prim
setColliderSubtree(self.prim, approximationShape)
"""for mesh in self._geometry_prims:
UsdPhysics.CollisionAPI.Apply(mesh.GetPrim())
meshCollisionAPI = UsdPhysics.MeshCollisionAPI.Apply(mesh.GetPrim())
meshCollisionAPI.CreateApproximationAttr().Set("none")"""
@property
def geom(self):
return self.collision_geom
async def add_shapenetsem_model(category, nth, prim_path, position, name):
global ACRONYM_BY_CAT
try:
import meshsets
        # Point meshsets at the local mesh cache before loading the dataset
os.environ['MESHSETS_LOCAL_ROOT_DIR'] = os.environ['HOME'] + '/data/meshes'
dataset = meshsets.load_dataset('ShapeNetSem watertight')
obj_filepath = dataset.get_filenames(category)[nth]
obj_filename = obj_filepath[obj_filepath.rfind("/",1) + 1:]
obj_name = obj_filename[:obj_filename.rfind(".")]
except ImportError:
print("Couldn't import nvidia-meshsets. Can't add shapenet model.")
return None
if ACRONYM_BY_CAT is None:
ACRONYM_BY_CAT = load_acronym_index()
scale = None
if ACRONYM_BY_CAT is not None:
import h5py
scales = ACRONYM_BY_CAT[category][obj_name]
scale, filename = scales[0]
data = h5py.File(ACRONYM_ROOT + "/" + filename, "r")
grasps = np.array(data["grasps/transforms"])
success = np.array(data["grasps/qualities/flex/object_in_gripper"])
offset = np.identity(4)
# (invert_T(get_prim_world_T_meters("/motion_controller_target")) @ get_prim_world_T_meters(self.franka.prim_path + "/panda_hand"))[:3, 3]
offset[2,3] = .06
grasps = grasps[success == 1] @ offset
else:
grasps = None
dataset_name = obj_filepath.replace(os.environ['MESHSETS_LOCAL_ROOT_DIR'], '')
dataset_name = dataset_name[1:dataset_name.find("/",1)]
converted_folder_name = os.environ["MESHSETS_LOCAL_ROOT_DIR"] + "/" + dataset_name + "/usd"
    out_filepath = converted_folder_name + "/" + obj_name + ".usd"  # obj_name already has its extension stripped above
import pathlib
pathlib.Path(converted_folder_name).mkdir(parents=True, exist_ok=True)
pathlib.Path(converted_folder_name + "/materials").mkdir(parents=True, exist_ok=True)
if not os.path.isfile(out_filepath):
await convert(obj_filepath, out_filepath)
added = add_reference_to_stage(out_filepath, prim_path)
metadata = dataset.get_metadata(obj_filepath)
if scale is not None:
metadata["unit"] = scale
    # Wrap the referenced asset with physics, colliders, and a merged collision mesh
wrapped = ShapeNetPrim(prim_path, metadata, name=name, translation=position, mass=0.03)
wrapped.grasp_annotations = grasps
return wrapped
| 8,763 |
Python
| 41.543689 | 450 | 0.65263 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/proposals.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import itertools
import math
import time
from enum import Enum
from typing import List, Optional, Union
import carb
import numpy as np
import quaternion
from omni.isaac.core.prims.rigid_prim import RigidPrim
from srl.teleop.assistance.motion_commander import ApproachParams
from srl.teleop.assistance.suggestions import generate_candidate_grasps, generate_candidate_placements
from srl.teleop.assistance.transforms import cone_vectors, get_obj_poses, invert_T, make_rotation_matrix, normalize, orthogonalize, pack_Rp, pq2T, \
rotate_vec_by_quat, shortest_arc, transform_point
class InvalidReason(Enum):
VALID = 0
UNREACHABLE = 1
MOVING = 2
GROUND_COLLISION = 3
SCENE_COLLISION = 4
UNKNOWN = 5
class GroupedPoseProposalTable:
def __init__(self, poses: np.ndarray, owning_objects: List[RigidPrim], obj_poses: np.ndarray, pose_owners: np.ndarray, groups: Optional[np.ndarray] = None):
self._poses = poses
self._poses_world = None
if groups is None:
self._groups = np.zeros((poses.shape[0]))
else:
self._groups = groups
self.objects = owning_objects
self.obj_Ts = obj_poses
self.objects_dirty = np.full((len(obj_poses),), True, dtype=bool)
self._owners = pose_owners
self._configs = np.full((len(poses), 7), np.nan, dtype=float)
self._valid = np.full((len(poses)), InvalidReason.UNKNOWN.value, dtype=int)
def update_world_poses(self, updated_poses: np.ndarray):
if self._poses_world is None:
self._poses_world = np.empty_like(self._poses)
self._poses_world[:] = updated_poses
def update_world_poses_masked(self, mask: np.ndarray, updated_poses: np.ndarray):
if self._poses_world is None:
self._poses_world = np.empty_like(self._poses)
self._poses_world[mask] = updated_poses
def mask_by_owner(self, owner_id: int) -> np.ndarray:
return self._owners == owner_id
@property
def valid(self):
return self._valid == InvalidReason.VALID.value
@property
def proposable(self):
return self.valid
def invalidate(self, mask: np.ndarray, reason: InvalidReason):
self._valid[mask] = reason.value
# Ensure that consumers don't get stale IK solutions
        self._configs[mask] = np.nan  # boolean indexing returns a copy, so .fill() was a no-op
def invalidate_submask(self, mask: np.ndarray, submask: np.ndarray, reason: InvalidReason):
masked, = np.where(mask)
self._valid[masked[submask]] = reason.value
# Ensure that consumers don't get stale IK solutions
        self._configs[masked[submask]] = np.nan  # fancy indexing returns a copy, so .fill() was a no-op
def invalidate_all(self, reason: InvalidReason):
self._valid[:] = reason.value
self._configs.fill(np.nan)
def __len__(self):
return self._poses.shape[0]
def empty(self):
return self.__len__() == 0
class Proposal:
def __init__(self, identifier: int, table: GroupedPoseProposalTable) -> None:
self._table = table
self.identifier = identifier
@property
def valid(self):
return self._table._valid[self.identifier] == 0
@property
def T_obj(self):
return self._table._poses[self.identifier][:]
@property
def T_world(self):
return self._table._poses_world[self.identifier][:]
def mark_invalid(self, reason: InvalidReason):
self._table._valid[self.identifier] = reason.value
class FixedTargetProposal:
def __init__(self, target_T: np.ndarray):
self.target_T = target_T
self.joint_config = np.full(9, np.nan)
@property
def T_world(self):
return self.target_T
@property
def valid(self):
return True
"""
Helpful geometry references:
* Surfaces: https://en.wikipedia.org/wiki/Surface_(mathematics)
"""
class GraspProposal(Proposal):
"""Proposals are suggestions that have been posed to the user. They can become invalid due to kinematics, collision, etc,
and they can be in any of several interaction states.
"""
def __init__(self, identifier: int, table: GroupedPoseProposalTable) -> None:
super().__init__(identifier, table)
@property
def obj_T(self) -> RigidPrim:
return self._table.obj_Ts[self.obj_id]
@property
def obj_id(self) -> int:
return self._table._owners[self.identifier]
def map_velocity_input(self, position: np.ndarray, vel: np.ndarray):
if np.linalg.norm(vel) < 0.0001:
# Fixture is undefined for 0 vel
return vel
# Prefer straight line to goal
line_to_goal = self.T_world[:3,3] - position
D = np.array([line_to_goal]).T
span_D = D @ (np.linalg.pinv(D.T @ D) @ D.T)
goal_dist = np.linalg.norm(line_to_goal)
# Lower dist -> more attenuation of motion not allowed by fixture
attenuation = sigmoid(goal_dist, .35, 5.0)
return vel @ (span_D + attenuation * (np.identity(3) - span_D))
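    # Math sketch: D spans the straight line to the goal, and span_D = D (D^T D)^+ D^T
    # projects velocities onto that line; the orthogonal remainder (I - span_D) is
    # scaled by a sigmoid of goal distance, so off-axis motion is progressively
    # damped as the end effector closes in on the grasp pose.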
@property
def joint_config(self):
return self._table._configs[self.identifier][:]
@property
def valid(self):
return self._table._valid[self.identifier] == InvalidReason.VALID.value
def update_eff_goal(self, eff_T_world, joint_config):
self._table._poses_world[self.identifier] = eff_T_world
self._table._configs[self.identifier] = joint_config
self._table._valid[self.identifier] = InvalidReason.VALID.value
def get_eff_T(self):
"""
The target pose where the end effector should be for the grasp. Not guaranteed to be reachable.
"""
return self.obj_T.dot(self.T_obj)
def build_approach_grasp_sample_pattern(n_rotations=14, max_rotation=math.pi, n_tilts=12, n_standoffs=1, n_neighbors=18):
z_rotations = np.zeros((1 + n_rotations,4,4))
theta = np.empty((1 + n_rotations))
theta[0] = 0
theta[1: 1 + (n_rotations // 2)] = np.linspace(-max_rotation / 2, 0, n_rotations // 2, endpoint=True)
theta[1 + (n_rotations //2):] = np.linspace(max_rotation / 2, 0, n_rotations // 2, endpoint=True)
z_rotations[:,0,0] = np.cos(theta)
z_rotations[:,0,1] = -np.sin(theta)
z_rotations[:,1,0] = np.sin(theta)
z_rotations[:,1,1] = np.cos(theta)
z_rotations[:,2,2] = 1
z_rotations[:,3,3] = 1
if n_neighbors < 10:
angle = np.linspace(0, math.pi * 2, n_neighbors // 2, endpoint=False)
rad = np.array((0.0025,0.005))
xy_offsets = np.vstack([(0,0), (np.array([np.cos(angle), np.sin(angle)]).T[None] * rad[:, None, None]).reshape(-1, 2)])
else:
# Use a hexagonal pattern to pack points efficiently
angle = np.empty((n_neighbors + 1))
angle[0] = 0
angle[1:7] = np.linspace(0, math.pi * 2, 6, endpoint=False)
angle[7:] = np.linspace(0, math.pi * 2, 12, endpoint=False)
rad = np.empty((n_neighbors + 1))
rad[0] = 0
rad[1:7] = .005
rad[7:] = .0075
xy_offsets = np.array([np.cos(angle), np.sin(angle)]).T * rad[:, None]
normals = np.vstack([(0,0,1), cone_vectors((0.1, 0.2, 0.3), n_tilts //3).reshape((-1, 3))])
tilts_R = make_rotation_matrix(normals, np.full_like(normals, (1,0,0), dtype=float))
grasp_Ts = np.zeros((n_standoffs + 1, n_rotations + 1, n_neighbors + 1, n_tilts + 1, 4, 4))
grasp_Ts[..., :, :] = np.identity(4)
grasp_Ts[..., (0,1),3] = xy_offsets[None, None,:,None]
points_view = grasp_Ts[..., :, 3]
points_view[1:,..., 2] = (.0075 * np.mgrid[1:n_standoffs + 1])[:, None, None, None]
grasp_Ts[..., :, 3] = points_view
grasp_Ts[..., :3, :3] = tilts_R[None, None,None, :]
grasp_Ts[:] = grasp_Ts[:, 0, :, :][:, None, :, :] @ z_rotations[None,:,None, None]
return np.reshape(grasp_Ts, (-1, 4, 4))
SAMPLER_PATTERN = build_approach_grasp_sample_pattern()
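# Shape sketch (with the default arguments above): the pattern stacks
# (n_standoffs + 1) * (n_rotations + 1) * (n_neighbors + 1) * (n_tilts + 1)
# = 2 * 15 * 19 * 13 = 7410 homogeneous transforms, flattened to (7410, 4, 4).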
class GraspNormalProposalTable():
def __init__(self, object: RigidPrim, approach_T: np.ndarray, point: np.ndarray, normal: np.ndarray) -> None:
self.object = object
self.point = point
self.normal = normal
ee_ax = approach_T[:3, 0]
ee_ay = approach_T[:3, 1]
proposed_face_R = np.array([ee_ax, ee_ay, -normal]).T
R = orthogonalize(proposed_face_R, prioritize=(2,0,1))
T = pack_Rp(R, point)
carb.profiler.begin(1, "buildnormaltable", active=True)
self._poses_world = T @ SAMPLER_PATTERN
carb.profiler.end(1, True)
self._valid = np.full((len(self._poses_world)), InvalidReason.UNKNOWN.value, dtype=int)
self._configs = np.full((len(self._poses_world), 7), np.nan, dtype=float)
@property
def grasp_Ts(self):
return self._poses_world
@property
def valid(self):
return self._valid == InvalidReason.VALID.value
@property
def proposable(self):
return self.valid
def invalidate(self, mask: np.ndarray, reason: InvalidReason):
self._valid[mask] = reason.value
# Ensure that consumers don't get stale IK solutions
        self._configs[mask] = np.nan  # boolean indexing returns a copy, so .fill() was a no-op
def invalidate_submask(self, mask: np.ndarray, submask: np.ndarray, reason: InvalidReason):
masked, = np.where(mask)
self._valid[masked[submask]] = reason.value
# Ensure that consumers don't get stale IK solutions
        self._configs[masked[submask]] = np.nan  # fancy indexing returns a copy, so .fill() was a no-op
class PlacementProposal(Proposal):
def __init__(self, identifier, table, support_obj, place_obj) -> None:
super().__init__(identifier, table)
self.support_obj = support_obj
self.place_obj = place_obj
def update_eff_goal(self, eff_T_world, joint_config):
self._table._poses_world[self.identifier] = eff_T_world
self._table._configs[self.identifier] = joint_config
self._table._valid[self.identifier] = InvalidReason.VALID.value
def get_placement_T(self):
"""
The target pose to place the object into (world frame). Not guaranteed to be reachable.
"""
support_T = pq2T(*self.support_obj.get_world_pose())
return support_T.dot(self.T_obj)
def get_support_normal(self):
# T_obj position is vector from the support centroid to the place centroid in the support frame
# Rotate it into the global frame
return normalize(pq2T(*self.support_obj.get_world_pose())[:3,:3] @ self.T_obj[:3, 3])
def sigmoid(x: Union[float, np.array], x_midpoint: float, steepness: float):
"""Maps numbers to [0,1], linearly near midpoint, then logarithmically at tails
"""
return 1. / (1. + np.exp(-steepness * (x - x_midpoint)))
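# Worked example: sigmoid(0.35, x_midpoint=0.35, steepness=5.0) == 0.5 exactly,
# sigmoid(1.35, 0.35, 5.0) = 1 / (1 + e^-5) ~= 0.993, and sigmoid(0.0, 0.35, 5.0)
# ~= 0.148, so the output grows smoothly with distance past the midpoint.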
class PlanePlaneProposalTable:
def __init__(self, owning_objects: List[RigidPrim], obj_poses: np.ndarray, support_centroid: np.ndarray, support_normals: np.ndarray, facet_object_owner: np.ndarray, facet_boundaries: List[List[int]]):
self.owning_objects = owning_objects
self.support_centroids = support_centroid
self.support_normals = support_normals
self.facet_object_owner = facet_object_owner
self._object_poses = obj_poses.copy()
self._valid = np.full((len(support_centroid)), InvalidReason.UNKNOWN.value, dtype=int)
def update_object_poses(self, poses: np.ndarray):
self._object_poses[:] = poses
def get_centroids_world(self, mask=None):
if mask is None:
# No op mask
mask = ...
world_Ts = self._object_poses[self.facet_object_owner][mask] @ pack_Rp(np.identity(3), self.support_centroids[mask])
return world_Ts[...,:3, 3]
def get_normals_world(self, mask=None):
if mask is None:
# No op mask
mask = ...
result = self._object_poses[self.facet_object_owner][mask][...,:3,:3] @ self.support_normals[mask][..., None]
return result.squeeze()
class PlanePlaneProposal():
def __init__(self, table: PlanePlaneProposalTable, support_index: int, place_index: int) -> None:
self._table = table
self.support_index = support_index
self.place_index = place_index
self.trans_offset = None
self.rot_offset = None
self.T_world = None
# FIXME: Check for 0 dot product
self.support_a1 = (1,0,0)
self.support_a2 = np.cross(self.support_normal_world, (1,0,0))
D = np.array([self.support_a1, self.support_a2]).T
self.span_D = D @ (np.linalg.pinv(D.T @ D) @ D.T)
self._valid = InvalidReason.VALID
@property
def support_obj(self) -> RigidPrim:
return self._table.owning_objects[self._table.facet_object_owner[self.support_index]]
@property
def support_obj_T(self) -> RigidPrim:
return self._table._object_poses[self._table.facet_object_owner[self.support_index]]
@property
def support_normal(self) -> np.ndarray:
return self._table.support_normals[self.support_index]
@property
def support_normal_world(self) -> np.ndarray:
return self._table.get_normals_world(mask=self.support_index)
@property
def support_centroid(self) -> np.ndarray:
return self._table.support_centroids[self.support_index]
@property
def support_centroid_world(self) -> np.ndarray:
return self._table.get_centroids_world(mask=self.support_index)
@property
def place_obj(self) -> RigidPrim:
return self._table.owning_objects[self._table.facet_object_owner[self.place_index]]
@property
def place_obj_T(self) -> np.ndarray:
return self._table._object_poses[self._table.facet_object_owner[self.place_index]]
@property
def place_normal(self) -> np.ndarray:
return self._table.support_normals[self.place_index]
@property
def support_T(self) -> np.ndarray:
        # We'll take the normal as the z axis of its local coordinate space,
# create a shortest rotation to get the object z to match that z,
# then use that rotation to define x and y
assert False
@property
def place_normal_world(self) -> np.ndarray:
return self._table.get_normals_world(mask=self.place_index)
@property
def place_centroid(self) -> np.ndarray:
return self._table.support_centroids[self.place_index]
@property
def place_centroid_world(self) -> np.ndarray:
return self._table.get_centroids_world(mask=self.place_index)
@property
def support_p(self) -> np.ndarray:
return self._table.support_centroids[self.support_index]
def map_velocity_input(self, position: np.ndarray, vel: np.ndarray):
if np.linalg.norm(vel) < 0.0001:
# Fixture is undefined for 0 vel
return vel
cur_p, cur_q = self.place_obj.get_world_pose()
        cur_p += rotate_vec_by_quat(self.place_centroid, quaternion.from_float_array(cur_q))  # `place_p` was undefined; place_centroid is the local place point
# TODO: Make sure we should be using cur_p and not the position arg
plane_dist = self.support_normal.dot(cur_p)
# Lower dist -> more attenuation of motion not allowed by fixture
attenuation = sigmoid(plane_dist, .35, 5.0)
return vel @ (self.span_D + attenuation * (np.identity(3) - self.span_D))
def project_control_constraint_plane(self, vector: np.ndarray):
        # Unimplemented placeholder
assert False
def project_to_constraint(self, point_world, point_obj):
# We'll work in the frame of the support object
support_obj_T = self.support_obj_T
support_normal = self.support_normal
support_centroid = self.support_centroid
place_centroid_in_support = transform_point(point_world, invert_T(support_obj_T))
#viz_axis_named("support", cur_p, cur_q, scale=(.2,.2,.2))
from_v = place_centroid_in_support - support_centroid
amount_orthogonal = np.dot(support_normal, from_v)
proj_on_plane = place_centroid_in_support - amount_orthogonal * support_normal
        # Lift the projected point back off the plane by the place point's offset and return it in the world frame
return transform_point(proj_on_plane + np.linalg.norm(point_obj) * support_normal, support_obj_T)
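    # Math sketch for project_to_constraint: with unit support normal n and facet
    # centroid c (both in the support frame), a point p maps to p - ((p - c) . n) n,
    # its orthogonal projection onto the support plane, then is pushed back out
    # along n by |point_obj| before being returned in the world frame.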
def project_current_to_solution(self):
# Where is the placement point on the plane right now?
current_in_plane = self.project_to_constraint(self.place_centroid_world, self.place_centroid)
self.trans_offset = current_in_plane
def update_proposal(self, trans: np.ndarray):
#trans_in_plane = project_control_constraint_plane(trans)
trans_in_plane = (trans[0], trans[1], 0)
self.trans_offset += trans_in_plane
def get_placement_T(self):
# Are the normals already parallel?
normal_dot = self.support_normal.dot(self.place_normal)
if normal_dot > .99999:
# Same direction -> 180 about arbitrary axis
alignment_rotation = quaternion.quaternion(0,0,1,0)
elif normal_dot < -.999999:
# Already exactly opposing -> identity quat
alignment_rotation = quaternion.quaternion(1,0,0,0)
else:
# Shortest arc between the vectors
a = np.cross(self.support_normal, self.place_normal)
# w is simple because we have unit normals: sqrt(norm(v1)**2 * norm(v2)**2) -> 1
alignment_rotation = quaternion.quaternion(1, *a).normalized()
placement_T_obj = rotate_vec_by_quat(self.place_centroid, alignment_rotation)
return pq2T(self.support_centroid + -placement_T_obj, alignment_rotation)
"""T = pack_Rp(axes_to_mat(self.place_normal, (0,0,1)), -placement_T_obj + self.current_offset)
return T"""
@property
def valid(self):
return self._valid
def mark_invalid(self, reason: InvalidReason):
self._valid = reason
def build_proposal_tables(collision_checker, objects, fixed_geometry, gripper_collision_mesh):
obj_Ts = get_obj_poses(objects)
fixed_Ts = get_obj_poses(fixed_geometry)
candidates_by_obj = [generate_candidate_grasps(obj) for obj in objects]
per_obj = []
owners = []
for candidates, (i, obj) in zip(candidates_by_obj, enumerate(objects)):
if len(candidates) == 0:
continue
counts = collision_checker.query(candidates, from_mesh=gripper_collision_mesh, to_mesh=obj.geom, render=False, query_name=f"{obj.name}_grasp_filter")
#viz_axis_named_Ts(obj.name, pq2T(*obj.get_world_pose()) @ candidates, (.2,.2,.2))
non_colliding = candidates[counts == 0]
# NOTE: No guarantee there will be any poses left...
per_obj.append(non_colliding)
owners.append(np.full((len(non_colliding)), i))
if len(per_obj) == 0:
per_obj = [np.empty((0, 4, 4))]
owners = [np.empty((0,), dtype=int)]
grasp_suggestions = GroupedPoseProposalTable(np.vstack(per_obj), None, obj_Ts, np.hstack(owners))
placement_suggestions = [None for _ in objects]
# Break placement poses into tables based on the object in the gripper
for place_i, to_place in enumerate(objects):
placement_suggestions[place_i] = [None for _ in objects]
per_obj = []
owners = []
for align_j, align_with in enumerate(objects):
if place_i == align_j:
continue
placements = generate_candidate_placements(to_place, align_with)
per_obj.append(placements)
owners.append(np.full((len(placements),), align_j))
if len(per_obj) == 0:
per_obj = [np.empty((0, 4, 4))]
owners = [np.empty((0,), dtype=int)]
placement_suggestions[place_i] = GroupedPoseProposalTable(np.vstack(per_obj), None, obj_Ts, np.hstack(owners))
"""if place_i == 1:
align_T = pq2T(*self.objects[align_j].get_world_pose())
for l, placement_T in enumerate(self.placement_suggestions[place_i][align_j]):
viz_axis_named_T(f"placement_{place_i}_{align_j}_{l}", align_T.dot(placement_T), scale=(0.4,0.4,0.4))"""
# Precompute all object support facets and their properties
centroids, normals, area, boundary = [], [], [], []
for obj in itertools.chain(objects, fixed_geometry):
if not hasattr(obj, 'geom'):
continue
support = collision_checker.get_support_surfaces(obj.geom)
centroids.append(support[0])
normals.append(support[1])
area.append(support[3])
boundary.append(support[4])
support_centroids = np.vstack(centroids)
support_normals = np.vstack(normals)
    facet_owners = [[i] * len(c) for i, c in enumerate(centroids)]  # avoid shadowing the outer `centroids` list
facet_owners = np.fromiter(itertools.chain(*facet_owners), int)
plane_proposals = PlanePlaneProposalTable(objects, np.vstack((obj_Ts, fixed_Ts)), support_centroids, support_normals, facet_owners, boundary)
return grasp_suggestions, placement_suggestions, plane_proposals
def make_approach_params_for_proposal(proposal):
if isinstance(proposal, GraspProposal):
# Pull out the Z axis of the target
approach_axis = proposal.T_world[:3, 2]
return ApproachParams(direction=0.15 * approach_axis, std_dev=0.02)
elif isinstance(proposal, PlacementProposal):
approach_axis = -proposal.get_support_normal()
return ApproachParams(direction=0.15 * approach_axis, std_dev=0.02)
elif isinstance(proposal, PlanePlaneProposal):
approach_axis = -proposal.support_normal_world
return ApproachParams(direction=0.15 * approach_axis, std_dev=0.02)
else:
return None
| 21,540 |
Python
| 37.673249 | 205 | 0.634912 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/spacemouse_demo.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import numpy as np
import quaternion
from omni.isaac.core.utils.rotations import euler_angles_to_quat
from omni.ui import scene as sc
from omni.ui import color as cl
from srl.teleop.assistance.transforms import integrate_twist_stepwise, integrate_twist
class SpaceMouseManipulator(sc.Manipulator):
def __init__(self, grid=True, axis_colors=True, **kwargs):
super().__init__(**kwargs)
self.current_twist = np.zeros(6, dtype=float)
self.grid = grid
def on_build(self):
T = np.eye(4)
points = integrate_twist_stepwise(self.current_twist[:3], self.current_twist[3:], 1, 20)
point_delta = np.linalg.norm(points[0] - points[1]) * 20
#point_deltas = np.linalg.norm(np.diff(points, axis=0), axis=1)
target_T = integrate_twist(self.current_twist[:3], self.current_twist[3:], 1)
# axes
with sc.Transform(transform=sc.Matrix44(*T.T.flatten())):
if self.grid:
t = 1
# Draw a ground grid
for v in np.linspace(-2, 2, 20):
sc.Line([v, -2, -1], [v, 2, -1], color=cl("#444444ff"), thickness=t)
sc.Line([-2, v, -1], [2, v, -1], color=cl("#444444ff"), thickness=t)
k = .25
t = 4
# Draw faint origin axis
sc.Line([0, 0, 0], [k, 0, 0], color=cl("#ff000066"), thickness=t)
sc.Line([0, 0, 0], [0, k, 0], color=cl("#00ff0066"), thickness=t)
sc.Line([0, 0, 0], [0, 0, k], color=cl("#0000ff66"), thickness=t)
opacity = max(point_delta, .2)
sc.Curve(
points.tolist(),
thicknesses=[4.0],
colors=[cl(opacity, opacity, opacity)],
curve_type=sc.Curve.CurveType.LINEAR,
)
with sc.Transform(transform=sc.Matrix44(*target_T.T.flatten())):
k = .5
sc.Line([0, 0, 0], [k, 0, 0], color=cl("#ff0000"), thickness=t)
sc.Line([0, 0, 0], [0, k, 0], color=cl("#00ff00"), thickness=t)
sc.Line([0, 0, 0], [0, 0, k], color=cl("#0000ff"), thickness=t)
def update(self, control):
trans, rot = control.xyz, control.rpy
rot[[0,1]] = rot[[1,0]]
rot[0] *= -1
rot[2] *= -1
dori_world = quaternion.from_float_array(euler_angles_to_quat(rot))
self.current_twist[:3] = trans
self.current_twist[3:] = quaternion.as_rotation_vector(dori_world)
self.invalidate()
| 2,662 |
Python
| 37.594202 | 96 | 0.549587 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/viewport.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from omni.kit.viewport.utility import create_viewport_window, get_num_viewports, get_viewport_from_window_name, disable_context_menu, disable_selection
from omni.kit.viewport.window import get_viewport_window_instances, ViewportWindow
import omni
from pxr import Sdf
from typing import Optional
def get_window_by_name(window_name: str) -> Optional[ViewportWindow]:
try:
from omni.kit.viewport.window import get_viewport_window_instances
# Get every ViewportWindow, regardless of UsdContext it is attached to
for window in get_viewport_window_instances(None):
if window.title == window_name:
return window
except ImportError:
pass
def get_realsense_viewport(camera_path: Sdf.Path,):
num_viewports = get_num_viewports()
if num_viewports == 1:
viewport_window = create_viewport_window(camera_path=camera_path,)
else:
viewport_window = get_window_by_name("Viewport 1")
viewport_window.viewport_api.set_active_camera(camera_path)
return viewport_window
def configure_main_viewport(viewport_window):
viewport_window.viewport_widget.fill_frame = False
viewport_window.viewport_api.set_texture_resolution((1280,720))
def configure_realsense_viewport(viewport_window):
viewport_window.viewport_widget.fill_frame = False
viewport_window.viewport_api.set_texture_resolution((1280,720))
def disable_viewport_interaction(viewport_window):
# These are RAII-style handles which will keep the viewport configured this way until the window handle
# is destroyed.
return disable_selection(viewport_window, disable_click=True), disable_context_menu(viewport_window)
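# Usage sketch (illustrative): keep the returned handles referenced for as long
# as the viewport should stay non-interactive, e.g.
#   interaction_handles = disable_viewport_interaction(viewport_window)
#   ...  # selection and the context menu stay disabled while the handles are alive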
def layout_picture_in_picture(main_viewport, nested_viewport):
width = main_viewport.width / 3
height = 26 + (width * 9/16)
pos_x = main_viewport.width + main_viewport.position_x - width
pos_y = main_viewport.position_y
nested_viewport.setPosition(pos_x, pos_y)
nested_viewport.width = width
nested_viewport.height = height
| 2,189 |
Python
| 38.107142 | 151 | 0.734582 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/ui.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import os
import omni.ui as ui
from enum import Enum
from omni.isaac.ui.ui_utils import add_separator, add_line_rect_flourish, get_style
from omni.kit.window.filepicker import FilePickerDialog
CONTROL_FRAMES = ["End-effector", "Mixed", "World"]
class ControlFrame(Enum):
END_EFFECTOR = 0
MIXED = 1
WORLD = 2
ASSISTANCE_MODES = ["Completion", "Virtual Fixture", "Forced Fixture", "Interactive Fixture"]
class AssistanceMode(Enum):
COMPLETION = 0
VIRTUAL_FIXTURE = 1
FORCED_FIXTURE = 2
INTERACTIVE_FIXTURE = 3
def add_overlay(viewport_window: ui.Window, ext_id: str):
with viewport_window.get_frame(ext_id + "_button_indicator_center"):
with ui.Placer(offset_x=ui.Percent(45), offset_y=ui.Percent(90)):
with ui.ZStack(width=ui.Percent(10), height=48):
center_bg = ui.Rectangle(name="bg", style={"background_color": 0x33000000, "border_radius": 8})
center_label = ui.Label("",name="center_label", alignment=ui.Alignment.CENTER, width=ui.Percent(100), height=ui.Percent(100), style={"color":0x66FFFFFF, "font_size":24})
with viewport_window.get_frame(ext_id + "_button_indicator_left"):
with ui.Placer(offset_x=ui.Percent(10), offset_y=ui.Percent(90)):
with ui.ZStack(width=ui.Percent(5), height=48):
left_bg = ui.Rectangle(name="bg2", style={"background_color": 0x33000000, "border_radius": 8})
left_label = ui.Label("", name="left_label", alignment=ui.Alignment.CENTER, width=ui.Percent(100), height=ui.Percent(100), style={"color":0x99FFFFFF, "font_size":16})
return (center_label, center_bg), (left_label, left_bg)
LABEL_WIDTH = 160
LABEL_WIDTH_LIGHT = 235
LABEL_HEIGHT = 18
HORIZONTAL_SPACING = 4
def str_builder(
label="",
type="stringfield",
default_val=" ",
tooltip="",
on_clicked_fn=None,
use_folder_picker=False,
read_only=False,
item_filter_fn=None,
bookmark_label=None,
bookmark_path=None,
folder_dialog_title="Select Output Folder",
folder_button_title="Select Folder",
):
"""Creates a Stylized Stringfield Widget
Args:
label (str, optional): Label to the left of the UI element. Defaults to "".
type (str, optional): Type of UI element. Defaults to "stringfield".
default_val (str, optional): Text to initialize in Stringfield. Defaults to " ".
        tooltip (str, optional): Tooltip to display over the UI elements. Defaults to "".
        on_clicked_fn (Callable, optional): Call-back invoked with the new value after a file is picked. Defaults to None.
        use_folder_picker (bool, optional): Add a folder picker button to the right. Defaults to False.
read_only (bool, optional): Prevents editing. Defaults to False.
item_filter_fn (Callable, optional): filter function to pass to the FilePicker
bookmark_label (str, optional): bookmark label to pass to the FilePicker
bookmark_path (str, optional): bookmark path to pass to the FilePicker
Returns:
AbstractValueModel: model of Stringfield
"""
with ui.HStack():
ui.Label(label, width=LABEL_WIDTH, alignment=ui.Alignment.LEFT_CENTER, tooltip=tooltip)
str_field = ui.StringField(
name="StringField", width=ui.Fraction(1), height=0, alignment=ui.Alignment.LEFT_CENTER, read_only=read_only
).model
str_field.set_value(default_val)
if use_folder_picker:
def update_field(filename, path):
if filename == "":
val = path
elif filename[0] != "/" and path[-1] != "/":
val = path + "/" + filename
elif filename[0] == "/" and path[-1] == "/":
val = path + filename[1:]
else:
val = path + filename
str_field.set_value(val)
if on_clicked_fn:
on_clicked_fn(val)
def set_initial_path(picker):
input_path = str_field.get_value_as_string()
picker.set_current_directory(input_path)
# Doesn't work...
#picker.navigate_to(input_path)
add_folder_picker_icon(
on_click_fn=update_field,
on_open_fn=set_initial_path,
item_filter_fn=item_filter_fn,
bookmark_label=bookmark_label,
bookmark_path=bookmark_path,
dialog_title=folder_dialog_title,
button_title=folder_button_title,
)
else:
add_line_rect_flourish(False)
return str_field
def add_folder_picker_icon(
on_click_fn,
on_open_fn=None,
item_filter_fn=None,
bookmark_label=None,
bookmark_path=None,
dialog_title="Select Trajectory File",
button_title="Select File",
):
def open_file_picker():
def on_selected(filename, path):
on_click_fn(filename, path)
file_picker.hide()
def on_canceled(a, b):
file_picker.hide()
file_picker = FilePickerDialog(
dialog_title,
allow_multi_selection=False,
apply_button_label=button_title,
click_apply_handler=lambda a, b: on_selected(a, b),
click_cancel_handler=lambda a, b: on_canceled(a, b),
item_filter_fn=item_filter_fn,
enable_versioning_pane=False,
)
if bookmark_label and bookmark_path:
file_picker.toggle_bookmark_from_path(bookmark_label, bookmark_path, True)
if on_open_fn:
on_open_fn(file_picker)
with ui.Frame(width=0, tooltip=button_title):
ui.Button(
name="IconButton",
width=24,
height=24,
clicked_fn=open_file_picker,
style=get_style()["IconButton.Image::FolderPicker"],
alignment=ui.Alignment.RIGHT_TOP,
)
def multi_btn_builder(
label="", type="multi_button", text=None, tooltip=None, on_clicked_fn=None
):
"""Creates a Row of Stylized Buttons
Args:
label (str, optional): Label to the left of the UI element. Defaults to "".
type (str, optional): Type of UI element. Defaults to "multi_button".
        text (list, optional): List of text rendered on the UI elements; one button is created per entry (count is derived from len(text)). Defaults to ["button", "button"].
tooltip (list, optional): List of tooltips to display over the UI elements. Defaults to ["", "", ""].
on_clicked_fn (list, optional): List of call-backs function when clicked. Defaults to [None, None].
Returns:
list(ui.Button): List of Buttons
"""
btns = []
count = len(text)
with ui.VStack():
ui.Label(label, width=ui.Fraction(1), alignment=ui.Alignment.CENTER, tooltip=tooltip[0])
ui.Spacer(height=5)
for i in range(count):
btn = ui.Button(
text[i].upper(),
name="Button",
width=ui.Fraction(1),
clicked_fn=on_clicked_fn[i],
tooltip=tooltip[i + 1],
style=get_style(),
alignment=ui.Alignment.LEFT_CENTER,
)
if i in [3, 6, 9]:
ui.Spacer(height=10)
btns.append(btn)
            if i < count - 1:
ui.Spacer(height=5)
#add_line_rect_flourish()
return btns
from string import Template
class DeltaTemplate(Template):
delimiter = "%"
def strfdelta(tdelta, fmt):
d = {"D": tdelta.days}
hours, rem = divmod(tdelta.seconds, 3600)
minutes, seconds = divmod(rem, 60)
d["H"] = '{:02d}'.format(hours)
d["M"] = '{:02d}'.format(minutes)
d["S"] = '{:02d}'.format(seconds)
t = DeltaTemplate(fmt)
return t.substitute(**d)
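# Usage sketch (assumes a datetime.timedelta input):
if __name__ == "__main__":
    from datetime import timedelta
    assert strfdelta(timedelta(hours=1, minutes=2, seconds=3), "%H:%M:%S") == "01:02:03"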
| 7,908 |
Python
| 35.615741 | 185 | 0.596232 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/logging.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import numpy as np
import os
from omni.kit.widget.filebrowser.filesystem_model import FileSystemItem
import h5py
SPACEMOUSE_STATE = np.dtype([('trans', '3f'),
('rot', '3f'),
('buttons', 'i')])
POSE_DTYPE = np.dtype([('position', '3f'), ('orientation', '4f')
])
OBJECT_META_DTYPE = np.dtype([("name","S32")])
ROBOT_STATE_DTYPE = np.dtype([('eef_pose', POSE_DTYPE),
('eef_vel_lin', '3f'),
('eef_vel_ang', '3f'),
('joint_positions', '9f'),
('joint_velocities', '9f'),
('applied_joint_positions', '9f'),
('applied_joint_velocities', '9f'),
('target_pose', POSE_DTYPE)
])
UI_STATE_DTYPE = np.dtype([
('camera_pose', POSE_DTYPE),
('primary_camera', int),
('robot_ghost_joint_positions', '9f'),
('object_ghost_index', int),
('object_ghost_pose', POSE_DTYPE),
('ghost_is_snapped', bool)
])
CONTROLS_STATE_DTYPE = np.dtype([
('filtered', SPACEMOUSE_STATE),
('raw', SPACEMOUSE_STATE)
])
def get_scene_state_type(n_objects: int):
return np.dtype([('poses', POSE_DTYPE, (n_objects,))])
def get_stamped_frame_type(n_objects: int):
return np.dtype([('robot_state', ROBOT_STATE_DTYPE), ('scene_state', get_scene_state_type(n_objects)), ('controls_state', CONTROLS_STATE_DTYPE), ('ui_state', UI_STATE_DTYPE), ('step_index', 'i'), ('time', 'f')])
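# Example sketch (hypothetical sizes): allocate a log buffer of 100 frames for a
# scene with 4 tracked objects using the compound dtype above.
#   frames = np.empty((100,), dtype=get_stamped_frame_type(4))
#   frames[0]["scene_state"]["poses"].shape  # -> (4,)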
def is_hdf5_file(item: FileSystemItem):
_, ext = os.path.splitext(item.path.lower())
    return ext == ".hdf5"  # the path is lower-cased above, so only the lowercase form can occur
def is_folder(item: FileSystemItem) -> bool:
return item.is_folder
async def save_log(file_path, frames, metadata, done=lambda: None):
num_objects = len(metadata["objects"])
with h5py.File(file_path, 'w') as f:
f.attrs.update(metadata)
frames_data = np.empty((len(frames),), dtype=get_stamped_frame_type(num_objects))
for i, frame in enumerate(frames):
data = frame.data
frames_data[i]["robot_state"] = data["robot_state"]
frames_data[i]["scene_state"]["poses"] = data["scene_state"]
frames_data[i]["controls_state"] = data["controls_state"]
frames_data[i]["ui_state"] = data["ui_state"]
frames_data[i]["step_index"] = frame.current_time_step
frames_data[i]["time"] = frame.current_time
f.create_dataset('frames', data=frames_data, compression="gzip")
done()
| 2,717 |
Python
| 35.729729 | 215 | 0.566434 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/tasks/serializable_task.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from typing import Dict, Optional
import numpy as np
import omni.usd
from omni.isaac.core.tasks.base_task import BaseTask
from pxr import Usd, UsdPhysics, Sdf, PhysxSchema, UsdShade
import carb
from omni.isaac.core.objects import DynamicCylinder, DynamicCone, DynamicCuboid, VisualCuboid, FixedCuboid, GroundPlane
from omni.isaac.core.materials import VisualMaterial
from omni.isaac.core.prims import RigidPrim, XFormPrim, GeometryPrim
from omni.isaac.core.scenes.scene import Scene
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.prims import get_prim_at_path, add_reference_to_stage, delete_prim
from scipy.spatial.transform import Rotation as R
from omni.isaac.core.materials import PreviewSurface, PhysicsMaterial
from srl.teleop.assistance.camera_franka import CameraFranka
from srl.teleop.assistance.ghost_franka import GhostFranka
from srl.teleop.assistance.ghost_object import make_ghost
from srl.teleop.assistance.logging import OBJECT_META_DTYPE, POSE_DTYPE
from srl.teleop.assistance.ghost_object import GhostObject
from srl.teleop.assistance.tasks import COLORS
class SerializableTask(BaseTask):
"""[summary]
Args:
name (str, optional): [description].
target_position (Optional[np.ndarray], optional): [description]. Defaults to None.
cube_size (Optional[np.ndarray], optional): [description]. Defaults to None.
offset (Optional[np.ndarray], optional): [description]. Defaults to None.
"""
def __init__(
self,
name: str,
offset: Optional[np.ndarray] = None,
initial_scene_description = None,
) -> None:
self._initial_scene_description = initial_scene_description
super().__init__(
name=name,
offset=offset,
)
def get_scene_description(self) -> str:
stage = omni.usd.get_context().get_stage()
source_layer = stage.GetRootLayer()
prim_path = f"/World/{self.name}"
export_layer = Sdf.Layer.CreateAnonymous(".usda")
paths_map = {}
Sdf.CreatePrimInLayer(export_layer, "/Root")
Sdf.CopySpec(source_layer, prim_path, export_layer, "/Root")
paths_map[prim_path] = "/Root"
from srl.teleop.assistance import DATA_DIR
for prim in export_layer.rootPrims:
update_reference_paths(prim, DATA_DIR, ".")
for source_path, target_path in paths_map.items():
update_property_paths(prim, source_path, target_path)
return export_layer.ExportToString()
def load_scene_description(self, scene_str: str):
stage = omni.usd.get_context().get_stage()
root_layer = stage.GetRootLayer()
import_layer = Sdf.Layer.CreateAnonymous(".usda")
import_layer.ImportFromString(scene_str)
path_stem = f"/World/{self.name}"
# NOTE: The target path _must_ already be an xform prim, or CopySpec below will create
# a typeless "over" primspec in this spot, which will cause everything in the tree to not render.
paths_map = {}
with Sdf.ChangeBlock():
Sdf.CreatePrimInLayer(root_layer, path_stem)
Sdf.CopySpec(import_layer, "/Root", root_layer, path_stem)
paths_map["/Root"] = path_stem
from srl.teleop.assistance import DATA_DIR
for created_path in paths_map.values():
prim = root_layer.GetPrimAtPath(created_path)
update_reference_paths(prim, ".", DATA_DIR)
for source_path, target_path in paths_map.items():
update_property_paths(prim, source_path, target_path)
stage.GetPrimAtPath(path_stem).SetTypeName("Scope")
def update_property_paths(prim_spec, old_path, new_path):
if not prim_spec:
return
for rel in prim_spec.relationships:
rel.targetPathList.explicitItems = [
path.ReplacePrefix(old_path, new_path) for path in rel.targetPathList.explicitItems
]
for attr in prim_spec.attributes:
attr.connectionPathList.explicitItems = [
path.ReplacePrefix(old_path, new_path) for path in attr.connectionPathList.explicitItems
]
for child in prim_spec.nameChildren:
update_property_paths(child, old_path, new_path)
def update_reference_paths(prim_spec, old_prefix, new_prefix):
if prim_spec.HasInfo(Sdf.PrimSpec.ReferencesKey):
op = prim_spec.GetInfo(Sdf.PrimSpec.ReferencesKey)
items = []
items = op.ApplyOperations(items)
prim_spec.ClearReferenceList()
new_items = []
for item in items:
if item.assetPath.startswith(old_prefix):
new_items.append(Sdf.Reference(
assetPath=item.assetPath.replace(old_prefix, new_prefix, 1),
primPath=item.primPath,
layerOffset=item.layerOffset,
customData=item.customData,
))
else:
new_items.append(item)
prim_spec.referenceList.Append(new_items[-1])
for child in prim_spec.nameChildren:
update_reference_paths(child, old_prefix, new_prefix)
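# Sketch of the rewrite performed on export/import: a reference whose asset path
# starts with DATA_DIR is exported with that prefix replaced by ".", and restored
# to the absolute DATA_DIR path when the scene description is loaded again.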
| 5,488 |
Python
| 37.654929 | 119 | 0.666545 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/tasks/stacking.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import time
from typing import Dict, Optional
import carb
import numpy as np
from omni.isaac.core.objects import DynamicCuboid
from omni.isaac.core.materials import VisualMaterial
from omni.isaac.core.prims import RigidPrim, XFormPrim, GeometryPrim, RigidContactView
from omni.isaac.core.scenes.scene import Scene
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.prims import get_prim_at_path, add_reference_to_stage, delete_prim, find_matching_prim_paths
from pxr import Usd, UsdPhysics, Sdf, PhysxSchema, UsdShade
from scipy.spatial.transform import Rotation as R
from omni.isaac.core.materials import PreviewSurface, PhysicsMaterial
from srl.teleop.assistance.ghost_object import make_ghost
from srl.teleop.assistance.logging import OBJECT_META_DTYPE, POSE_DTYPE
from srl.teleop.assistance.ghost_object import GhostObject
from srl.teleop.assistance.tasks import COLORS
from srl.teleop.assistance.tasks.serializable_task import SerializableTask
from srl.teleop.assistance.tasks.table_task import TableTask
from srl.teleop.assistance.tasks.time_limited_task import TimeLimitedTask
from srl.teleop.assistance.transforms import get_obj_poses
class ContactDebounce:
def __init__(self) -> None:
self.last_change_timestamp = None
self.last_value = None
def update(self, contact_matrix, threshold=0.0001):
now = time.time()
non_zero_contact_forces = np.abs(contact_matrix) > threshold
if self.last_value is None:
self.last_value = non_zero_contact_forces
self.last_change_timestamp = np.zeros_like(self.last_value, dtype=float)
self.last_change_timestamp[:] = now
return self.last_value
changed_mask = non_zero_contact_forces ^ self.last_value
expired_mask = (now - self.last_change_timestamp) > 0.3
self.last_value[changed_mask & expired_mask] = non_zero_contact_forces[changed_mask & expired_mask]
self.last_change_timestamp[changed_mask & expired_mask] = now
return self.last_value
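# Usage sketch (illustrative): debounce a contact-force matrix sampled each physics
# step, so contacts must persist (or stay absent) for 0.3 s before the state flips.
#   debounce = ContactDebounce()
#   stable_contacts = debounce.update(view.get_contact_force_matrix())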
class StackingTask(TimeLimitedTask, TableTask, SerializableTask):
"""[summary]
Args:
name (str, optional): [description].
target_position (Optional[np.ndarray], optional): [description]. Defaults to None.
cube_size (Optional[np.ndarray], optional): [description]. Defaults to None.
offset (Optional[np.ndarray], optional): [description]. Defaults to None.
"""
def __init__(
self,
name: str = "stacking",
n_cuboids=6,
varieties=1,
offset: Optional[np.ndarray] = None,
initial_scene_description = None,
max_duration=60 * 2,
repeat=False,
rng = None
) -> None:
self.assets_root_path = get_assets_root_path()
self.n_cuboids = n_cuboids
self.varieties = varieties
self._done = False
self.robot = None
self._initial_scene_description = initial_scene_description
self.repeat = repeat
self.contact_debounce = ContactDebounce()
if rng is None:
rng = np.random.RandomState(0)
self._initial_random_state = rng.get_state()[1]
self.rng = rng
TableTask.__init__(self,
name=name,
offset=offset,
)
SerializableTask.__init__(self,
name=name,
offset=offset,
initial_scene_description=initial_scene_description
)
TimeLimitedTask.__init__(self, max_duration)
return
def get_params(self) -> dict:
base = TimeLimitedTask.get_params(self)
base.update(TableTask.get_params(self))
base.update({
"n_cuboids" : self.n_cuboids,
"varieties": self.varieties,
"seed": self._initial_random_state
})
return base
def set_up_scene(self, scene: Scene) -> None:
super().set_up_scene(scene)
if self.assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
else:
pass
UNIT = 0.032
if self._initial_scene_description is not None:
self.load_scene_description(self._initial_scene_description)
for prim in get_prim_at_path(self.ghosts_path).GetChildren():
prim_path = prim.GetPath()
name = prim.GetName()
self._ghost_objects[name] = GhostObject(prim_path, name=name)
for prim in get_prim_at_path(self.task_objects_path).GetChildren():
prim_path = prim.GetPath()
name = prim.GetName()
self._task_objects[name] = RigidPrim(prim_path, name=name)
self.add_robot()
self.add_ghost_robots()
else:
from srl.teleop.assistance import DATA_DIR
for i, color in enumerate(COLORS):
material_raw_prim = add_reference_to_stage(f"{DATA_DIR}/cardboard.usda", f"{self.task_root}/materials/cardboard_color{i}", "Material")
raw_material = UsdShade.Material(material_raw_prim)
shader = UsdShade.Shader(get_prim_at_path(str(raw_material.GetPath()) + "/Shader"))
shader.CreateInput("diffuse_tint", Sdf.ValueTypeNames.Color3f).Set((color[0] * 2, color[1] * 2, color[2] * 2))
self._materials.append(VisualMaterial(material_raw_prim.GetName(), str(raw_material.GetPath()), raw_material, [shader], raw_material))
#self._materials.append((PreviewSurface(prim_path=f"{objects_path}/materials/color{i}", color=np.array(color))))
self._physics_material = PhysicsMaterial(
prim_path=f"{self.objects_path}/materials/physics",
dynamic_friction=1.0,
static_friction=0.2,
restitution=0.0,
)
sizes = [(UNIT, UNIT, UNIT), (UNIT, UNIT, UNIT * 2), (UNIT, UNIT * 2, UNIT * 2), (UNIT, UNIT, UNIT * 4), (UNIT * 2, UNIT * 2, UNIT * 4)]
for i in range(self.n_cuboids):
choice = i % self.varieties
obj_name = f"cuboid{i}"
prim_path = f"{self.task_objects_path}/{obj_name}"
rand_pos = self.rng.uniform((.4, -.3, .1), (0.5, .3, .1))
new_object = scene.add(
DynamicCuboid(
name=obj_name,
position=rand_pos,
orientation=R.random(random_state=self.rng).as_quat(),
prim_path=prim_path,
size=1.0,
scale=sizes[choice],
visual_material=self._materials[choice],
physics_material=self._physics_material
)
)
self._task_objects[obj_name] = new_object
new_object._rigid_prim_view.set_sleep_thresholds(np.zeros(2))
meshcollisionAPI = UsdPhysics.MeshCollisionAPI.Apply(new_object.prim)
meshcollisionAPI.CreateApproximationAttr().Set("boundingCube")
ghost_name = obj_name + "_ghost"
ghost_path = f"{self.ghosts_path}/{ghost_name}"
ghost = scene.add(make_ghost(prim_path, ghost_path, ghost_name, material_path=f"{self.task_root}/materials/ghost"))
self._ghost_objects[ghost_name] = ghost
self.add_robot()
self.add_ghost_robots()
self._initial_scene_description = self.get_scene_description()
self._table_contact_view = RigidContactView(f"{self.task_objects_path}/cuboid*", [self._scene_objects["table_top"].prim_path], name="table_contact_view", apply_rigid_body_api=False)
        # Workaround: RigidContactView doesn't expose the name/is_valid/post_reset
        # interface Scene.add() expects, so patch stubs on before registering it.
self._table_contact_view.name = self._table_contact_view._name
self._table_contact_view.is_valid = lambda: True
self._table_contact_view.post_reset = lambda: None
self._scene.add(self._table_contact_view)
self._objects_contact_view = RigidContactView(f"{self.task_objects_path}/cuboid*", find_matching_prim_paths(f"{self.task_objects_path}/cuboid*"), name="objects_contact_view", apply_rigid_body_api=False)
self._objects_contact_view.name = self._objects_contact_view._name
self._objects_contact_view.is_valid = lambda: True
self._objects_contact_view.post_reset = lambda: None
self._scene.add(self._objects_contact_view)
return
def cleanup(self) -> None:
return super().cleanup()
def rerandomize(self) -> None:
for name, object in self._task_objects.items():
object.set_world_pose(self.rng.uniform((.4, -.3, .1), (0.5, .3, .1)), R.random(random_state=self.rng).as_quat())
def pre_step(self, time_step_index: int, simulation_time: float) -> None:
TimeLimitedTask.pre_step(self, time_step_index, simulation_time)
test = self._objects_contact_view.get_contact_force_matrix()
contacts = self.contact_debounce.update(test)
Ts = get_obj_poses(self._task_objects.values())
lifted = abs(Ts[0,2,3] - Ts[1,2,3]) > .025
        grasping = self.robot.gripper_contents is not None
in_contact = np.any(contacts[:,:,2])
# Any mutual z forces?
if in_contact and lifted and not grasping:
if self.repeat:
self.rerandomize()
else:
pass
out_of_bounds = np.bitwise_or(Ts[:,:3, 3] > (1.1, .9, 1.5), Ts[:,:3, 3] < (-1.0, -.9, -.75))
if np.any(out_of_bounds):
self.rerandomize()
return
| 9,814 |
Python
| 44.022936 | 210 | 0.606582 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/tasks/__init__.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
COLORS = [(69,119,170),(102,204,238),(34,136,51), (204,187,168), (238,102,119), (170,51,119)]
BRIGHT_COLORS = [(0,119,187),(51,187,238),(0,153,136), (238,119,51), (204,51,17), (238,51,119)]
COLORS = [(0,1,1),(1,0,1),(1,1,0), (0,0,1), (0,1,0), (1,0,0)]
COLORS = [(0,.37,1),(.983,.13,.98),(.873,.24,0), (1,.1,.1), (0.276,.56,.1), (1,.1,.1), (0.2,0.2,0.2)]
CUBOID_FACE_COLORS = [(0,0,1), (0,0,1), (0,1,0), (0,1,0), (1,0,0), (1,0,0)]
| 575 |
Python
| 46.999996 | 101 | 0.558261 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/tasks/table_task.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from typing import Dict, Optional
import numpy as np
import omni.usd
from omni.isaac.core.tasks.base_task import BaseTask
from pxr import Usd, UsdPhysics, Sdf, PhysxSchema, UsdShade
import carb
from omni.isaac.core.objects import FixedCuboid, GroundPlane
from omni.isaac.core.prims import RigidPrim, XFormPrim, GeometryPrim
from omni.isaac.core.scenes.scene import Scene
from omni.isaac.core.utils.prims import get_prim_at_path, add_reference_to_stage, delete_prim
from scipy.spatial.transform import Rotation as R
from omni.isaac.core.materials import PreviewSurface, PhysicsMaterial
from srl.teleop.assistance.camera_franka import CameraFranka
from srl.teleop.assistance.ghost_franka import GhostFranka
from srl.teleop.assistance.logging import OBJECT_META_DTYPE, POSE_DTYPE
class TableTask(BaseTask):
"""[summary]
Args:
name (str, optional): [description].
target_position (Optional[np.ndarray], optional): [description]. Defaults to None.
cube_size (Optional[np.ndarray], optional): [description]. Defaults to None.
offset (Optional[np.ndarray], optional): [description]. Defaults to None.
"""
def __init__(
self,
name: str = "sorting",
offset: Optional[np.ndarray] = None,
) -> None:
self._task_objects = {}
self._scene_objects = {}
self._ghost_objects = {}
self._ghost_robots = {}
self._contact_view = None
self.robot = None
self._materials = []
self._physics_material = None
self._settings = carb.settings.get_settings()
# NOTE: Needed for shadows
self._settings.set("/rtx/directLighting/sampledLighting/enabled", True)
def add_groundplane(
self,
prim_path: str,
z_position: float = 0,
name="ground_plane",
static_friction: float = 0.5,
dynamic_friction: float = 0.5,
restitution: float = 0.8,
) -> None:
"""[summary]
Args:
z_position (float, optional): [description]. Defaults to 0.
name (str, optional): [description]. Defaults to "default_ground_plane".
prim_path (str, optional): [description]. Defaults to "/World/defaultGroundPlane".
static_friction (float, optional): [description]. Defaults to 0.5.
dynamic_friction (float, optional): [description]. Defaults to 0.5.
restitution (float, optional): [description]. Defaults to 0.8.
Returns:
[type]: [description]
"""
if self.scene.object_exists(name=name):
carb.log_info("ground floor already created with name {}.".format(name))
return self.scene.get_object(self, name=name)
from srl.teleop.assistance import DATA_DIR
add_reference_to_stage(usd_path=f"{DATA_DIR}/ground_plane.usda", prim_path=prim_path)
physics_material = PhysicsMaterial(
prim_path=f"{prim_path}/materials/physics",
static_friction=static_friction,
dynamic_friction=dynamic_friction,
restitution=restitution,
)
plane = GroundPlane(prim_path=prim_path, name=name, z_position=z_position, physics_material=physics_material)
self.scene.add(plane)
return plane
def set_up_scene(self, scene: Scene) -> None:
super().set_up_scene(scene)
self.task_root = f"/World/{self.name}"
self.objects_path = f"{self.task_root}/objects"
self.materials_path = f"{self.task_root}/materials"
self.task_objects_path = f"{self.objects_path}/task"
self.ghosts_path = f"{self.objects_path}/ghosts"
self.robots_path = f"{self.objects_path}/robots"
stage = omni.usd.get_context().get_stage()
stage.DefinePrim(self.objects_path, "Scope")
stage.DefinePrim(self.task_objects_path, "Scope")
stage.DefinePrim(self.ghosts_path, "Scope")
stage.DefinePrim(self.materials_path, "Scope")
stage.DefinePrim(self.robots_path, "Scope")
from srl.teleop.assistance import DATA_DIR
self.add_groundplane(z_position=-0.83, prim_path=f"{self.task_root}/ground_plane")
add_reference_to_stage(usd_path=DATA_DIR + "/table.usd", prim_path=f"{self.objects_path}/table")
add_reference_to_stage(usd_path=DATA_DIR + "/lighting.usda", prim_path=f"{self.task_root}/lights")
table = XFormPrim(f"{self.objects_path}/table")
table_top = FixedCuboid(f"{self.objects_path}/table/top/collider", name="table_top_collider")
meshcollisionAPI = UsdPhysics.MeshCollisionAPI.Apply(table_top.prim)
meshcollisionAPI.CreateApproximationAttr().Set("boundingCube")
table_top.set_collision_enabled(True)
table.set_world_pose((0.4, 0.0, -0.427), (1,0,0,1))
self._scene_objects["table_top"] = table_top
def add_robot(self):
"""[summary]
Returns:
Franka: [description]
"""
env_path = f"/World/{self.name}/robots"
        contact_paths = [obj.prim_path for obj in self._task_objects.values()]  # computed but currently unused: contact reporting is disabled below
self.robot = self.scene.add(CameraFranka(prim_path=env_path + "/franka", name="franka", contact_paths=None))
def add_ghost_robots(self):
env_path = f"/World/{self.name}/robots"
for ghost_index in range(1):
ghost_name = f"ghost_franka{ghost_index}"
ghost_path = f"{env_path}/{ghost_name}"
ghost_robot = self.scene.add(GhostFranka(prim_path=ghost_path, name=ghost_name, material_path=f"/World/{self.name}/materials/ghost"))
self._ghost_robots[ghost_name] = ghost_robot
def get_ghost_objects(self) -> Dict[str, RigidPrim]:
return self._ghost_objects
def get_scene_objects(self) -> Dict[str, RigidPrim]:
return self._scene_objects
def get_observations(self) -> np.ndarray:
"""[summary]
Returns:
dict: [description]
"""
observations = np.empty((len(self._task_objects),), dtype=POSE_DTYPE)
for i, obj in enumerate(self._task_objects.values()):
observations[i] = obj.get_world_pose()
return observations
def get_params(self) -> dict:
object_info = []
for obj in self._task_objects.values():
object_info.append(obj.name)
return {
"objects" : np.array(object_info, dtype=OBJECT_META_DTYPE),
"robot_name": self.robot.name,
"scene_description": self._initial_scene_description,
}
def set_object_poses(self, poses: np.ndarray):
with Sdf.ChangeBlock():
for i, obj in enumerate(self._task_objects.values()):
pose = poses[i]
obj.set_world_pose(*pose)
def post_reset(self) -> None:
for name, robot in self._ghost_robots.items():
robot.hide()
robot.gripper.open()
self.robot.set_joint_positions(np.array([-0.01561307, -1.2717055, -0.02706644, -2.859138, -0.01377442,
2.0233166, 0.7314064, 0.04, 0.04], dtype=np.float32))
self.robot.gripper.open()
return super().post_reset()
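# Hedged usage sketch (illustrative): consuming the structured observation array.
# POSE_DTYPE is defined elsewhere in this package; here we only assume it holds
# one (position, orientation) pose per task object, in insertion order, which is
# what get_observations/set_object_poses above rely on.
#
#   obs = task.get_observations()
#   names = task.get_params()["objects"]
#   for name, pose in zip(names, obs):
#       ...  # `pose` unpacks to the (p, q) pair accepted by set_world_pose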
| 7,454 |
Python
| 39.961538 | 145 | 0.632815 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/tasks/reaching.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import time
from typing import Dict, Optional
import numpy as np
from omni.isaac.core.objects import VisualSphere
from omni.isaac.core.prims import RigidPrim, XFormPrim
from omni.isaac.core.scenes.scene import Scene
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.prims import get_prim_at_path
from srl.teleop.assistance.logging import OBJECT_META_DTYPE
from srl.teleop.assistance.tasks.serializable_task import SerializableTask
from srl.teleop.assistance.tasks.table_task import TableTask
from srl.teleop.assistance.tasks.time_limited_task import TimeLimitedTask
from srl.teleop.assistance.transforms import T2pq, make_rotation_matrix, pack_Rp, pq2T, transform_dist
from omni.isaac.franka import KinematicsSolver
TARGET_POSES = [
pack_Rp(make_rotation_matrix((0,0,-1), (-1,0,0)), [.3, -.2, .35]),
pack_Rp(make_rotation_matrix((0,0,-1), (-1,0,0)), [.3, .2, .35]),
pack_Rp(make_rotation_matrix((0,0,-1), (-1,0,0)), [.3, 0, .07]),
pack_Rp(make_rotation_matrix((0,0,-1), (.5,.5,0)), [.3, 0, .07]),
pack_Rp(make_rotation_matrix((0,.1,-1), (-.5,.5,0)), [.35, .10, .12]),
pack_Rp(make_rotation_matrix((1,0,-1), (-1,0,-1)), [.80, 0, .10])]
class ReachingTask(TimeLimitedTask, TableTask, SerializableTask):
"""[summary]
Args:
name (str, optional): [description].
target_position (Optional[np.ndarray], optional): [description]. Defaults to None.
cube_size (Optional[np.ndarray], optional): [description]. Defaults to None.
offset (Optional[np.ndarray], optional): [description]. Defaults to None.
"""
def __init__(
self,
name: str = "reaching",
n_targets=6,
offset: Optional[np.ndarray] = None,
initial_scene_description = None,
rng = None,
max_duration=60 * 5
) -> None:
self.assets_root_path = get_assets_root_path()
self.n_targets = n_targets
self._done = False
self._current_target = 0
self._current_target_T = None
self._scene_objects = {}
self._ghost_objects = {}
self._ghost_robots = {}
self.robot = None
self._initial_scene_description = initial_scene_description
if rng is None:
rng = np.random.RandomState(0)
self.rng = rng
TableTask.__init__(self,
name=name,
offset=offset,
)
SerializableTask.__init__(self,
name=name,
offset=offset,
initial_scene_description=initial_scene_description
)
TimeLimitedTask.__init__(self, max_duration=max_duration)
def get_params(self) -> dict:
base = TimeLimitedTask.get_params(self)
base.update(TableTask.get_params(self))
base.update({
"n_targets" : self.n_targets,
})
return base
def set_up_scene(self, scene: Scene) -> None:
super().set_up_scene(scene)
if self._initial_scene_description is not None:
self.load_scene_description(self._initial_scene_description)
for prim in get_prim_at_path(self.task_objects_path).GetChildren():
prim_path = prim.GetPath()
name = prim.GetName()
self._task_objects[name] = XFormPrim(prim_path, name=name)
self.add_robot()
self.add_ghost_robots()
else:
from srl.teleop.assistance import DATA_DIR
obj_name = f"target0"
prim_path = f"{self.task_objects_path}/{obj_name}"
target_p, target_q = T2pq(TARGET_POSES[0], as_float_array=True)
target_prim = VisualSphere(prim_path, name=obj_name, position=target_p, orientation=target_q, radius=0.005, color=np.array((1.,1.,1.)))
#target_prim = add_reference_to_stage(usd_path=DATA_DIR + "/axis.usda", prim_path=prim_path)
#target_prim = XFormPrim(str(target_prim.GetPath()), name=obj_name, position=target_p, orientation=target_q, scale=(0.3,0.3,0.3))
new_object = scene.add(
target_prim
)
self._task_objects[obj_name] = new_object
self.add_robot()
self.add_ghost_robots()
self._initial_scene_description = self.get_scene_description()
self.solver = KinematicsSolver(self.robot)
return
def cleanup(self) -> None:
return super().cleanup()
def post_reset(self) -> None:
self._current_target = 0
return super().post_reset()
def set_target(self, T):
pq = T2pq(T, as_float_array=True)
self._task_objects["target0"].set_world_pose(*pq)
actions, success = self.solver.compute_inverse_kinematics(
*pq
)
display_config = np.empty(9)
display_config[:7] = actions.joint_positions[:7]
# IK doesn't solve for the fingers; manually set open values
display_config[7] = 0.04
display_config[8] = 0.04
self._ghost_robots['ghost_franka0'].set_joint_positions(display_config)
self._ghost_robots['ghost_franka0'].show(gripper_only=True)
self._current_target_T = T
def pre_step(self, sim_step, sim_time):
TimeLimitedTask.pre_step(self, sim_step, sim_time)
if self._current_target_T is None:
self.set_target(TARGET_POSES[self._current_target])
eff_prim = XFormPrim(self.robot.prim_path + "/panda_hand/eff")
ee_p, ee_q = eff_prim.get_world_pose()
ee_T = pq2T(ee_p, ee_q)
#print(rot_diff)
if transform_dist(ee_T, self._current_target_T, .15) < .03:
# advance to next target
self._current_target = (self._current_target + 1) % len(TARGET_POSES)
self.set_target(TARGET_POSES[self._current_target])
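# Hedged note (illustrative): pre_step advances the target cyclically. Once the
# end effector is within 0.03 of the active TARGET_POSES entry (weighted pose
# distance, rotation weight 0.15), the next of the six targets is set, wrapping
# around via the modulo above, so a long run revisits targets in a fixed order.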
| 5,982 |
Python
| 38.104575 | 147 | 0.608492 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/tasks/time_limited_task.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import time
from typing import Optional
class TimeLimitedTask():
"""[summary]
Args:
name (str, optional): [description].
target_position (Optional[np.ndarray], optional): [description]. Defaults to None.
cube_size (Optional[np.ndarray], optional): [description]. Defaults to None.
offset (Optional[np.ndarray], optional): [description]. Defaults to None.
"""
def __init__(
self,
max_duration: Optional[int]
) -> None:
self.max_duration = max_duration
self._start_wallclock_stamp = None
self._done = False
def get_params(self) -> dict:
non_optional = self.max_duration if self.max_duration is not None else -1
return {
"max_duration": non_optional,
}
@property
def time_remaining(self):
if not self.max_duration:
return None
if self._start_wallclock_stamp is None:
# pre_step hasn't run yet, so the clock hasn't started
return self.max_duration
return self.max_duration - (time.time() - self._start_wallclock_stamp)
def is_done(self):
return self._done
def pre_step(self, time_step_index: int, simulation_time: float) -> None:
now = time.time()
if self._start_wallclock_stamp is None:
self._start_wallclock_stamp = time.time()
if self.max_duration and now - self._start_wallclock_stamp > self.max_duration:
self._done = True
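# Hedged usage sketch (illustrative): the timeout is wall-clock based and is
# driven entirely by pre_step. The 0.2 s budget and the loop below are
# hypothetical choices for demonstration.
#
# if __name__ == "__main__":
#     task = TimeLimitedTask(max_duration=0.2)
#     step = 0
#     while not task.is_done():
#         task.pre_step(step, simulation_time=step / 60.0)
#         time.sleep(0.01)
#         step += 1
#     assert task.time_remaining <= 0  # budget exhausted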
| 1,512 |
Python
| 29.87755 | 94 | 0.609127 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/behavior/motion.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from srl.teleop.assistance.motion_commander import MotionCommand, PlannedMoveCommand
from srl.teleop.assistance.transforms import T2pq, pq2T, transform_dist
from omni.isaac.cortex.df import DfAction, DfSetLockState, DfStateMachineDecider, DfStateSequence
import numpy as np
import quaternion
class PullTowardConfig(DfAction):
def enter(self):
pass
def step(self):
ctx = self.context
joint_config = self.params
ctx.tools.commander.set_command(PlannedMoveCommand(joint_config))
def exit(self):
pass
class SetUserTarget(DfAction):
def step(self):
ctx = self.context
new_target = self.params
ctx.tools.commander.set_command(MotionCommand(*new_target))
current_target_pose = ctx.tools.commander.target_prim.get_world_pose()
error = transform_dist(pq2T(*current_target_pose), pq2T(*new_target), .15)
if error < .02:
return None
else:
return self
class Reset(DfStateMachineDecider):
def __init__(self):
# This behavior uses the locking feature of the decision framework to run a state machine
# sequence as an atomic unit.
super().__init__(
DfStateSequence(
[
DfSetLockState(set_locked_to=True, decider=self),
SetUserTarget(),
DfSetLockState(set_locked_to=False, decider=self),
]
)
)
self.is_locked = False
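# Hedged note (illustrative): the DfSetLockState pair brackets SetUserTarget so
# the sequence runs as an atomic unit; SetUserTarget keeps returning itself
# until the commander's target prim is within 0.02 (weighted pose distance,
# rotation weight 0.15) of the requested pose, then returns None to signal
# completion.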
| 1,648 |
Python
| 30.113207 | 97 | 0.64017 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/behavior/select.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import itertools
import numpy as np
from omni.isaac.cortex.df import DfAction, DfDecision, DfDecider, DfLogicalState
from srl.teleop.assistance.behavior.scene import ContextTools, SceneContext
from srl.teleop.assistance.behavior.control import ControlContext
from srl.teleop.assistance.proposals import GraspNormalProposalTable, GroupedPoseProposalTable, InvalidReason, PlanePlaneProposal, GraspProposal, \
PlacementProposal
from srl.teleop.assistance.suggestions import check_grasp_orientation_similarity
from srl.teleop.assistance.transforms import R_to_angle, orthogonalize, pack_Rp, transform_dist, unpack_T
from srl.teleop.assistance.transforms import T2pq, make_rotation_matrix, pq2T, invert_T, normalized
import time
import carb
from srl.teleop.assistance.profiling import profile
from srl.teleop.assistance.viz import viz_axis_named_T, viz_axis_named_Ts
class SelectionContext(DfLogicalState):
def __init__(self, tools: ContextTools, scene_context: SceneContext, control_context: ControlContext, use_surrogates: bool, use_snapping: bool):
super().__init__()
self.tools = tools
self.scene_context = scene_context
self.control_context = control_context
self.grasp_distribution = None
self.placement_distribution = None
self.plane_distribution = None
self.current_grasp_proposal = None
self.current_placement_proposal = None
self.cursor_ray = None
self.use_surrogates = use_surrogates
self.use_snapping = use_snapping
self.scene_mesh_dirty = False
self.time_at_last_placement_update = None
self.fixed_proposal = None
self.monitors = [
SelectionContext.monitor_grasp_proposals,
SelectionContext.monitor_placement_proposals
]
def has_grasp_proposal(self):
return self.current_grasp_proposal is not None
@property
def suggestion_is_snap(self):
if self.current_grasp_proposal and isinstance(self.current_grasp_proposal._table, GroupedPoseProposalTable):
return True
return False
def has_placement_proposal(self):
return self.current_placement_proposal is not None
def get_current_proposal(self):
if self.fixed_proposal is not None:
return self.fixed_proposal
if self.has_grasp_proposal():
return self.current_grasp_proposal
elif self.has_placement_proposal():
return self.current_placement_proposal
def reset_placement_proposal(self):
self.current_placement_proposal = None
def reset_grasp_proposal(self):
self.current_grasp_proposal = None
def monitor_grasp_proposals(self):
scene_ctx = self.scene_context
self.scene_mesh_dirty |= scene_ctx.scene_mesh_changed
if self.tools.grasp_table:
self.tools.grasp_table.objects_dirty |= scene_ctx.moving_objects
else:
return
if scene_ctx.object_in_gripper is not None:
return
# Wait until we have an initial collision env
if scene_ctx.scene_mesh is None:
return
table = self.tools.grasp_table
dirty_mask = np.full_like(table._owners, False, dtype=bool)
moving_mask = np.full_like(table._owners, False, dtype=bool)
in_gripper_mask = np.full_like(table._owners, False, dtype=bool)
for i, (dirty, moving) in enumerate(zip(table.objects_dirty, scene_ctx.moving_objects)):
mask = table.mask_by_owner(i)
if dirty:
dirty_mask |= mask
if moving:
moving_mask |= mask
if i == scene_ctx.object_in_gripper_i:
in_gripper_mask = mask
if dirty and not moving:
# We're only going to update this object if it isn't moving!
table.objects_dirty[i] = False
# This object moved! That means any cached IK solves are no longer valid. Clear them out
table._configs[dirty_mask] = np.nan
table.invalidate(moving_mask, InvalidReason.MOVING)
check_mask = dirty_mask & ~moving_mask & ~in_gripper_mask
if check_mask.sum() == 0 and self.scene_mesh_dirty:
check_mask = np.full_like(table._owners, True, dtype=bool)
self.scene_mesh_dirty = False
candidate_Ts_world = scene_ctx.obj_Ts[table._owners[check_mask]] @ table._poses[check_mask]
dists = np.linalg.norm(candidate_Ts_world[:,:3,3], axis=1)
sideness = np.linalg.norm(candidate_Ts_world[:, :3, 2] @ np.array([[1,0,0],[0,1,0]]).T, axis=1)
# Start by assuming the suggestion is valid
table._valid[check_mask] = InvalidReason.VALID.value
table.invalidate_submask(check_mask, dists > 1.0, InvalidReason.UNREACHABLE)
# No side grasps
table.invalidate_submask(check_mask, (sideness > .6) & (candidate_Ts_world[:,2,3] < .3), InvalidReason.UNREACHABLE)
proposable_check_indices, = np.where(table.proposable[check_mask])
proposable_checked_mask = np.zeros(check_mask.sum(), dtype=bool)
proposable_checked_mask[proposable_check_indices] = True
world_col_res = self.tools.geometry_scene.query(candidate_Ts_world[proposable_check_indices], from_mesh=self.tools.gripper_collision_mesh, to_mesh=scene_ctx.scene_mesh, render=False, query_name=f"grasp_scene")
table.invalidate_submask(proposable_checked_mask, world_col_res != 0, InvalidReason.SCENE_COLLISION)
table.update_world_poses_masked(check_mask,candidate_Ts_world)
def monitor_placement_proposals(self):
now = time.time()
scene_ctx = self.scene_context
if self.tools.placement_table:
for table in self.tools.placement_table:
table.objects_dirty |= scene_ctx.moving_objects
if scene_ctx.object_in_gripper is None:
return
obj_to_ee_T = invert_T(scene_ctx.ee_to_obj_T)
# Check whether any current proposals became invalid
gripper_obj_i = scene_ctx.object_in_gripper_i
gripper_obj = scene_ctx.object_in_gripper
# We rate-limit this to avoid jumpiness and to reduce CPU burden
if self.time_at_last_placement_update is None or (now - self.time_at_last_placement_update) > 1.:
table = self.tools.placement_table[gripper_obj_i]
moving_mask = np.full_like(table._owners, False, dtype=bool)
in_gripper_mask = np.full_like(table._owners, False, dtype=bool)
for i, moving in enumerate(scene_ctx.moving_objects):
mask = table.mask_by_owner(i)
if moving:
moving_mask |= mask
if i == gripper_obj_i:
in_gripper_mask = mask
table.objects_dirty[i] = False
# Give a heads up that we can't vouch for proposal quality while the object is moving
table.invalidate(moving_mask, InvalidReason.MOVING)
check_mask = ~moving_mask & ~in_gripper_mask
support_T = scene_ctx.obj_Ts
candidate_Ts = table._poses[check_mask]  # w.r.t. the support object
ee_Ts_support = candidate_Ts @ obj_to_ee_T
world_Ts = support_T[table._owners[check_mask]] @ ee_Ts_support
placement_Ts = world_Ts @ invert_T(obj_to_ee_T)
dists = np.linalg.norm(world_Ts[:,:3,3], axis=1)
sideness = np.linalg.norm(world_Ts[:, :3, 2] @ np.array([[1,0,0],[0,1,0]]).T, axis=1)
is_top_grasp = check_grasp_orientation_similarity(world_Ts, axis_z_filter=np.array((0.,0.,-1.)), axis_z_filter_thresh=.3)
# Start by assuming the suggestion is valid
table._valid[:] = InvalidReason.VALID.value
table.invalidate_submask(check_mask, dists > 1.0, InvalidReason.UNREACHABLE)
table.invalidate_submask(check_mask, (sideness > .6) & (world_Ts[:,2,3] < .3), InvalidReason.UNREACHABLE)
#suggestions_table.invalidate_submask(check_mask, ~is_top_grasp, InvalidReason.UNREACHABLE)
proposable_check_indices, = np.where(table.proposable[check_mask])
proposable_checked_mask = np.zeros(check_mask.sum(), dtype=bool)
proposable_checked_mask[proposable_check_indices] = True
# Would the gripper collide with the support object? Happens often with side alignments
gripper_collisions = self.tools.geometry_scene.query(world_Ts[proposable_check_indices], from_mesh=self.tools.gripper_collision_mesh, to_mesh=scene_ctx.scene_mesh)
table.invalidate_submask(proposable_checked_mask, gripper_collisions != 0, InvalidReason.SCENE_COLLISION)
# Shrink the gripper object mesh back a bit to see if the volume where it needs to go is roughly empty
proposable_check_indices, = np.where(table.proposable[check_mask])
proposable_checked_mask[:] = False
proposable_checked_mask[proposable_check_indices] = True
scene_collisions = scene_ctx.tools.geometry_scene.query(placement_Ts[proposable_check_indices], gripper_obj.prim, scene_ctx.scene_mesh, from_mesh_scale=0.95, query_name="place")
table.invalidate_submask(proposable_checked_mask, scene_collisions != 0, InvalidReason.SCENE_COLLISION)
table.update_world_poses_masked(check_mask, world_Ts)
self.time_at_last_placement_update = now
class SelectDispatch(DfDecider):
"""
Responsible for deciding whether to update the current suggestion
"""
def enter(self):
self.add_child("select_grasp_suggestion", SelectGraspProposal())
self.add_child("select_placement_suggestion", SelectPlacementProposal())
self.add_child("select_grasp_normal_suggestion", SelectGraspNormalProposal())
self.add_child("select_placement_plane_suggestion", SelectPlacementPlaneProposal())
self.add_child("do_nothing", DfAction())
def decide(self):
ctx = self.context
scene_ctx = self.context.scene_context
control_ctx = self.context.control_context
obj_in_gripper = scene_ctx.object_in_gripper
if len(scene_ctx.objects) == 0 or not control_ctx.user_gave_motion or control_ctx.assistance_in_use:
# No objects to provide assistance for
# If user isn't driving, we won't change the selection. Makes it easy to "rest" the system
if control_ctx.assistance_in_use:
# If user is opting into assistance, don't change the selection out from under them, and hide the cursor
ctx.cursor_ray = None
return DfDecision("do_nothing")
elif not scene_ctx.should_suggest_placements:
return DfDecision("do_nothing")
elif obj_in_gripper is not None:
ctx.reset_grasp_proposal()
if ctx.use_surrogates:
return DfDecision("select_placement_plane_suggestion", (obj_in_gripper))
else:
table = scene_ctx.tools.placement_table[scene_ctx.object_in_gripper_i]
if table and not table.empty():
return DfDecision("select_placement_suggestion", (obj_in_gripper, table))
elif scene_ctx.should_suggest_grasps:
ctx.reset_placement_proposal()
if ctx.use_surrogates:
return DfDecision("select_grasp_normal_suggestion")
else:
grasp_proposals = ctx.tools.grasp_table
if grasp_proposals and not grasp_proposals.empty():
return DfDecision("select_grasp_suggestion", (ctx.tools.grasp_table))
return DfDecision("do_nothing")
class SelectPlacementPlaneProposal(DfAction):
def step(self):
from srl.teleop.assistance.behavior.display import AVAILABLE_COLOR_KEY, AVAILABLE_DOT_COLOR_KEY, SNAPPABLE_COLOR_KEY, SNAPPED_COLOR_KEY, UNAVAILABLE_COLOR_KEY
ctx = self.context
scene_ctx = self.context.scene_context
ctx.current_placement_proposal = None
gripper_obj = self.params
gripper_obj_i = scene_ctx.objects.index(gripper_obj)
gripper_obj_T = pq2T(*gripper_obj.get_world_pose())
plane_table = scene_ctx.tools.plane_table
scene_ctx.tools.plane_table._object_poses[gripper_obj_i] = gripper_obj_T
if ctx.use_snapping and ctx.tools.placement_table:
snaps_table = ctx.tools.placement_table[gripper_obj_i]
if snaps_table._poses_world is not None:
ctx.placement_distribution = np.full(len(snaps_table), AVAILABLE_DOT_COLOR_KEY)
ctx.placement_distribution[~snaps_table.proposable] = UNAVAILABLE_COLOR_KEY
else:
ctx.placement_distribution = None
elif not ctx.use_snapping:
snaps_table = None
ctx.placement_distribution = None
# Support geometry is in object frame
# Mask to only look at the object we're holding
object_mask = np.empty((len(plane_table.facet_object_owner), 3), dtype=bool)
object_mask[:] = (plane_table.facet_object_owner != gripper_obj_i)[:, None]
support_normals = np.ma.masked_where(object_mask, plane_table.support_normals, copy=False)
#support_centroids = ma.masked_where(object_mask, self.tools.plane_table.support_centroids, copy=False)
# Figure out which way the Z axis of the end effector points in the object frame
ee_dir_in_obj = scene_ctx.ee_to_obj_T[:3,:3].T[:,2]
scores = support_normals.dot(ee_dir_in_obj)
closest_to_normal = scores.argmax()
in_gripper_support_face = closest_to_normal
ee_p, ee_q = ctx.tools.commander.get_fk_pq()
in_gripper_support_face_i = in_gripper_support_face
in_gripper_support_centroid = plane_table.get_centroids_world(in_gripper_support_face_i)
in_gripper_support_normal_world = plane_table.get_normals_world(in_gripper_support_face_i)
hit_path, hit_pos, _, hit_dist = ctx.tools.ray_cast(in_gripper_support_centroid, in_gripper_support_normal_world, ignore_obj_handler=lambda path: ctx.tools.should_ignore_in_raycast(path, gripper_obj.prim_path))
ctx.cursor_ray = in_gripper_support_centroid, in_gripper_support_normal_world, hit_dist
dists = np.linalg.norm(plane_table.get_centroids_world() - ee_p, axis=1)
dists[plane_table._valid != InvalidReason.VALID.value] = float('inf')
dists[plane_table.facet_object_owner == gripper_obj_i] = float('inf')
if hit_path:
hit_obj = None
for i, obj in enumerate(itertools.chain(scene_ctx.objects, scene_ctx.tools.scene_objects.values())):
if obj.prim_path == hit_path:
hit_obj = obj
hit_obj_i = i
#print(hit_obj)
break
if hit_obj:
# Take the object we hit by default
dists[plane_table.facet_object_owner != hit_obj_i] = float('inf')
closest_i = np.argmin(dists)
if dists[closest_i] == float("inf") or hit_pos is None:
# No valid plane
ctx.current_placement_proposal = None
return
plane_table.update_object_poses(np.vstack((scene_ctx.obj_Ts, scene_ctx.fixed_Ts)))
if ctx.current_placement_proposal is None or (isinstance(ctx.current_placement_proposal, PlanePlaneProposal) and ctx.current_placement_proposal.place_obj != gripper_obj):
proposal = PlanePlaneProposal(plane_table, closest_i, in_gripper_support_face)
elif ctx.current_placement_proposal and isinstance(ctx.current_placement_proposal, PlanePlaneProposal):
proposal = ctx.current_placement_proposal
if proposal.support_index != closest_i:
proposal = PlanePlaneProposal(plane_table, closest_i, in_gripper_support_face)
else:
proposal = PlanePlaneProposal(plane_table, closest_i, in_gripper_support_face)
# Alternative point solution from projecting straight down
#current_in_plane_p = proposal.project_to_constraint(proposal.place_centroid_world, proposal.place_centroid)
current_in_plane_p = proposal.project_to_constraint(hit_pos, proposal.place_centroid)
#proposal.T_world = proposal.support_obj_T @ proposal.get_placement_T() @ invert_T(assist_ctx.ee_to_obj_T)
#viz_axis_named_T("placement", proposal.get_placement_T(), (.2,.2,.2))
ee_T = ctx.tools.commander.get_fk_T()
ee_ax = ee_T[:3, 0]
ee_ay = ee_T[:3,1]
# Try to project X and Y axes onto the placement plane
# NOTE: This assumes that the robot is at (0,0,0)
vec_to_base = -proposal.support_centroid_world
# Strategy: project the end-effector X and Y axes to be orthogonal to the current
# placement normal, and again to be orthogonal to the support normal. That yields two
# fully specified rotations which we can bring into alignment while minimizing the
# amount of twisting required.
proposed_face_R = np.array([ee_ax, ee_ay, proposal.place_normal_world]).T
try:
face_R = orthogonalize(proposed_face_R, prioritize=(2,0,1))
except np.linalg.LinAlgError as e:
face_R = make_rotation_matrix(proposal.place_normal_world, vec_to_base)
#viz_axis_named_Rp("on_obj", face_R, proposal.place_centroid_world, scale=(.2,.2,.2))
proposed_solution_R = np.array([ee_ax, ee_ay, -proposal.support_normal_world]).T
try:
solution_R = orthogonalize(proposed_solution_R, prioritize=(2,0,1))
except np.linalg.LinAlgError as e:
solution_R = make_rotation_matrix(-proposal.support_normal_world, vec_to_base)
#viz_axis_named_Rp("proj_sol", solution_R, current_in_plane_p, scale=(.2,.2,.2))
# Subtract out the original object orientation, leaving just the rotation that takes us from the object to the new frame
obj_to_sol_R = gripper_obj_T[:3,:3].T @ face_R
proposal.T_world = pack_Rp(solution_R @ obj_to_sol_R.T, current_in_plane_p) @ invert_T(scene_ctx.ee_to_obj_T)
if ctx.use_snapping and snaps_table and snaps_table._poses_world is not None:
snap_Ts = snaps_table._poses_world
snap_scores = transform_dist(snap_Ts, proposal.T_world, R_weight=.15)
snap_scores[~snaps_table.proposable] = float('inf')
closest_point_snap_i = np.argmin(snap_scores)
if snap_scores[closest_point_snap_i] < 0.05:
ctx.placement_distribution[:] = AVAILABLE_COLOR_KEY
ctx.placement_distribution[~snaps_table.proposable] = UNAVAILABLE_COLOR_KEY
ctx.placement_distribution[closest_point_snap_i] = SNAPPED_COLOR_KEY
proposal = PlacementProposal(closest_point_snap_i, snaps_table, scene_ctx.objects[snaps_table._owners[closest_point_snap_i]], gripper_obj)
offset_T = proposal.T_world.copy()
offset_T = offset_T @ scene_ctx.ee_to_obj_T
offset_T[2,3] += 0.005
collisions = scene_ctx.tools.geometry_scene.query(offset_T[None], gripper_obj.prim, scene_ctx.scene_mesh, render=False, query_name="place")
if collisions[0] > 0:
if isinstance(proposal, PlacementProposal):
proposal.mark_invalid(InvalidReason.SCENE_COLLISION)
return
#viz_axis_named_T("final", proposal.T_world, scale=(.15,.15,.15))
ctx.current_placement_proposal = proposal
return
class SelectPlacementProposal(DfAction):
def __init__(self):
self.start_T = None
self.start_T_stamp = None
self.memory = None
self.prior = None
def step(self):
from srl.teleop.assistance.behavior.display import AVAILABLE_COLOR_KEY, AVAILABLE_DOT_COLOR_KEY, SNAPPABLE_COLOR_KEY, SNAPPED_COLOR_KEY, UNAVAILABLE_COLOR_KEY
ctx = self.context
scene_ctx = self.context.scene_context
gripper_obj, table = self.params
Ts = table._poses_world
if Ts is None:
return
if self.memory is None or len(self.prior) != len(Ts):
self.memory = np.zeros((len(Ts)), dtype=float)
if self.prior is None or len(self.prior) != len(Ts):
self.prior = np.zeros((len(Ts)), dtype=float)
mask = table.proposable
ee_p, ee_q = ctx.tools.commander.get_fk_pq()
ee_T = pq2T(ee_p, ee_q)
now = time.time()
if self.start_T_stamp is None or now - self.start_T_stamp > 2.:
self.start_T = ee_T
self.start_T_stamp = now
pairwise_dist = transform_dist(ee_T, Ts[mask], .15)
self.prior[mask] = np.exp(-pairwise_dist)
self.prior[mask] /= self.prior[mask].sum()
self.prior[~mask] = 0
s_to_u_cost = approx_traj_cost(self.start_T, ee_T)
u_to_g_costs = approx_traj_cost(ee_T, Ts[mask])
s_to_g_costs = approx_traj_cost(self.start_T, Ts[mask])
# Eq. 9 in "Formalizing Assistive Teleoperation", applicable because the
# costs above are quadratic
self.memory[mask] = (np.exp(-s_to_u_cost - u_to_g_costs) / np.exp(-s_to_g_costs))
self.memory[~mask] = 0
if self.context.placement_distribution is None or len(self.context.placement_distribution) != len(Ts):
ctx.placement_distribution = np.ones(len(Ts))
#ctx.tools.viewport_scene.manipulator.set_grasp_distribution(ctx.grasp_distribution)
ctx.placement_distribution[:] = AVAILABLE_DOT_COLOR_KEY
ctx.placement_distribution[~table.proposable] = UNAVAILABLE_COLOR_KEY
placement_scores = self.memory * self.prior
best_i = np.argmax(placement_scores)
if placement_scores[best_i] == float("-inf"):
ctx.current_placement_proposal = None
return
ctx.placement_distribution[best_i] = SNAPPED_COLOR_KEY
support_obj = scene_ctx.objects[table._owners[best_i]]
current_prop = ctx.current_placement_proposal
if current_prop:
if current_prop.identifier == best_i and current_prop.support_obj == support_obj:
return
ctx.current_placement_proposal = PlacementProposal(best_i, table, support_obj, gripper_obj)
def approx_traj_cost(T1, T2, R_weight=.1):
# eq 7 from 10.1007/978-3-319-33714-2_10, squared
R1_inv = np.swapaxes(T1[...,:3,:3], -1, -2)
R2 = T2[...,:3,:3]
return np.linalg.norm(T2[..., :3, 3] - T1[...,:3,3], axis=-1) + (2 * R_weight ** 2 * (1 - (np.trace(R1_inv @ R2, axis1=-1, axis2=-2) / 3)))
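# Hedged worked example (illustrative): for two poses differing by a pure 0.1 m
# translation the rotation term vanishes (trace(R1^T R2)/3 == 1), so the cost is
# just the Euclidean distance; for pure rotations, R_weight**2 scales how much
# the relative rotation contributes.
# >>> T1 = np.eye(4)
# >>> T2 = np.eye(4); T2[:3, 3] = (0.1, 0.0, 0.0)
# >>> float(approx_traj_cost(T1, T2))
# 0.1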
class SelectGraspProposal(DfAction):
def __init__(self):
self.memory = None
self.prior = None
self.start_T = None
self.start_T_stamp = None
def step(self):
from srl.teleop.assistance.behavior.display import AVAILABLE_COLOR_KEY, SNAPPABLE_COLOR_KEY, SNAPPED_COLOR_KEY, UNAVAILABLE_COLOR_KEY
ctx = self.context
table = self.params
scene_ctx = self.context.scene_context
Ts = table._poses_world
if Ts is None:
return
if self.memory is None:
self.memory = np.ones((len(Ts)), dtype=float)
if self.prior is None:
self.prior = np.ones((len(Ts)), dtype=float)
# viz_axis_named_Ts("grasp_props", Ts)
ee_T = scene_ctx.tools.commander.get_fk_T()
now = time.time()
if self.start_T_stamp is None or now - self.start_T_stamp > 2.:
self.start_T = ee_T
self.memory[:] = 1
self.start_T_stamp = now
#
pairwise_dist = transform_dist(ee_T, Ts, .15)
s_to_u_cost = approx_traj_cost(self.start_T, ee_T)
u_to_g_costs = approx_traj_cost(ee_T, Ts)
s_to_g_costs = approx_traj_cost(self.start_T, Ts)
# Eq. 9 in "Formalizing Assistive Teleoperation", applicable because the
# costs above are quadratic
self.memory[:] = (np.exp(-s_to_u_cost - u_to_g_costs) / np.exp(-s_to_g_costs))
if ctx.grasp_distribution is None:
ctx.grasp_distribution = np.ones_like(self.prior)
self.prior[:] = np.exp(-pairwise_dist)
self.prior[:] /= self.prior[:].sum()
scores = self.memory * self.prior
scores[~table.proposable] = float("-inf")
ctx.grasp_distribution[:] = AVAILABLE_COLOR_KEY
ctx.grasp_distribution[~table.proposable] = UNAVAILABLE_COLOR_KEY
# Pick the max
best_i = np.argmax(scores)
#print(i, highest_prob)
if scores[best_i] == float("-inf"):
ctx.current_grasp_proposal = None
return
ctx.grasp_distribution[best_i] = SNAPPED_COLOR_KEY
current_prop = ctx.current_grasp_proposal
# Don't override accepted proposals
if current_prop is not None:
if best_i != current_prop.identifier:
#viz_axis_named_T("cur_grasp_prop", grasp_proposals[i].T_world)
ctx.current_grasp_proposal = GraspProposal(best_i, table)
else:
# No current proposal to take care of
ctx.current_grasp_proposal = GraspProposal(best_i, table)
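# Hedged worked sketch (illustrative): the score above is prior * memory, i.e. a
# proximity prior exp(-d(ee, g)) times the trajectory-likelihood ratio
# exp(-(C(s->u) + C(u->g))) / exp(-C(s->g)), so goals the user's motion so far
# has been making progress toward beat goals that are merely nearby. With
# hypothetical costs:
# >>> s_to_u, u_to_g, s_to_g = 0.2, 0.1, 0.3
# >>> float(np.exp(-s_to_u - u_to_g) / np.exp(-s_to_g))  # on the direct path
# 1.0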
class SelectGraspNormalProposal(DfAction):
def get_cursor_T(self, body, point, normal, distance):
scene_ctx = self.context.scene_context
if not body:
return None
target_obj = None
target_obj_i = None
for i, obj in enumerate(scene_ctx.objects):
if obj.prim_path == body:
target_obj = obj
target_obj_i = i
break
if target_obj is None:
return None
ee_T = scene_ctx.tools.commander.get_fk_T()
ee_R, ee_p = unpack_T(ee_T)
carb.profiler.begin(1, "select_grasp_normal(make_table)", active=True)
table = GraspNormalProposalTable(target_obj, ee_T, point, normal)
table._valid[:] = InvalidReason.VALID.value
sideness = np.linalg.norm(table.grasp_Ts[:, :3, 2] @ np.array([[1,0,0],[0,1,0]]).T, axis=1)
# No side grasps beneath 30cm
table.invalidate((sideness > .6) & (table.grasp_Ts[:,2,3] < .3), InvalidReason.UNREACHABLE)
carb.profiler.end(1, True)
if scene_ctx.scene_mesh is None:
return
initial_check_mask = table.proposable
with profile("initial_collision_check"):
collisions, contact_points = scene_ctx.tools.geometry_scene.query_grasp_contacts(table.grasp_Ts[initial_check_mask], scene_ctx.tools.gripper_collision_mesh, scene_ctx.scene_mesh, render=False, query_name="normal")
table.invalidate_submask(table.proposable, collisions > 0, InvalidReason.SCENE_COLLISION)
"""left_T = table.grasp_Ts[best_i].copy()
right_T = table.grasp_Ts[best_i].copy()
left_T[:3, 3] += left_T[:3, 1] * (.04 - contact_points[best_i, 0])
right_T[:3, 3] -= right_T[:3, 1] * (.04 - contact_points[best_i, 1])
viz_axis_named_T("left_t", left_T, scale=(.2,.2,.2))
viz_axis_named_T("right_t", right_T,scale=(.2,.2,.2))"""
#viz_axis_named_T("old", table.grasp_Ts[0], scale=(.1,.1,.1))
if table.proposable.sum() == 0:
return None
collision_free_mask = collisions == 0
left_shift_amount = (.04 - contact_points[collision_free_mask,1]) - (.04 - contact_points[collision_free_mask, 0]) / 2
recheck_ind = np.where(initial_check_mask)[0][collision_free_mask]
to_check_again = table.grasp_Ts[recheck_ind].copy()
to_check_again[:, :3, 3] -= to_check_again[:, :3, 1] * left_shift_amount[:, None]
#viz_axis_named_T("new", table.grasp_Ts[0], scale=(.1,.1,.1))
with profile("collision_check_post_adjust"):
new_collisions = scene_ctx.tools.geometry_scene.query(to_check_again, scene_ctx.tools.gripper_collision_mesh, scene_ctx.scene_mesh, render=False, query_name="normal")
successfully_moved_ind = recheck_ind[new_collisions == 0]
table.grasp_Ts[successfully_moved_ind] = to_check_again[new_collisions == 0]
carb.profiler.begin(1, "select_grasp_normal(calcs)", active=True)
rot_to_grasp = ee_R.T @ table.grasp_Ts[table.proposable, :3, :3]
rot_diff = R_to_angle(rot_to_grasp)
# Show equally good (wrt z axis rotation) grasps
#viz_axis_named_Ts("best_rots", table.grasp_Ts[best_rot_i], scale=(0.01, 0.01, 0.01))
best_rot_i = np.where(rot_diff == rot_diff.min())[0]
standoff_subset = contact_points[collision_free_mask][best_rot_i, 2]
best_subset_standoff_i = np.where(standoff_subset == standoff_subset.min())[0]
best_i = np.where(table.proposable)[0][best_rot_i[best_subset_standoff_i][0]]
carb.profiler.end(1, True)
if not table.valid[best_i]:
return None
return best_i, table
def step(self):
from srl.teleop.assistance.behavior.display import AVAILABLE_COLOR_KEY, SNAPPABLE_COLOR_KEY, SNAPPED_COLOR_KEY, UNAVAILABLE_COLOR_KEY
ctx = self.context
scene_ctx = self.context.scene_context
ee_T = scene_ctx.tools.commander.get_fk_T()
ee_R, ee_p = unpack_T(ee_T)
# Where is the tip of the gripper pointing
ee_az = ee_T[:3, 2]
snaps = ctx.tools.grasp_table
if ctx.grasp_distribution is None and ctx.use_snapping and snaps._poses_world is not None:
ctx.grasp_distribution = np.full(len(snaps), -4.)
ctx.grasp_distribution[~snaps.proposable] = float('-inf')
elif not ctx.use_snapping:
ctx.grasp_distribution = None
if snaps._poses_world is not None:
snap_Ts = snaps._poses_world
else:
snap_Ts = np.empty((0,4,4))
disp_to_snap = snap_Ts[:, :3, 3] - ee_p
dist_to_snap = np.linalg.norm(disp_to_snap, axis=1)
dir_to_snap = disp_to_snap / np.expand_dims(dist_to_snap, axis=1)
# Angle between z axis of gripper (point dir) and each grasp position
point_dir_scores = np.arccos(dir_to_snap.dot(ee_az))
body, point, normal, distance = ctx.tools.ray_cast(ee_p, ee_az, ignore_obj_handler=ctx.tools.should_ignore_in_raycast)
target_obj = None
target_obj_i = None
for i, obj in enumerate(scene_ctx.objects):
if obj.prim_path == body:
target_obj = obj
target_obj_i = i
break
ctx.cursor_ray = ee_p, ee_az, distance
cursor_results = self.get_cursor_T(body, point, normal, distance)
cone_cutoff = .2
if cursor_results:
cursor_i, table = cursor_results
cursor_T = table.grasp_Ts[cursor_i]
if snaps._poses_world is None or not ctx.use_snapping:
# The snaps haven't loaded yet
ctx.current_grasp_proposal = GraspProposal(cursor_i, table)
return
#viz_axis_named_T("cursor_T", cursor_T)
snap_scores = transform_dist(snap_Ts, cursor_T, R_weight=.15)
snap_scores[~snaps.proposable] = float('inf')
closest_snap_i = np.argmin(snap_scores)
ctx.grasp_distribution[:] = AVAILABLE_COLOR_KEY
ctx.grasp_distribution[snaps._owners == target_obj_i] = SNAPPABLE_COLOR_KEY
ctx.grasp_distribution[~snaps.proposable] = UNAVAILABLE_COLOR_KEY
if snap_scores[closest_snap_i] < 0.05:
ctx.grasp_distribution[closest_snap_i] = SNAPPED_COLOR_KEY
ctx.current_grasp_proposal = GraspProposal(closest_snap_i, snaps)
else:
ctx.current_grasp_proposal = GraspProposal(cursor_i, table)
elif ctx.use_snapping and target_obj is None and snaps._poses_world is not None:
# Missed the object (so no cursor results). Try to provide a snap
snap_scores = transform_dist(snap_Ts, ee_T, .15)
# Only select amongst those we are pointing at
snap_scores[point_dir_scores > cone_cutoff] = float('inf')
snap_scores[~snaps.proposable] = float('inf')
closest_snap_i = np.argmin(snap_scores)
ctx.grasp_distribution[point_dir_scores <= cone_cutoff] = SNAPPABLE_COLOR_KEY
ctx.grasp_distribution[point_dir_scores > cone_cutoff] = AVAILABLE_COLOR_KEY
ctx.grasp_distribution[~snaps.proposable] = UNAVAILABLE_COLOR_KEY
if snap_scores[closest_snap_i] == float('inf'):
ctx.current_grasp_proposal = None
else:
ctx.grasp_distribution[closest_snap_i] = SNAPPED_COLOR_KEY
ctx.current_grasp_proposal = GraspProposal(closest_snap_i, snaps)
else:
# Keep the old proposal if it's close enough to the current collision point
if ctx.current_grasp_proposal and isinstance(ctx.current_grasp_proposal._table, GraspNormalProposalTable) and np.linalg.norm(point - ctx.current_grasp_proposal._table.point) < 0.1:
pass
else:
ctx.current_grasp_proposal = None
if ctx.grasp_distribution is not None:
ctx.grasp_distribution[:] = AVAILABLE_COLOR_KEY
ctx.grasp_distribution[~snaps.proposable] = UNAVAILABLE_COLOR_KEY
| 33,511 |
Python
| 47.357864 | 225 | 0.630778 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/behavior/control.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import time
from typing import Callable
import numpy as np
from omni.isaac.cortex.df import DfAction, DfDecider, DfDecision, DfLogicalState
from srl.teleop.assistance.proposals import PlacementProposal, make_approach_params_for_proposal, sigmoid
from .scene import ContextTools, SceneContext
from .motion import PullTowardConfig, Reset
from srl.teleop.assistance.motion_commander import VelocityMotionCommand, MotionCommand, calc_shifted_approach_target
from srl.teleop.assistance.transforms import T2pq, invert_T, normalized, pq2T, R_to_rot_vector
from srl.teleop.assistance.ui import AssistanceMode, ControlFrame
from srl.spacemouse.buttons import SpaceMouseButtonDebouncer, DEVICE_BUTTON_STRUCT_INDICES
from srl.spacemouse.spacemouse import SpaceMouse
from omni.isaac.core.utils.rotations import euler_angles_to_quat
import quaternion
class ControlContext(DfLogicalState):
CONTROL_MAPPING = {
# Pro Mouse
"CTRL": "ASSIST",
"ALT": "ASSIST",
"ESC": "ASSIST",
"SHIFT": "GRIPPER",
"ROLL CLOCKWISE": "SWAP VIEW",
"F": "SWAP VIEW",
"T": "SWAP VIEW",
"R": "SWAP VIEW",
"ROTATION": "SWAP VIEW",
"FIT": "HOME",
"MENU": "HOME",
# 2 Button Mouse
"LEFT": "GRIPPER",
"RIGHT": "ASSIST"
}
COMMAND_TO_BUTTONS = {}
def __init__(self, tools: ContextTools, spacemouse: SpaceMouse, control_frame: ControlFrame, assistance_mode: AssistanceMode, scene_context: SceneContext, avoid_obstacles: bool):
super().__init__()
for k, v in ControlContext.CONTROL_MAPPING.items():
ControlContext.COMMAND_TO_BUTTONS[v] = ControlContext.COMMAND_TO_BUTTONS.get(v, []) + [k]
self.tools = tools
self.command = None
self.button_command_names = ("GRIPPER", None, "ASSIST", None, None, "HOME", "SWAP VIEW")
self.button_command = False, False, False, False, False, False, None
self.spacemouse = spacemouse
self.spacemouse_debouncer = SpaceMouseButtonDebouncer(DEVICE_BUTTON_STRUCT_INDICES[self.spacemouse.name], {"SHIFT", "LEFT", "RIGHT"}, False, 0.3)
self.scene_context = scene_context
self.gripper_opened = np.sum(tools.robot.gripper.get_joint_positions()) > .05
self.monitors = [
ControlContext.monitor_control_received,
]
self.assistance_in_use = False
self.user_gave_motion = False
self.avoid_obstacles = avoid_obstacles
self.current_command_text = ""
self.control_frame = control_frame
self.assistance_mode = assistance_mode
# Needs to be provided after construction
self.selection_context = None
def monitor_control_received(self):
control = self.spacemouse.get_controller_state()
if control is None:
return
self.command = None
stamp, trans, rot, raw_buttons = control
buttons = self.spacemouse_debouncer.update(raw_buttons)
self.update_current_command_text(buttons)
def buttons_mapped(command):
value = False
for button_name in ControlContext.COMMAND_TO_BUTTONS[command]:
value |= buttons[button_name]
return value
values = []
for i, command_name in enumerate(self.button_command_names):
if isinstance(command_name, tuple):
hit = False
for sub_control in command_name:
if buttons_mapped(sub_control):
values.append(sub_control)
hit = True
if not hit:
values.append(None)
elif command_name is None:
values.append(False)
else:
values.append(buttons_mapped(command_name))
self.button_command = tuple(values)
if not np.allclose(np.hstack((trans, rot)), np.array([0,0,0,0,0,0]), atol=1e-4):
self.command = trans, rot
else:
self.command = None
def control_to_twist(self, trans, rot):
step = self.tools.world.get_physics_dt()
# Normalize control by the sim step size so increasing sim frequency doesn't make the controller more sensitive
trans = np.array(trans)
rot = np.array(rot)
trans *= step
rot *= step
# Flip X and Y to match sim
trans[[0,1]] = trans[[1,0]]
trans[1] *= -1
dori_world = quaternion.from_float_array(euler_angles_to_quat(rot))
return trans, quaternion.as_rotation_vector(dori_world)
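# Hedged worked example (illustrative): with a 1/60 s physics step, a unit x
# translation input becomes a per-step twist of magnitude 1/60, and the axis
# permutation maps device x into sim -y (`ctx` is a hypothetical instance):
# >>> trans, ang = ctx.control_to_twist((1.0, 0.0, 0.0), (0.0, 0.0, 0.0))
# >>> trans  # approx. [0.0, -1/60, 0.0]; ang is the zero rotation vector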
def update_current_command_text(self, buttons):
if buttons.value == 0:
# Nothing is being pressed right now
self.current_command_text = ""
else:
active_controls = set()
for button_name, command_name in ControlContext.CONTROL_MAPPING.items():
if buttons[button_name]:
active_controls.add(command_name)
self.current_command_text = " ".join(list(active_controls))
def get_control_frames(self, frame_preference: ControlFrame):
perm_rot = np.identity(3)
perm_rot[:, 0] *= -1
if frame_preference is ControlFrame.END_EFFECTOR:
perm = np.identity(3)
perm[:, 0] *= -1
perm[:, 2] *= -1
return perm, perm_rot
elif frame_preference is ControlFrame.MIXED:
ee_R = self.tools.commander.get_fk_R()
return ee_R.T, perm_rot
elif frame_preference is ControlFrame.WORLD:
ee_R = self.tools.commander.get_fk_R()
camera_rotated_R = ee_R.T.copy()
camera_rotated_R[:, 0] *= -1
camera_rotated_R[:, 1] *= -1
perm_rot = np.identity(3)
perm_rot[:, 1] *= 1
perm_rot[:, 2] *= -1
return camera_rotated_R, camera_rotated_R @ perm_rot
class ControlDispatch(DfDecider):
def __init__(self, view_change_callback: Callable):
super().__init__()
self.view_change_callback = view_change_callback
def enter(self):
self.add_child("reset", Reset())
self.add_child("pull_toward_config", PullTowardConfig())
self.add_child("do_nothing", DfAction())
def decide(self):
ctx = self.context
scene_ctx = self.context.scene_context
selection_ctx = self.context.selection_context
robot = ctx.tools.robot
ctx.assistance_in_use = False
ctx.user_gave_motion = False
gripper, cancel, pull, reset, bypass, modifier1, view_change = ctx.button_command
# Gripper and view change should apply no matter what other buttons are currently being held
if gripper:
# Have we already tried to open? If so, interpret as request to close
if ctx.gripper_opened:
robot.gripper.close()
else:
robot.gripper.open()
# User expressed intent to close, and we tried
ctx.gripper_opened = not ctx.gripper_opened
if view_change is not None and self.view_change_callback is not None:
self.view_change_callback(view_change)
current_proposal = selection_ctx.get_current_proposal()
if modifier1:
# Pull back to home config
return DfDecision("pull_toward_config", (robot.HOME_CONFIG))
# When we're driving the robot, repel from objects
if ctx.command is not None and ctx.avoid_obstacles:
scene_ctx.disable_near_obstacles()
else:
scene_ctx.disable_all_obstacles()
if ctx.command is not None:
if current_proposal and not bypass and \
(ctx.assistance_mode == AssistanceMode.FORCED_FIXTURE or ctx.assistance_mode == AssistanceMode.VIRTUAL_FIXTURE):
# Interface is in a mode where we're going to limit their velocities
trans, rot = ctx.command
trans = current_proposal.map_velocity_input(ctx.tools.commander.get_current_p(), trans)
else:
trans, rot = ctx.command
if ctx.assistance_mode == AssistanceMode.FORCED_FIXTURE and current_proposal:
# TODO: Move this forcing into the map_velocity_input implementation and make amount of forcing a float param
pose_T = current_proposal.T_world
pose = T2pq(pose_T)
# FIXME: no effect until I can enhance the motion command interface
frame_trans, frame_rot = ctx.get_control_frames(ctx.control_frame)
linear_vel, angular_vel = ctx.control_to_twist(trans, rot)
approach_params = None
# Shape control towards the suggestion if the user is holding that button
if pull and current_proposal:
ctx.assistance_in_use = True
prop_T = current_proposal.T_world
ee_T = ctx.tools.commander.get_fk_T()
approach_params = make_approach_params_for_proposal(current_proposal)
if approach_params:
offset_T = prop_T.copy()
offset_T[:3, 3] = calc_shifted_approach_target(prop_T, ee_T, approach_params)
else:
offset_T = prop_T
target_T = invert_T(ee_T) @ offset_T
dist_to_prop = np.linalg.norm(target_T[:3,3])
lin_to_prop = normalized(target_T[:3,3]) * np.linalg.norm(linear_vel) #min(dist_to_prop, 1/20, np.linalg.norm(linear_vel))
aa_to_prop = R_to_rot_vector(target_T[:3,:3])
theta_to_prop = np.linalg.norm(aa_to_prop)
aa_to_prop = normalized(aa_to_prop) * np.linalg.norm(angular_vel) #min(theta_to_prop, 1/20, np.linalg.norm(angular_vel))
alpha = sigmoid(-dist_to_prop, -.3, 5)
#viz_axis_named_T("twist", ee_T @ integrate_twist(lin_to_prop, aa_to_prop, 1))
#linear_vel = (1 - alpha) * linear_vel + (alpha * (lin_to_prop @ frame_trans))
#angular_vel = (1 - alpha) * angular_vel + (alpha * (aa_to_prop @ frame_rot))
linear_vel = linear_vel + (alpha * (lin_to_prop @ frame_trans))
angular_vel = angular_vel + (alpha * (aa_to_prop @ frame_rot))
ctx.tools.commander.set_command(
VelocityMotionCommand(
linear_vel,
angular_vel,
frame_trans,
frame_rot
)
)
if not pull:
# We only consider updating the proposals if the user is moving the robot.
# But if they're asking to be pulled, we won't pull the current suggestion out from under them.
# This makes the system easy to "put to rest": the user simply takes their hands off the controls.
ctx.user_gave_motion = True
return DfDecision("do_nothing")
elif pull:
# No command, just pull toward the current target
current_proposal = selection_ctx.get_current_proposal()
if current_proposal is not None:
current_proposal.T_world
ctx.assistance_in_use = True
approach_params = make_approach_params_for_proposal(current_proposal)
# current_proposal.T_obj @ invert_T(pq2T(*scene_ctx.object_in_gripper.get_world_pose()))
"""if isinstance(current_proposal, PlacementProposal):
# For some reason placements are sometimes slightly offset at the end of the pull. It seems
# to be a controller issue...
ee_delta = current_proposal.T_world @ invert_T(ctx.tools.commander.get_eef_T())
obj_delta = current_proposal.get_placement_T() @ invert_T(pq2T(*scene_ctx.object_in_gripper.get_world_pose()))
offsets = np.linalg.norm(ee_delta[:3,3]), np.linalg.norm(obj_delta[:3,3])"""
ctx.tools.commander.set_command(MotionCommand(
*T2pq(current_proposal.T_world),
approach_params=approach_params
))
else:
ctx.tools.commander.set_command(
VelocityMotionCommand(
np.array((0, 0, 0)),
np.array((0, 0, 0))
)
)
else:
ctx.tools.commander.set_command(
VelocityMotionCommand(
np.array((0,0,0)),
np.array((0,0,0))
)
)
return DfDecision("do_nothing")
| 12,838 |
Python
| 41.939799 | 182 | 0.584281 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/behavior/scene.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
import itertools
import time
from typing import Dict, List
import carb
import numpy as np
from omni.isaac.core.prims.rigid_prim import RigidPrim
from omni.isaac.cortex.dfb import DfLogicalState
from omni.isaac.debug_draw import _debug_draw
from omni.isaac.franka import KinematicsSolver
from omni.physx import get_physx_scene_query_interface
from srl.teleop.assistance.check_collision import WarpGeometeryScene
from srl.teleop.assistance.motion_commander import MotionCommander
from srl.teleop.assistance.proposals import InvalidReason, PlanePlaneProposalTable, GroupedPoseProposalTable
from srl.teleop.assistance.transforms import get_obj_poses, invert_T, pq2T, transform_dist, FrameVelocityEstimator
from srl.teleop.assistance.scene import AssistanceManipulator
class ContextTools:
def __init__(self, world,
viewport_manipulator: AssistanceManipulator,
objects: Dict[str, RigidPrim],
scene_objects: Dict[str, RigidPrim],
obstacles,
object_ghosts: List[RigidPrim],
robot,
robot_ghosts,
commander: MotionCommander,
grasp_table: GroupedPoseProposalTable,
placement_table: GroupedPoseProposalTable,
plane_table: PlanePlaneProposalTable,
geometry_scene: WarpGeometeryScene,
gripper_collision_mesh):
self.world = world
self.viewport_manipulator = viewport_manipulator
self.objects = objects
self.scene_objects = scene_objects
self.obstacles = obstacles
self.object_ghosts = object_ghosts
self.robot_ghosts = robot_ghosts
self.robot = robot
self.commander = commander
self.solver = KinematicsSolver(self.robot)
self.grasp_table = grasp_table
self.placement_table = placement_table
self.plane_table = plane_table
self.geometry_scene = geometry_scene
self.gripper_collision_mesh = gripper_collision_mesh
self.draw = _debug_draw.acquire_debug_draw_interface()
self.physx_query_interface = get_physx_scene_query_interface()
self._obj_paths_set = set([obj.prim_path for obj in self.objects.values()])
self._raycastable_paths_set = set([obj.prim_path for obj in self.scene_objects.values()]).union(self._obj_paths_set)
self.robot.set_contact_path_filter(lambda path: str(path) in self._obj_paths_set)
def ray_cast(self, position, direction, max_dist=10, offset=np.array((0,0,0)), ignore_obj_handler=lambda x: False):
origin = (position[0], position[1], position[2])
ray_dir = (direction[0], direction[1], direction[2])
last_hit = None
last_hit_dist = float("inf")
def report_all_hits(hit):
if ignore_obj_handler(hit.rigid_body):
return True
nonlocal last_hit
nonlocal last_hit_dist
if hit.distance < last_hit_dist:
last_hit_dist = hit.distance
last_hit = hit
return True
self.physx_query_interface.raycast_all(origin, ray_dir, max_dist, report_all_hits)
if last_hit:
distance = last_hit.distance
return last_hit.rigid_body, np.array(last_hit.position), np.array(last_hit.normal), distance
return None, None, None, 10000.0
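# Hedged usage sketch (illustrative): casting straight down from 1 m above the
# origin while ignoring a (hypothetical) robot prim path. On a miss the method
# returns (None, None, None, 10000.0), so callers check `body` first.
#
#   body, pos, normal, dist = tools.ray_cast(
#       np.array((0.0, 0.0, 1.0)), np.array((0.0, 0.0, -1.0)),
#       ignore_obj_handler=lambda path: path == "/World/robots/franka")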
def should_ignore_in_raycast(self, path, also_ignore=None):
if also_ignore and path == also_ignore:
return True
if path not in self._raycastable_paths_set:
return True
return False
class SceneContext(DfLogicalState):
def __init__(self, tools: ContextTools, should_suggest_grasps, should_suggest_placements):
super().__init__()
self.tools = tools
self.objects = []
for _, obj in self.tools.objects.items():
self.objects.append(obj)
self.obstacle_enabled = {}
for obs in itertools.chain(self.tools.objects.values(), self.tools.scene_objects.values()):
try:
self.tools.commander.add_obstacle(obs)
self.obstacle_enabled[obs.name] = True
except Exception:
# Not every prim can be registered as an obstacle; skip those that fail
pass
self.disable_all_obstacles()
self.should_suggest_grasps = should_suggest_grasps
self.should_suggest_placements = should_suggest_placements
self.obj_Ts = get_obj_poses(self.objects)
self.fixed_Ts = get_obj_poses(list(self.tools.scene_objects.values()))
self.scene_mesh_object_dirty = np.full((len(self.objects),), False, dtype=bool)
self.scene_mesh_changed = False
# Conservative initialization. Takes us a sim step to be able to see what's actually moving
self.moving_objects = np.full((len(self.objects),), True, dtype=bool)
self.last_movement_stamps = np.array([time.time() for _ in range(len(self.objects))])
self.object_gripper_rel_T_trackers = FrameVelocityEstimator(tools.world.get_physics_dt())
self.object_in_gripper = None
self.object_in_gripper_i = None
self.ee_vel_tracker = FrameVelocityEstimator(tools.world.get_physics_dt())
self.ee_to_obj_T = None
self.scene_mesh = None
self.last_scene_mesh_update = time.time()
self.monitors = [
SceneContext.monitor_object_movement,
SceneContext.monitor_object_in_gripper,
SceneContext.monitor_scene_mesh,
SceneContext.monitor_plane_table,
SceneContext.monitor_relative_object_dist_vel,
]
def get_obj_relative_metrics(self):
# These can be used to make heuristic decisions about which object the user is trying to interact with
metrics = []
# NOTE: intentionally disabled (dead-code guard); the loop below assumes per-object trackers
assert False
for _, obj_tracker in enumerate(self.object_gripper_rel_T_trackers):
T = obj_tracker.T_prev
# Check displacement
dist = np.linalg.norm(T[:3, 3])
vel = obj_tracker.T_vel[:3, 3]
metrics.append((dist,vel))
return metrics
def monitor_object_movement(self):
obj_poses = get_obj_poses(self.objects)
now = time.time()
dists = transform_dist(obj_poses, self.obj_Ts, .15)
time_deltas = now - self.last_movement_stamps
close_mask = dists < 0.005
last_move_timedout_mask = time_deltas > .3
self.moving_objects[close_mask & last_move_timedout_mask] = False
self.moving_objects[~close_mask] = True
self.obj_Ts[~close_mask] = obj_poses[~close_mask]
self.last_movement_stamps[~close_mask] = now
def monitor_scene_mesh(self):
self.scene_mesh_changed = False
self.scene_mesh_object_dirty |= self.moving_objects
except_gripper_obj_mask = np.full((len(self.objects)), True)
if self.object_in_gripper:
except_gripper_obj_mask[self.object_in_gripper_i] = False
if np.any(self.scene_mesh_object_dirty[except_gripper_obj_mask]) and not any(self.moving_objects) and (time.time() - self.last_scene_mesh_update) > 1.5:
obj_poses = get_obj_poses(self.objects)
self.last_scene_mesh_update = time.time()
carb.profiler.begin(1, "make_scene_mesh", active=True)
to_combine = []
to_combine_xforms = []
for obj, xform in itertools.chain(zip(self.objects, obj_poses), zip(self.tools.scene_objects.values(), self.fixed_Ts)):
if self.object_in_gripper == obj:
continue
if not hasattr(obj, 'geom'):
continue
to_combine.append(obj.geom)
to_combine_xforms.append(xform)
self.scene_mesh = self.tools.geometry_scene.combine_geometries_to_mesh(to_combine, to_combine_xforms)
carb.profiler.end(1, True)
self.scene_mesh_object_dirty[except_gripper_obj_mask] = False
# Let scene mesh consumers know they need to revalidate
self.scene_mesh_changed = True
def monitor_plane_table(self):
if not self.tools.plane_table:
return
self.tools.plane_table.update_object_poses(np.vstack((self.obj_Ts, self.fixed_Ts)))
# Let's see which facets of the object look good for placement now
# Support geometry is in object frame
self.tools.plane_table._valid[:] = InvalidReason.VALID.value
support_normals = self.tools.plane_table.get_normals_world()
scores = np.arccos(support_normals.dot((0,0,1)))
self.tools.plane_table._valid[scores > 0.25] = InvalidReason.UNREACHABLE.value
#self.tools.viewport_manipulator.manipulator.invalidate()
def monitor_relative_object_dist_vel(self):
eef_T = self.tools.commander.get_fk_T()
in_gripper_frame = invert_T(eef_T) @ self.obj_Ts
self.object_gripper_rel_T_trackers.update(in_gripper_frame)
self.ee_vel_tracker.update(eef_T)
def monitor_object_in_gripper(self):
path_in_hand = self.tools.robot.gripper_contents
for i, obj in enumerate(self.objects):
if obj.prim_path != path_in_hand:
continue
if self.object_in_gripper != obj:
# Gripper object changed, force the scene mesh to regenerate
self.scene_mesh_object_dirty[:] = True
self.object_in_gripper = obj
self.object_in_gripper_i = i
break
else:
self.object_in_gripper = None
self.object_in_gripper_i = None
return
in_gripper_pos, in_gripper_rot = self.object_in_gripper.get_world_pose()
ee_T = self.tools.commander.get_eef_T()
#viz_axis_named_T("ee_T", ee_T)
gripper_obj_T = pq2T(in_gripper_pos, in_gripper_rot)
# "subtract" out the part of the transform that goes to the ee, leaving relative transform
ee_to_obj_T = invert_T(ee_T).dot(gripper_obj_T)
self.ee_to_obj_T = ee_to_obj_T
def disable_near_obstacles(self):
ee_T = self.tools.commander.get_fk_T()
ee_p = ee_T[:3, 3]
ee_point_dir = ee_T[:3, 2]
obj_centroids = self.obj_Ts[:, :3,3]
# Displacement from the end effector to each object centroid (in world frame)
disp_to_grasp = obj_centroids - ee_p
dist_to_grasp = np.linalg.norm(disp_to_grasp, axis=1)
dir_to_obj = disp_to_grasp / dist_to_grasp[:, None]
# Cosine of the angle between the gripper z axis (pointing direction) and the
# direction to each object
should_disable_collision = ((dist_to_grasp < 0.25) & (point_dir_scores > 0.3)) | (dist_to_grasp < 0.05)
for i, should_disable in enumerate(should_disable_collision):
obj = self.objects[i]
if obj.name not in self.obstacle_enabled:
continue
active = self.obstacle_enabled[obj.name]
if should_disable and active:
self.tools.commander.disable_obstacle(self.objects[i])
self.obstacle_enabled[obj.name] = False
elif not should_disable and not active:
self.tools.commander.enable_obstacle(self.objects[i])
self.obstacle_enabled[obj.name] = True
def disable_all_obstacles(self):
for obj in self.objects:
if obj.name not in self.obstacle_enabled:
continue
active = self.obstacle_enabled[obj.name]
if active:
self.tools.commander.disable_obstacle(obj)
self.obstacle_enabled[obj.name] = False
| 11,573 |
Python
| 43.344827 | 160 | 0.634494 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/behavior/network.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from omni.isaac.cortex.dfb import DfNetwork
from srl.spacemouse.spacemouse import SpaceMouse
from ..ui import AssistanceMode, ControlFrame
from .scene import ContextTools, SceneContext
from .control import ControlDispatch, ControlContext
from .display import DispatchDisplay, DisplayContext
from .select import SelectDispatch, SelectionContext
from typing import Callable
def build_suggestion_display_behavior(tools: ContextTools, scene_context: SceneContext, control_context: ControlContext, selection_context: SelectionContext, label):
return DfNetwork(root=DispatchDisplay(), context=DisplayContext(tools, scene_context, control_context, selection_context, label))
def build_control_behavior(tools: ContextTools,
spacemouse: SpaceMouse,
control_frame: ControlFrame,
scene_context: SceneContext,
assistance_mode: AssistanceMode,
view_change_callback: Callable,
avoid_obstacles: bool):
return DfNetwork(root=ControlDispatch(view_change_callback), context=ControlContext(tools, spacemouse, control_frame, assistance_mode, scene_context, avoid_obstacles))
def build_suggestion_selection_behavior(tools: ContextTools, scene_context: SceneContext, control_context: ControlContext, use_surrogates: bool, use_snapping: bool):
return DfNetwork(root=SelectDispatch(), context=SelectionContext(tools, scene_context, control_context, use_surrogates, use_snapping))
| 1,680 |
Python
| 53.225805 | 171 | 0.738095 |
NVlabs/fast-explicit-teleop/srl/teleop/assistance/behavior/display.py
|
# Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the MIT License [see LICENSE for details].
from srl.teleop.assistance.behavior.scene import ContextTools, SceneContext
from srl.teleop.assistance.behavior.control import ControlContext
from srl.teleop.assistance.behavior.select import SelectionContext
from srl.teleop.assistance.proposals import InvalidReason, PlanePlaneProposal
from srl.teleop.assistance.transforms import invert_T, transform_dist, unpack_T
from omni.isaac.cortex.df import DfAction, DfDecider, DfDecision, DfLogicalState
import numpy as np
import quaternion
import carb
from srl.teleop.assistance.transforms import T2pq, integrate_twist_stepwise, normalized
from omni.isaac.debug_draw import _debug_draw
import time
import matplotlib.cm as cm
from matplotlib.colors import LinearSegmentedColormap
from ..profiling import profile
GRASP_FORK_START_POINTS = np.array(
[[0, -.04, -.04, 1], [0, 0, -.08, 1], [0, -.04, -.04, 1], [0, .04, -.04, 1], [0, 0, -.08, 1]])
GRASP_FORK_END_POINTS = np.array([[0, .04, -.04, 1], [0, 0, -.04, 1], [0., -.04, 0, 1], [0, .04, 0, 1], [0, 0, -.09, 1]])
AXIS_MARKER_STARTS = np.array([[0,0,0, 1], [0,0,0, 1], [0,0,0, 1]])
AXIS_MARKER_ENDS = np.array([[.05,0,0, 1], [0,.05,0, 1], [0,0,.05, 1]])
SNAPPED_COLOR_KEY = np.log(1.0)
SNAPPABLE_COLOR_KEY = np.log(0.6)
AVAILABLE_COLOR_KEY = np.log(0.1)
AVAILABLE_DOT_COLOR_KEY = np.log(0.2)
UNAVAILABLE_COLOR_KEY = float("-inf")
class DisplayContext(DfLogicalState):
def __init__(self, tools: ContextTools, scene_context: SceneContext, control_context: ControlContext, selection_context: SelectionContext, label):
super().__init__()
self.tools = tools
self.scene_context = scene_context
self.control_context = control_context
self.selection_context = selection_context
self.label = label
def get_current_robot_ghost_joint_positions(self) -> np.ndarray:
if not self.tools.robot_ghosts[0].visible:
return np.full((9,), np.NaN)
return self.tools.robot_ghosts[0].get_joint_positions()
def get_current_object_ghost_index_and_pose(self):
for i, obj in enumerate(self.tools.object_ghosts.values()):
if obj.visible:
return i, obj.get_world_pose()
return -1, (np.full((3,), np.NaN), np.full((4,), np.NaN))
class DispatchDisplay(DfDecider):
def __init__(self):
super().__init__()
self.draw = _debug_draw.acquire_debug_draw_interface()
ncolors = 256
color_array = cm.hot(np.linspace(0.,1., ncolors))
# change alpha values
color_array[:,-1] = np.linspace(0.05,0.7,ncolors)
# create a colormap object
self.cm = LinearSegmentedColormap.from_list(name='hot_alpha',colors=color_array)
self.axes_prim = None
self._last_non_empty_command_text = None
self._last_non_empty_command_stamp = -1
def enter(self):
self.add_child("show_grasp", GraspSuggestionDisplayDispatch())
self.add_child("show_placement", PlacementSuggestionDisplayDispatch())
self.add_child("show_plane_placement", PlaneSuggestionDisplayDispatch())
self.add_child("do_nothing", DfAction())
#UsdGeom.Imageable(self.context.tools.commander.target_prim.prim).MakeInvisible()
def draw_cursor_ray(self):
scene_ctx = self.context.scene_context
selection_ctx = self.context.selection_context
if not selection_ctx.cursor_ray:
ee_T = selection_ctx.tools.commander.get_fk_T()
ee_R, ee_p = unpack_T(ee_T)
# Where is the tip of the gripper pointing
ee_az = ee_T[:3, 2]
gripper_obj_path = scene_ctx.object_in_gripper.prim_path if scene_ctx.object_in_gripper else None
dir = ee_az
origin = ee_p
body, point, normal, dist = selection_ctx.tools.ray_cast(ee_p, ee_az, ignore_obj_handler=lambda path: self.context.tools.should_ignore_in_raycast(path, gripper_obj_path))
else:
origin, dir, dist = selection_ctx.cursor_ray
hit_pos = np.array(origin) + np.array(dir) * dist
self.draw.draw_lines([np.array(origin)], [hit_pos], [(.2,.2,.2, .3)], [4])
self.draw.draw_points([hit_pos], [(1, 1, 1, .6)], [16])
def draw_control_trajectory(self, v, w, v_frame, w_frame):
v_goal = v_frame @ v
w_goal = w_frame @ w
points = integrate_twist_stepwise(v_goal* 3, w_goal * 12, 2, 10)
v_dir = normalized(v_goal)
w_dir = normalized(w_goal)
twist_true = self.context.scene_context.ee_vel_tracker.get_twist()
if twist_true is None:
return
v_true = twist_true[:3]
w_true = twist_true[3:]
v_true_dir = normalized(v_true)
w_true_dir = normalized(w_true)
v_agreement = v_true_dir.dot(v_dir)
w_agreement = w_true_dir.dot(w_dir)
disagreement = 0
if not (np.allclose(v_dir, (0,0,0)) or np.allclose(v_true, (0,0,0))):
disagreement += 1 - np.abs(v_agreement)
else:
# No v goal, disagreement is just magnitude of vel
disagreement += np.linalg.norm(v_true) + np.linalg.norm(v_goal)
if not (np.allclose(w_dir, (0,0,0)) or np.allclose(w_true, (0,0,0))):
disagreement += 1 - np.abs(w_agreement)
else:
disagreement += np.linalg.norm(w_true) + np.linalg.norm(w_goal)
points_h = np.empty((len(points), 4))
points_h[:, :3] = points
points_h[:, 3] = 1
points = (self.context.tools.commander.get_eef_T() @ points_h.T).T[:, :3]
self.draw.draw_lines_spline(points.tolist(), (1.,1. - disagreement,1. - disagreement,.5), 5, False)
def update_command_text_overlay(self, label_models, new_text):
label, bg = label_models
orig_style = label.style
orig_bg_style = bg.style
if new_text == "":
# No command right now. Dim the text, but don't clear it
# until a few seconds have passed
orig_style["color"] = 0x66FFFFFF
if time.time() - self._last_non_empty_command_stamp > 3.0:
self._last_non_empty_command_text = ""
orig_bg_style["background_color"] = 0x22000000
elif new_text != self._last_non_empty_command_text:
self._last_non_empty_command_stamp = time.time()
self._last_non_empty_command_text = new_text
orig_style["color"] = 0xFFFFFFFF
orig_bg_style["background_color"] = 0x33000000
else:
self._last_non_empty_command_stamp = time.time()
label.text = self._last_non_empty_command_text
label.set_style(orig_style)
bg.set_style(orig_bg_style)
def draw_grasp_candidate_distribution(self, Ts, dist, standardize=True):
if dist is None:
return
score_probs = np.exp(dist)
max_prob = np.max(np.abs(score_probs), axis=0)
if max_prob == 0:
return
non_zero_mask = score_probs != 0
        if standardize:
            # Standardization of the probability mass is not implemented; scores are drawn as-is
            pass
n_grasps = sum(non_zero_mask)
n_points = len(GRASP_FORK_START_POINTS)
starts = Ts[non_zero_mask][:,None] @ GRASP_FORK_START_POINTS[None,:, :, None]
starts = np.reshape(starts, (-1, 4))[..., :3]
ends = Ts[non_zero_mask][:,None] @ GRASP_FORK_END_POINTS[None,:, :, None]
ends = np.reshape(ends, (-1, 4))[..., :3]
colors = self.cm(score_probs[non_zero_mask])
colors = np.repeat(colors, n_points, axis=0)
sizes = np.full(n_grasps * n_points, 3)
with profile("draw_call", True):
self.context.tools.draw.draw_lines(starts.tolist(), ends.tolist(), colors.tolist(), sizes.tolist())
def draw_grasp_candidate_distribution_aggregated(self, Ts, dist, max_aggregation=True):
if dist is None or Ts is None:
return
if np.max(dist) == float("-inf"):
return
nonzero_mask = dist > float("-inf")
aggregated_Ts = Ts[nonzero_mask].copy()
# Going to aggregate grasps that only differ by flip of the palm
aggregated_Ts[:, :,(0,1)] = np.abs(aggregated_Ts[:, :,(0,1)])
aggregated_Ts[:,:,(0,1,2)] = aggregated_Ts[:,:,(0,1,2)].round(1)
# Round position to 1cm
aggregated_Ts[:,:,3] = aggregated_Ts[:,:,3].round(2)
if max_aggregation:
# Sort before unique to ensure that unique values are in contiguous blocks
sorted_indices = np.lexsort((aggregated_Ts[:,0,3], aggregated_Ts[:,2,3], aggregated_Ts[:,3,3]))
unique,unique_index, unique_inv_ind, unique_counts = np.unique(aggregated_Ts[sorted_indices[:,None], :3, np.array((0,2,3))[None,:]], return_index=True, return_inverse=True, return_counts=True, axis=0)
            # unique_counts gives repetition counts for the returned unique items, but we want
            # those counts ordered to match our original lexsorted input
            sorted_unique_inv_ind = unique_inv_ind[np.sort(unique_index)]
            # Take a max over the contiguous blocks
            slice_indices = np.empty(len(unique_counts), dtype=int)
            slice_indices[0] = 0
            slice_indices[1:] = unique_counts[sorted_unique_inv_ind].cumsum()[:-1]
score_probs = np.maximum.reduceat(np.exp(dist[nonzero_mask][sorted_indices]),slice_indices)
unique_Ts = Ts[nonzero_mask][sorted_indices][np.sort(unique_index)]
else:
            unique, unique_index, unique_inv_ind = np.unique(aggregated_Ts[:, :3][:, :, (0, 2, 3)], return_index=True, return_inverse=True, axis=0)
score_probs = np.zeros(len(unique), dtype=float)
# Obscure but useful capability explained here:
# https://stackoverflow.com/questions/55735716/how-to-sum-up-for-each-distinct-value-c-in-array-x-all-elements-yi-where-xi
np.add.at(score_probs, unique_inv_ind, np.exp(dist[nonzero_mask]))
unique_Ts = Ts[nonzero_mask][unique_index]
n_grasps = len(unique_Ts)
n_points = len(GRASP_FORK_START_POINTS)
starts = unique_Ts[:,None] @ GRASP_FORK_START_POINTS[None,:, :, None]
starts = np.reshape(starts, (-1, 4))[..., :3]
ends = unique_Ts[:,None] @ GRASP_FORK_END_POINTS[None,:, :, None]
ends = np.reshape(ends, (-1, 4))[..., :3]
colors = self.cm(score_probs)
colors = np.repeat(colors, n_points, axis=0)
sizes = np.full(n_grasps * n_points, 4)
with profile("draw_call", True):
self.context.tools.draw.draw_lines(starts.tolist(), ends.tolist(), colors.tolist(), sizes.tolist())
def draw_placement_distribution_aggregated(self, Ts, dist, max_aggregation=True):
if dist is None or Ts is None:
return
if np.max(dist) == float("-inf"):
return
nonzero_mask = dist > float("-inf")
aggregated_Ts = Ts[nonzero_mask].copy() @ self.context.scene_context.ee_to_obj_T
# Round position to 1cm
aggregated_Ts[:,:,3] = aggregated_Ts[:,:,3].round(2)
if max_aggregation:
sorted_indices = np.lexsort((aggregated_Ts[:,0,3], aggregated_Ts[:,1,3], aggregated_Ts[:,2,3]))
unique,unique_index, unique_inv_ind, unique_counts = np.unique(aggregated_Ts[sorted_indices, :3, 3], return_index=True, return_inverse=True, return_counts=True, axis=0)
            # unique_counts gives repetition counts for the returned unique items, but we want
            # those counts ordered to match our original lexsorted input
            sorted_unique_inv_ind = unique_inv_ind[np.sort(unique_index)]
            slice_indices = np.empty(len(unique_counts), dtype=int)
            slice_indices[0] = 0
            slice_indices[1:] = unique_counts[sorted_unique_inv_ind].cumsum()[:-1]
score_probs = np.maximum.reduceat(np.exp(dist[nonzero_mask][sorted_indices]),slice_indices)
unique_Ts = Ts[nonzero_mask][sorted_indices][np.sort(unique_index)]
else:
            unique, unique_index, unique_inv_ind = np.unique(aggregated_Ts[:, :3, 3], return_index=True, return_inverse=True, axis=0)
score_probs = np.zeros(len(unique), dtype=float)
# Obscure but useful capability explained here:
# https://stackoverflow.com/questions/55735716/how-to-sum-up-for-each-distinct-value-c-in-array-x-all-elements-yi-where-xi
np.add.at(score_probs, unique_inv_ind, np.exp(dist[nonzero_mask]))
unique_Ts = Ts[nonzero_mask][unique_index]
n_grasps = len(unique_Ts)
points = unique_Ts[:200,:3, 3]
colors = np.array(self.cm(score_probs)[:200])
sizes = np.full(len(points), 12)
with profile("draw_call", True):
self.context.tools.draw.draw_points(points.tolist(), colors.tolist(), sizes.tolist())
def draw_motion_target_axis(self, T):
starts = np.squeeze(T @ AXIS_MARKER_STARTS[:,:,None])
ends = np.squeeze(T @ AXIS_MARKER_ENDS[:,:,None])
colors = np.array([[1,0,0, .8], [0,1,0, .8], [0,0,1,.8]])
sizes = np.full(len(AXIS_MARKER_STARTS), 10)
self.draw.draw_lines(starts.tolist(), ends.tolist(), colors.tolist(), sizes.tolist())
self.draw.draw_points([T[:3,3].tolist()], [[0.3,0.3,0.3,.8]], [16])
def decide(self):
ctx = self.context
scene_ctx = self.context.scene_context
control_ctx = self.context.control_context
selection_ctx = self.context.selection_context
self.update_command_text_overlay(ctx.label, control_ctx.current_command_text)
self.draw_cursor_ray()
#self.draw_motion_target_axis(pq2T(*ctx.tools.commander.target_prim.get_world_pose()))
placement_proposal = selection_ctx.current_placement_proposal
is_plane_proposal = isinstance(placement_proposal, PlanePlaneProposal)
if control_ctx.user_gave_motion:
trans, rot = control_ctx.command
frame_trans, frame_rot = control_ctx.get_control_frames(control_ctx.control_frame)
linear_vel, angular_vel = control_ctx.control_to_twist(trans, rot)
self.draw_control_trajectory(linear_vel, angular_vel, frame_trans, frame_rot)
if scene_ctx.object_in_gripper is not None:
if scene_ctx.should_suggest_placements:
with profile("viz_placement_dist", True):
props = scene_ctx.tools.placement_table[scene_ctx.object_in_gripper_i]
self.draw_placement_distribution_aggregated(props._poses_world, selection_ctx.placement_distribution, max_aggregation=True)
if placement_proposal is not None:
if is_plane_proposal:
return DfDecision("show_plane_placement", placement_proposal)
else:
return DfDecision("show_placement", placement_proposal)
# There's something in the gripper but no proposal yet.
else:
if scene_ctx.should_suggest_grasps:
with profile("viz_dist", True):
self.draw_grasp_candidate_distribution_aggregated(ctx.tools.grasp_table._poses_world, selection_ctx.grasp_distribution, max_aggregation=True)
return DfDecision("show_grasp")
return DfDecision("do_nothing")
class DisplayGripperSuggestionGhost(DfAction):
def enter(self):
self.currently_showing = None, None
def step(self):
ghost, display_config, color, opacity = self.params
_, current_config = self.currently_showing
if current_config is None or not np.allclose(display_config, current_config):
ghost.set_joint_positions(display_config)
ghost.show(gripper_only=True)
self.currently_showing = ghost, display_config
ghost.set_color(color, opacity)
def exit(self):
ghost, _, _, _ = self.params
ghost.hide()
class DisplayObjectSuggestionGhost(DfAction):
def __init__(self):
self._currently_showing = (None, None)
def enter(self):
pass
def step(self):
ghost, T, color, opacity = self.params
self.set_currently_showing(ghost, T)
ghost.set_color(color, opacity)
def exit(self):
self.set_currently_showing(None, None)
def set_currently_showing(self, ghost, T):
to_show = (ghost, T)
current = self._currently_showing
if to_show == (None, None):
if current != (None, None):
current[0].hide()
else:
# We're trying to show something
if current != (None, None):
# Are we setting the same values as we're currently showing?
if ghost == current[0] and transform_dist(T, current[1], 0.15) < 0.005:
# Idempotent
return
elif ghost != current[0]:
# We're setting a different object so hide the old one
current[0].hide()
p, q = T2pq(T)
ghost.set_world_pose(p, quaternion.as_float_array(q))
ghost.show()
self._currently_showing = to_show
class GraspSuggestionDisplayDispatch(DfDecider):
"""
Governs rendering of an existing grasp proposal
"""
def enter(self):
self.add_child("display_grasp_suggestion", DisplayGripperSuggestionGhost())
self.add_child("do_nothing", DfAction())
def decide(self):
ctx = self.context
selection_ctx = self.context.selection_context
proposal = selection_ctx.current_grasp_proposal
if proposal is None or not proposal.valid:
return DfDecision("do_nothing")
T = proposal.T_world
if np.any(np.isnan(proposal.joint_config)):
carb.profiler.begin(1, "grasp_display.ik", active=True)
p,q = T2pq(T)
actions, success = ctx.tools.solver.compute_inverse_kinematics(
target_position=p,
target_orientation=quaternion.as_float_array(q),
)
carb.profiler.end(1, True)
if not success:
proposal.mark_invalid(InvalidReason.UNREACHABLE)
return DfDecision("do_nothing")
else:
proposal.update_eff_goal(T, actions.joint_positions.astype(float)[:-2])
display_config = np.empty(9)
display_config[:7] = proposal.joint_config
# IK Doesn't solve for the fingers. Manually set open values
display_config[7] = 0.04
display_config[8] = 0.04
# First time showing this one?
color = "white"
return DfDecision("display_grasp_suggestion", (ctx.tools.robot_ghosts[0], display_config, color, .4))
class PlacementSuggestionDisplayDispatch(DfDecider):
"""
    Governs rendering of an existing placement proposal
"""
def enter(self):
self.add_child("display_placement_suggestion", DisplayObjectSuggestionGhost())
self.add_child("do_nothing", DfAction())
def decide(self):
ctx = self.context
scene_ctx = self.context.scene_context
proposal = self.params
placement_T = proposal.get_placement_T()
eff_T_goal = placement_T @ invert_T(scene_ctx.ee_to_obj_T)
eff_pq = T2pq(eff_T_goal)
actions, success = ctx.tools.solver.compute_inverse_kinematics(
target_position=eff_pq[0],
target_orientation=quaternion.as_float_array(eff_pq[1]),
)
if not success:
proposal.mark_invalid(InvalidReason.UNREACHABLE)
return DfDecision("do_nothing")
eef_T = ctx.tools.commander.get_fk_T()
dist_to_placement = transform_dist(eef_T, eff_T_goal, 0.15)
if dist_to_placement < .02:
return DfDecision("do_nothing")
object_ghost = ctx.tools.object_ghosts[scene_ctx.object_in_gripper.name + "_ghost"]
color = "white"
return DfDecision("display_placement_suggestion", (object_ghost, placement_T, color, .4))
class PlaneSuggestionDisplayDispatch(DfDecider):
"""
    Governs rendering of an existing plane placement proposal
"""
def enter(self):
self.add_child("display_placement_suggestion", DisplayObjectSuggestionGhost())
self.add_child("do_nothing", DfAction())
def decide(self):
ctx = self.context
scene_ctx = self.context.scene_context
proposal = self.params
eff_T_goal = proposal.T_world
placement_T = eff_T_goal @ scene_ctx.ee_to_obj_T
eef_T = ctx.tools.commander.get_eef_T()
dist_to_placement = transform_dist(eef_T, eff_T_goal, 0.15)
eff_pq = T2pq(eff_T_goal)
"""actions, success = ctx.tools.solver.compute_inverse_kinematics(
target_position=eff_pq[0],
target_orientation=quaternion.as_float_array(eff_pq[1]),
)
if not success:
proposal.mark_invalid(InvalidReason.UNREACHABLE)
return DfDecision("do_nothing")"""
if dist_to_placement < .02:
return DfDecision("do_nothing")
object_ghost = ctx.tools.object_ghosts[scene_ctx.object_in_gripper.name + "_ghost"]
color = "white"
return DfDecision("display_placement_suggestion", (object_ghost, placement_T, color, .2))
| 21,459 |
Python
| 44.952891 | 212 | 0.613356 |
NVlabs/fast-explicit-teleop/config/extension.toml
|
[core]
reloadable = true
order = 0
[package]
version = "1.0.0"
category = "Simulation"
title = "SRL Teleop Assist"
description = "Extension for Fast Explicit-Input Assistance for Teleoperation in Clutter"
authors = ["NVIDIA"]
repository = ""
keywords = ["isaac", "teleoperation", "manipulation"]
changelog = "docs/CHANGELOG.md"
readme = "README.md"
preview_image = "data/preview.png"
icon = "data/icon.png"
writeTarget.kit = true
[dependencies]
"omni.kit.uiapp" = {}
"omni.kit.quicklayout" = {}
"omni.usd" = []
"omni.ui.scene" = {}
"omni.kit.viewport.utility" = {}
"omni.physx" = {}
"omni.isaac.dynamic_control" = {}
"omni.isaac.ui" = {}
"omni.isaac.core" = {}
"omni.isaac.cortex" = {}
"omni.isaac.franka" = {}
"omni.isaac.motion_generation" = {}
"srl.spacemouse" = {}
[[python.module]]
name = "srl.teleop.base_sample"
[[python.module]]
name = "srl.teleop.assistance"
[[python.module]]
name = "srl.teleop.analysis"
[[test]]
timeout = 960
[python.pipapi]
requirements = ["numpy", "rtree", "scipy", "trimesh", "h5py"]
use_online_index = true
| 1,047 |
TOML
| 20.387755 | 89 | 0.671442 |
NVlabs/fast-explicit-teleop/docs/CHANGELOG.md
|
**********
CHANGELOG
**********
| 34 |
Markdown
| 4.833333 | 10 | 0.264706 |
Kaedim/omniverse-extension/README.md
|
# Extension Project Template
This project is the Kaedim Omniverse plugin.
It currently supports login functionality and file import.
Documentation on how to use the plugin is linked here: https://docs.google.com/document/d/1fqclE2bfRH_RgXyCNmCKX2DZF5y6ZT9OYANDNyyiY5M/edit?usp=sharing
| 286 |
Markdown
| 39.999994 | 151 | 0.835664 |
Kaedim/omniverse-extension/exts/kaedim.extension/kaedim/extension/extension.py
|
import omni.ext
import omni.ui as ui
import json
import os
import omni.kit.commands
import omni.usd
from pxr import Sdf
from urllib.request import Request, urlretrieve, urlopen
from urllib.error import URLError
import http.client
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class KaedimExtensionExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def load_credentials(self):
filepath = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'credentials.json')
# Check if the file exists
if os.path.isfile(filepath):
# File exists, open it and try to read 'devID'
with open(filepath, 'r') as file:
data = json.load(file)
self.devID = data.get('devID', None)
self.apiKey = data.get('apiKey', None)
self.refreshToken = data.get('refreshToken', None)
self.jwt = data.get('jwt', None)
def update_json_file(self, kv_pairs):
filepath = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'credentials.json')
data = {}
if os.path.isfile(filepath):
with open(filepath, 'r') as f:
data = json.load(f)
# Replace existing keys with new values, or add new keys
for key, value in kv_pairs.items():
data[key] = value
with open(filepath, 'w') as f:
json.dump(data, f, indent=4)
def login(self, devID, apiKey):
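        # Register a webhook with the Kaedim API; on success the response includes a JWT
        # that is stored and sent as the Authorization header on later requests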
conn = http.client.HTTPSConnection("api.kaedim3d.com")
payload = json.dumps({
"devID": devID,
"destination": "https://nvidia.kaedim3d.com/hook"
})
headers = {
'X-API-Key': apiKey,
'Content-Type': 'application/json'
}
conn.request("POST", "/api/v1/registerHook", payload, headers)
res = conn.getresponse()
data = res.read()
print(data)
data = json.loads(data)
if data["status"] == "success":
self.jwt = data["jwt"]
return True
return False
def refresh_jwt(self, devID, apiKey, rToken):
print(rToken)
payload = json.dumps({
"devID": devID
})
headers = {
'X-API-Key': apiKey,
'refresh-token': rToken,
'Content-Type': 'application/json'
}
conn = http.client.HTTPSConnection("api.kaedim3d.com")
conn.request("POST", "/api/v1/refreshJWT", payload, headers)
res = conn.getresponse()
data = res.read()
print(data)
data = json.loads(data)
if data["status"] == "success":
return data["jwt"]
return None
def login_panel(self, ext_id):
with self._window.frame:
devID = ui.SimpleStringModel()
apiKey = ui.SimpleStringModel()
rToken = ui.SimpleStringModel()
def on_connect():
jwt = ''
res = self.login(devID.as_string, apiKey.as_string)
if res:
label.text = 'Successfully logged in'
jwt = self.refresh_jwt(devID.as_string, apiKey.as_string, rToken.as_string)
if res and jwt is not None:
credentials = {
"devID" : devID.as_string,
"apiKey": apiKey.as_string,
"refreshToken": rToken.as_string,
"jwt": jwt
}
self.devID = devID.as_string
self.apiKey = apiKey.as_string
self.refreshToken = rToken.as_string
self.jwt = jwt
self.update_json_file(credentials)
self.load_ui(ext_id)
label.text = 'Successfully logged in'
else:
                    label.text = 'Oops! Something went wrong, please try again'
with ui.VStack():
ui.Label("Please enter your credentials:")
ui.Spacer(height=10)
ui.Label("DevID:")
ui.Spacer(height=5)
ui.StringField(model=devID, alignment=ui.Alignment.H_CENTER)
ui.Spacer(height=10)
ui.Label("Api-Key:")
ui.Spacer(height=5)
ui.StringField(model=apiKey, alignment=ui.Alignment.H_CENTER)
ui.Spacer(height=10)
ui.Label("Refresh-Token:")
ui.Spacer(height=5)
ui.StringField(model=rToken, alignment=ui.Alignment.H_CENTER)
ui.Spacer(height=5)
label = ui.Label("")
ui.Spacer(height=10)
ui.Button("Conect", clicked_fn=on_connect)
def asset_library(self, ext_id):
def import_asset():
asset = self.selected_asset
            if not asset: return
valid_iterations = [i for i in asset['iterations'] if i['status'] == 'completed' or i['status']=='uploaded']
latest_version = max(valid_iterations, key=lambda x: x['iterationID'])
results = latest_version['results']
name = asset['image_tags'][0]
requestID = asset['requestID']
if type(results) == dict:
file_path = check_and_download_file(requestID, results['obj'], 'obj')
omni.kit.commands.execute("CreateReference",
path_to=Sdf.Path("/World/"+name), # Prim path for where to create the reference
asset_path=file_path, # The file path to reference. Relative paths are accepted too.
usd_context=omni.usd.get_context()
)
def fetch_assets():
conn = http.client.HTTPSConnection("api.kaedim3d.com")
print('data', self.devID, self.apiKey, self.jwt)
payload = json.dumps({
"devID": self.devID
})
headers = {
'X-API-Key': self.apiKey,
'Authorization': self.jwt,
'Content-Type': 'application/json'
}
conn.request("GET", "/api/v1/fetchAll/", payload, headers)
res = conn.getresponse()
data = res.read()
print(data)
jwt = ''
            if res.status == 401:
                self.jwt = self.refresh_jwt(self.devID, self.apiKey, self.refreshToken)
                jwt = self.jwt  # keep the refreshed token so it gets persisted below
                headers["Authorization"] = self.jwt
conn.request("GET", "/api/v1/fetchAll/", payload, headers)
res = conn.getresponse()
data = res.read()
if res.status == 200:
if jwt:
credentials = {"jwt": self.jwt}
self.update_json_file(credentials)
data = json.loads(data)
assets = data["assets"]
asset_library_ui(assets)
if len(assets) <= 0:
print('No assets')
else:
print('Ok')
else:
print('Error')
def check_and_download_file(filename, url, filetype):
# Make sure the folder path exists
ext_manager = omni.kit.app.get_app().get_extension_manager()
ext_path = ext_manager.get_extension_path(ext_id)
folder_path = ext_path + "/data"
if not os.path.exists(folder_path):
print(f"The folder {folder_path} does not exist.")
return
file_path = os.path.join(folder_path, filename + '.' + filetype)
            # Check if the file exists
if not os.path.isfile(file_path):
# Download and save the file from the url
try:
urlretrieve(url, file_path)
# print(f"File downloaded and saved as {filename} in the folder {folder_path}.")
except Exception as e:
print(f"Error occurred while downloading the file: {e}")
return file_path
def select_asset(asset):
self.selected_asset = asset
def isCompleted(asset):
completedIterations = [i for i in asset['iterations'] if i['status']=='completed' or i['status']=='uploaded']
return len(completedIterations) > 0
def logout():
emptyCredentials = {'devID':'','apiKey':'','jwt':'','refreshToken':''}
self.update_json_file(emptyCredentials)
self.login_panel(ext_id)
def asset_library_ui(assets):
self.selected_asset = None
with self._window.frame:
with ui.VStack():
with ui.HStack(height=20):
ui.Button('Refresh', height=20, clicked_fn=fetch_assets)
ui.Button('Logout', height=20, clicked_fn=logout)
with ui.ScrollingFrame():
with ui.Grid(ui.Direction(2), column_width=120, row_height=120):
for asset in assets:
url = asset['image'][0]
source_url = check_and_download_file(asset['requestID'], url, 'png')
name = asset['image_tags'][0]
completed = isCompleted(asset)
if not completed: name = name + '\n' + asset['iterations'][len(asset['iterations'])-1]['status']
ui.Button(name, enabled=completed, image_url=source_url, clicked_fn=lambda asset=asset: select_asset(asset))
ui.Button('Import', height=20, clicked_fn=import_asset)
fetch_assets()
def load_ui(self, ext_id):
with self._window.frame:
ui.Button('Load assets', clicked_fn=lambda ext_id=ext_id: self.asset_library(ext_id))
def on_startup(self, ext_id):
self._window = ui.Window("Kaedim Extension", width=300, height=300)
self.jwt = ''
self.load_credentials()
if not self.devID or not self.apiKey or not self.refreshToken:
self.login_panel(ext_id)
else:
print('User already logged in', self.devID)
self.asset_library(ext_id)
def on_shutdown(self):
print("kaedim extension shutdown")
return
| 10,793 |
Python
| 40.675676 | 140 | 0.523765 |
Kaedim/omniverse-extension/exts/kaedim.extension/docs/README.md
|
# Kaedim Omniverse Extension [kaedim.extension]
This project is the Kaedim Omniverse plugin.
It currently supports login functionality and file import.
Documentation on how to use the plugin is linked here: https://docs.google.com/document/d/1fqclE2bfRH_RgXyCNmCKX2DZF5y6ZT9OYANDNyyiY5M/edit?usp=sharing
| 304 |
Markdown
| 37.124995 | 151 | 0.828947 |
SoulVisionCreations/avataar_omniverse_ext/README.md
|
# Omniverse Extension
This extension enables users to seamlessly integrate 3D objects created using Avataar Incarnate App on the Omniverse platform.
## Install & Enable the Avataar Extension
https://github.com/SoulVisionCreations/avataar_omniverse_ext/assets/117340215/4f632c56-9236-4a68-b9b1-be225e381f77
## Instructions to add this Extension
To add this extension to your Omniverse app:
1. Go into: `Extension Manager` -> `Hamburger Icon` -> `Settings` -> `Extension Search Path`
2. Add this as a search path: `git://github.com/SoulVisionCreations/avataar_omniverse_ext.git?branch=master&dir=exts`
Alternatively:
1. Download or Clone the extension, unzip the file if downloaded
2. Copy the `exts` folder path within the extension folder
   - i.e. `/home/.../avataar_omniverse_ext/exts` (Linux) or `C:/.../avataar_omniverse_ext/exts` (Windows)
3. Go into: `Extension Manager` -> `Hamburger Icon` -> `Settings` -> `Extension Search Path`
4. Add the `exts` folder path as a search path
Make sure no filter is enabled and in both cases you should be able to find the new extension in the `Third Party` tab list.
## Usage
https://github.com/SoulVisionCreations/avataar_omniverse_ext/assets/117340215/c3bf6f78-14c8-4543-ae00-833b643b12d7
| 1,241 |
Markdown
| 48.679998 | 126 | 0.768735 |
SoulVisionCreations/avataar_omniverse_ext/exts/incarnate.omniverse/incarnate/omniverse/extension.py
|
import asyncio
import os
from os.path import abspath
from inspect import getsourcefile
from pathlib import Path
import shutil
import urllib.request
import zipfile
import carb
import omni.ext
import omni.ui as ui
import omni.kit.asset_converter
import omni.usd
from pxr import Sdf
import warnings
import usdrt
import time
warnings.filterwarnings("ignore")
class IncarnateOmniverseExtension(omni.ext.IExt):
def progress_callback(self, current_step: int, total: int):
print(f"{current_step} of {total}")
async def convert_asset_to_usd(self, input_asset: str, output_usd: str):
converter_context = omni.kit.asset_converter.AssetConverterContext()
# ... (other configurations)
instance = omni.kit.asset_converter.get_instance()
task = instance.create_converter_task(input_asset, output_usd, self.progress_callback, converter_context)
success = await task.wait_until_finished()
if not success:
carb.log_error(task.get_status(), task.get_detailed_error())
print("converting done")
async def on_click_async(self, input_url):
newfolder = os.path.join(self.download_path, input_url.model.get_value_as_string().split("/")[-1][:-4])
os.makedirs(newfolder, exist_ok=True)
self.destination_path = os.path.join(newfolder, input_url.model.get_value_as_string().split("/")[-1])
with urllib.request.urlopen(input_url.model.get_value_as_string()) as response, open(self.destination_path, 'wb') as out_file:
shutil.copyfileobj(response, out_file)
with zipfile.ZipFile(self.destination_path, 'r') as zip_ref:
zip_ref.extractall(newfolder)
await self.convert_asset_to_usd(os.path.join(newfolder,'obj', input_url.model.get_value_as_string().split("/")[-1][:-4] + ".obj"), os.path.join(newfolder, input_url.model.get_value_as_string().split("/")[-1][:-4] + ".usd"))
object_id = input_url.model.get_value_as_string().split("/")[-1][:-4]
asset_path = os.path.join(self.download_path, object_id, f'{object_id}.usd')
omni.kit.commands.execute(
'CreatePayloadCommand',
usd_context=omni.usd.get_context(),
path_to=Sdf.Path(f'/World/{object_id}'),
asset_path=asset_path,
instanceable=False)
self.objects.append(object_id)
def on_startup(self, ext_id):
print("[incarnate.omniverse] incarnate omniverse startup")
self.objects = []
self._count = 0
cur_path = Path(abspath(getsourcefile(lambda:1)))
self.currentdirectory = str(cur_path.parent.absolute())
self.download_path = os.path.join(self.currentdirectory,"downloads")
os.makedirs(self.download_path, exist_ok=True)
self._window = ui.Window("Incarnate Avataar Extension", width=300, height=200)
with self._window.frame:
with ui.VStack():
ui.Label("Enter mesh link from Avataar Creator")
self.user_profile = os.path.expanduser("~")
input_f = ui.StringField()
ui.Button("Import and View", clicked_fn=lambda: asyncio.ensure_future(self.on_click_async(input_f)))
def on_shutdown(self):
omni.kit.commands.execute(
'DeletePrimsCommand',
paths=[Sdf.Path(f'/World/{object_id}') for object_id in self.objects])
objs = os.listdir(self.download_path)
for obj in objs:
try:
shutil.rmtree(os.path.join(self.download_path,obj))
            except Exception:
print("Unable to delete")
print("[incarnate.omniverse] incarnate omniverse shutdown")
| 3,677 |
Python
| 39.417582 | 231 | 0.643731 |
SoulVisionCreations/avataar_omniverse_ext/exts/incarnate.omniverse/incarnate/omniverse/assetconverter.py
|
import carb
import omni
import omni.kit.asset_converter
# Progress of processing.
def progress_callback (current_step: int, total: int):
# Show progress
print(f"{current_step} of {total}")
# Convert asset file(obj/fbx/glTF, etc) to usd.
async def convert_asset_to_usd (input_asset: str, output_usd: str):
# Input options are defaults.
converter_context = omni.kit.asset_converter.AssetConverterContext()
converter_context.ignore_materials = False
converter_context.ignore_camera = False
converter_context.ignore_animations = False
converter_context.ignore_light = False
converter_context.export_preview_surface = False
converter_context.use_meter_as_world_unit = False
converter_context.create_world_as_default_root_prim = True
converter_context.embed_textures = True
converter_context.convert_fbx_to_y_up = False
converter_context.convert_fbx_to_z_up = False
converter_context.merge_all_meshes = False
converter_context.use_double_precision_to_usd_transform_op = False
converter_context.ignore_pivots = False
converter_context.keep_all_materials = True
converter_context.smooth_normals = True
instance = omni.kit.asset_converter.get_instance()
task = instance.create_converter_task(input_asset, output_usd, progress_callback, converter_context)
# Wait for completion.
success = await task.wait_until_finished()
if not success:
carb.log_error(task.get_status(), task.get_detailed_error())
print("converting done")
| 1,501 |
Python
| 39.594594 | 103 | 0.746835 |
SoulVisionCreations/avataar_omniverse_ext/exts/incarnate.omniverse/docs/README.md
|
# Incarnate Avataar Extension
Bring in 3D outputs from Avataar's Creator Platform and Avataar's Incarnate Scanning App into Nvidia Omniverse.
## Description
This extension allows an Omniverse user to directly import 3D mesh outputs from Avataar's Incarnate scanning app (available for free on Apple's App Store) and to directly import from Avataar's creator platform (at creator.avataar.ai).
Key features:
1. Ease of 3D Model Creation - Avataar's mobile scanning app makes 3D model creation easy. Simply scan any object and get a 3D model mesh output that is importable into Omniverse in a matter of a few hours of processing time.
2. Ease of 3D Model import - easily download your 3D model from Avataar's creator platform (at creator.avataar.ai). You'll be able to download an .obj mesh file of the object that you scanned with the Avataar mobile app and import this 3D model into any omniverse scene.
| 904 |
Markdown
| 89.499991 | 270 | 0.797566 |
BeanSamuel/Exchange-Rate-Prediction-RL/env.py
|
from copy import deepcopy
from time import time
from enum import Enum
import numpy as np
import matplotlib.pyplot as plt
import gymnasium as gym
import pandas as pd
import torch
from torch.distributions import Categorical
from utils.runner import Runner
class Actions(Enum):
Buy_NTD = 0
Buy_AUD = 1
Buy_CAD = 2
Buy_EUR = 3
Buy_GBP = 4
Buy_HKD = 5
Buy_JPY = 6
Buy_SGD = 7
Buy_USD = 8
class Positions(Enum):
    # The currency currently held
NTD = 0
AUD = 1
CAD = 2
EUR = 3
GBP = 4
HKD = 5
JPY = 6
SGD = 7
USD = 8
def opposite(self, action):
return Positions(action)
class TradingEnv(gym.Env):
metadata = {'render_modes': ['human'], 'render_fps': 3}
def __init__(self, df, window_size, render_mode=None):
assert df.ndim == 2
assert render_mode is None or render_mode in self.metadata['render_modes']
self.render_mode = render_mode
self.df = df
self.window_size = window_size
self.prices, self.signal_features = self._process_data()
self.shape = (window_size, self.signal_features.shape[1])
# spaces
self.action_space = gym.spaces.Discrete(len(Actions))
INF = 1e10
self.observation_space = gym.spaces.Box(
low=-INF, high=INF, shape=self.shape, dtype=np.float32,
)
# episode
self._start_tick = self.window_size
self._end_tick = len(self.prices) - 1
self._truncated = None
self._current_tick = None
self._last_trade_tick = None
self._position = None
self._position_history = None
self._last_position = None
self._action = None
self._total_reward = None
self._total_profit = None
self._first_rendering = None
self.history = None
def reset(self, seed=None, options=None):
super().reset(seed=seed, options=options)
self.action_space.seed(int((self.np_random.uniform(0, seed if seed is not None else 1))))
self._truncated = False
self._current_tick = self._start_tick
self._last_trade_tick = self._current_tick - 1
self._position = Positions.NTD
self._position_history = (self.window_size * [None]) + [self._position]
self._action = 0
self._total_reward = 0.
self._total_profit = 1. # unit
self._first_rendering = True
self.history = {}
observation = self._get_observation()
info = self._get_info()
if self.render_mode == 'human':
self._render_frame()
return observation, info
def step(self, action):
# print(action)
self._action = action
self._truncated = False
self._current_tick += 1
if self._current_tick == self._end_tick:
self._truncated = True
step_reward = self._calculate_reward(action)
self._total_reward += step_reward
self._update_profit(action)
trade = False
if action != self._position.value:
trade = True
if trade:
self._last_position = self._position
self._position = self._position.opposite(action)
self._last_trade_tick = self._current_tick
self._position_history.append(self._position)
observation = self._get_observation()
info = self._get_info()
self._update_history(info)
if self.render_mode == 'human':
self._render_frame()
return observation, step_reward, self._truncated, info
def _get_info(self):
return dict(
total_reward=self._total_reward,
total_profit=self._total_profit,
position=self._position
)
def _get_observation(self):
return self.signal_features[self._current_tick - self.window_size:self._current_tick]
def _update_history(self, info):
if not self.history:
self.history = {key: [] for key in info.keys()}
for key, value in info.items():
self.history[key].append(value)
def _render_frame(self):
self.render()
def choice_price_col(self, position, buy_or_sell="買入"):
        # Column names follow the pattern "<CUR>即期買入/賣出" (bank-side spot buy/sell rates)
        if position == Positions.NTD:
            return None
        return self.prices[f'{position.name}即期{buy_or_sell}'].to_numpy()
def render(self, mode='human'):
def _plot_position():
            # A trade happened
            if self._action != self._position.value:
                # Now holding a foreign currency (i.e. one was just bought)
                if self._position != Positions.NTD:
                    # Buys are marked in red
                    buy_price_col = self.choice_price_col(self._position)
                    plt.scatter(self._current_tick, buy_price_col[self._current_tick], color='red')
                # The previous position was not NTD (i.e. a foreign currency was sold)
                if self._last_position != Positions.NTD:
                    # Sells are marked in green
                    sell_price_col = self.choice_price_col(self._last_position)
                    plt.scatter(self._current_tick, sell_price_col[self._current_tick], color='green')
start_time = time()
if self._first_rendering:
self._first_rendering = False
plt.cla()
plt.plot(self.prices['AUD即期買入'].to_numpy(), label="AUD")
plt.plot(self.prices['CAD即期買入'].to_numpy(), label="CAD")
plt.plot(self.prices['EUR即期買入'].to_numpy(), label="EUR")
plt.plot(self.prices['GBP即期買入'].to_numpy(), label="GBP")
plt.plot(self.prices['HKD即期買入'].to_numpy(), label="HKD")
plt.plot(self.prices['JPY即期買入'].to_numpy(), label="JPY")
plt.plot(self.prices['SGD即期買入'].to_numpy(), label="SGD")
plt.plot(self.prices['USD即期買入'].to_numpy(), label="USD")
# plt.yscale('log')
plt.legend(bbox_to_anchor=(1.0, 1.0))
            # Mark the starting point in blue
plt.scatter(self._current_tick, self.prices['AUD即期買入'].to_numpy()[self._current_tick], color='blue')
plt.scatter(self._current_tick, self.prices['CAD即期買入'].to_numpy()[self._current_tick], color='blue')
plt.scatter(self._current_tick, self.prices['EUR即期買入'].to_numpy()[self._current_tick], color='blue')
plt.scatter(self._current_tick, self.prices['GBP即期買入'].to_numpy()[self._current_tick], color='blue')
plt.scatter(self._current_tick, self.prices['HKD即期買入'].to_numpy()[self._current_tick], color='blue')
plt.scatter(self._current_tick, self.prices['JPY即期買入'].to_numpy()[self._current_tick], color='blue')
plt.scatter(self._current_tick, self.prices['SGD即期買入'].to_numpy()[self._current_tick], color='blue')
plt.scatter(self._current_tick, self.prices['USD即期買入'].to_numpy()[self._current_tick], color='blue')
_plot_position()
plt.suptitle(
"Total Reward: %.6f" % self._total_reward + ' ~ ' +
"Total Profit: %.6f" % self._total_profit
)
end_time = time()
process_time = end_time - start_time
pause_time = (1 / self.metadata['render_fps']) - process_time
assert pause_time > 0., "High FPS! Try to reduce the 'render_fps' value."
plt.pause(pause_time)
def render_all(self, title=None):
plt.cla()
plt.plot(self.prices['AUD即期買入'].to_numpy(), label="AUD")
plt.plot(self.prices['CAD即期買入'].to_numpy(), label="CAD")
plt.plot(self.prices['EUR即期買入'].to_numpy(), label="EUR")
plt.plot(self.prices['GBP即期買入'].to_numpy(), label="GBP")
plt.plot(self.prices['HKD即期買入'].to_numpy(), label="HKD")
plt.plot(self.prices['JPY即期買入'].to_numpy(), label="JPY")
plt.plot(self.prices['SGD即期買入'].to_numpy(), label="SGD")
plt.plot(self.prices['USD即期買入'].to_numpy(), label="USD")
plt.legend(bbox_to_anchor=(1.0, 1.0))
last_positions = Positions.NTD
for i, position in enumerate(self._position_history):
            if position is not None:
                # A trade happened
                if position != last_positions:
                    # Now holding a foreign currency (i.e. one was just bought)
                    if position != Positions.NTD:
                        price_col = self.choice_price_col(position)
                        plt.scatter(i, price_col[i], color='red')
                    # The previous position was not NTD (i.e. a foreign currency was sold)
                    if last_positions != Positions.NTD:
                        price_col = self.choice_price_col(last_positions)
                        plt.scatter(i, price_col[i], color='green')
last_positions = self._position_history[i]
if title:
plt.title(title)
plt.suptitle(
"Total Reward: %.6f" % self._total_reward + ' ~ ' +
"Total Profit: %.6f" % self._total_profit
)
def close(self):
plt.close()
def save_rendering(self, filepath):
plt.savefig(filepath)
def pause_rendering(self):
plt.show()
def _process_data(self):
raise NotImplementedError
def _calculate_reward(self, action):
raise NotImplementedError
def _update_profit(self, action):
raise NotImplementedError
class ForexEnv(TradingEnv):
def __init__(self, cfg):
self.config = cfg
self.cfg = cfg = cfg['task']['env']
self.train_df = pd.read_csv(cfg['train_data'])
self.train_df.replace("-", 0, inplace=True)
self.test_df = pd.read_csv(cfg['test_data'])
self.test_df.replace("-", 0, inplace=True)
self.frame_bound = cfg['frame_bound']
self.num_envs = cfg['num_envs']
self.window_size = cfg['window_size']
super().__init__(self.train_df, self.window_size, None)
self.num_obs = int(np.prod(self.observation_space.shape)) + 9
self.num_actions = int(np.prod(self.action_space.shape))
self.num_values = 1
self.obs = torch.zeros([self.num_envs, self.num_obs], dtype=torch.float)
self.reset()
def reset_done(self):
if self._truncated:
Runner.logger.log({'total profit': self._total_profit})
self.obs, _ = self.reset()
self.compute_obs()
return self.obs
def compute_obs(self):
ct_obs = [0] * 9
ct_obs[self._position.value] = 1
self.obs = torch.tensor(self.obs)
obs = list(self.obs.flatten()) + ct_obs
self.obs = torch.tensor(obs, dtype=torch.float).reshape(1, self.num_obs)
def step(self, action):
self.obs, rew, reset, _ = super().step(action[0].item())
Runner.logger.log({'reward': rew})
self.compute_obs()
rew = torch.tensor(rew, dtype=torch.float).reshape(1, 1)
reset = torch.tensor(reset, dtype=torch.long).reshape(1, 1)
return self.obs, rew, reset, {}
def _update_profit(self, action):
        # A trade happened
        if action != self._position.value:
            # Previously holding a foreign currency
            if self._position != Positions.NTD:
                # The "sell" rate here is the bank's side, i.e. the investor's buy price
                buy_price_col = self.choice_price_col(self._position, "賣出")
                buy_price = buy_price_col[self._last_trade_tick]
                # The "buy" rate here is the bank's side, i.e. the investor's sell price
                sell_price_col = self.choice_price_col(self._position, "買入")
                sell_price = sell_price_col[self._current_tick]
                self._total_profit = (self._total_profit / buy_price) * sell_price
        # Episode end: liquidate any remaining foreign currency back to NTD
if self._truncated:
if action != Actions.Buy_NTD.value:
buy_price_col = self.choice_price_col(Positions(action), "賣出")
buy_price = buy_price_col[self._last_trade_tick]
sell_price_col = self.choice_price_col(Positions(action), "買入")
sell_price = sell_price_col[self._current_tick]
self._total_profit = (self._total_profit / buy_price) * sell_price
def get_total_profit(self):
return self._total_profit
def _calculate_reward(self, action):
reward = 0
if self._position == Positions.NTD:
reward = 0
else:
price_col = self.choice_price_col(self._position)
current_price = price_col[self._current_tick]
last_day_price = price_col[self._current_tick-1]
reward = (current_price - last_day_price) / last_day_price
return reward * 100
        # reward = 0
        #
        # if action != self._position.value:
        #     # Previously holding a foreign currency
        #     if self._position != Positions.NTD:
        #         # The "sell" rate here is the bank's side, i.e. the investor's buy price
        #         buy_price_col = self.choice_price_col(self._position, "賣出")
        #         buy_price = buy_price_col[self._last_trade_tick]
        #
        #         # The "buy" rate here is the bank's side, i.e. the investor's sell price
        #         sell_price_col = self.choice_price_col(self._position, "買入")
        #         sell_price = sell_price_col[self._current_tick]
        #         reward = (self._total_profit / buy_price) * sell_price - self._total_profit
        #
        #     # Episode end
        #     elif self._truncated:
        #         if action != Actions.Buy_NTD.value:
        #             buy_price_col = self.choice_price_col(Positions(action), "賣出")
        #             buy_price = buy_price_col[self._last_trade_tick]
        #
        #             sell_price_col = self.choice_price_col(Positions(action), "買入")
        #             sell_price = sell_price_col[self._current_tick]
        #
        #             reward = (self._total_profit / buy_price) * sell_price - self._total_profit
        #
        # return reward * 1000
def _process_data(self):
start = self.frame_bound[0] - self.window_size
end = self.frame_bound[1]
prices = self.df.iloc[start:end, :].filter(like='即期')
        # The features to use can be adjusted here
signal_features = self.df.iloc[:, 1:].to_numpy()[start:end]
return prices, signal_features
def test(self):
frame_bounds = [(10, 100), (10, 300), (10, 800)]
mean_profit = 0
for frame_bound in frame_bounds:
cfg = deepcopy(self.config)
cfg['task']['env']['train_data'] = self.cfg['test_data']
cfg['task']['env']['frame_bound'] = frame_bound
env = ForexEnv(cfg)
env.obs, _ = env.reset()
env.compute_obs()
while True:
action = self.agent.model.get_action(env.obs, test=True)
obs, reward, done, info = env.step(action)
if done:
mean_profit += env.get_total_profit()
break
mean_profit /= len(frame_bounds)
Runner.logger.log({'test profit': mean_profit})
return mean_profit
def save(self):
return None
def load(self, x):
pass
| 15,508 |
Python
| 33.851685 | 112 | 0.559195 |
BeanSamuel/Exchange-Rate-Prediction-RL/run.py
|
import cProfile
from utils.hydra_cfg.hydra_utils import *
from utils.runner import Runner
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
def run(cfg):
Runner.init(cfg)
if cfg.profile:
cProfile.runctx("Runner.run()", globals(), locals(), "profile.pstat")
else:
Runner.run()
def runs(cfg):
#
# # policy gradient
# cfg.train.name = 'PGAgent'
#
# # reward
# cfg.train.params.config.tau = 0
# cfg.train.params.config.gamma = 0
# run(cfg)
#
# cfg.train.params.config.tau = 0.75
# cfg.train.params.config.gamma = 0.75
# run(cfg)
#
# # mlp size
# cfg.train.params.model.actor_mlp = [32, 32]
# cfg.train.params.model.critic_mlp = [32, 32]
# cfg.train.params.config.learning_rate = 1e-3
# cfg.train.params.config.minibatch_size = 512
# run(cfg)
#
# # batch size
# cfg.train.params.model.actor_mlp = [256, 256]
# cfg.train.params.model.critic_mlp = [256, 256]
# cfg.train.params.config.learning_rate = 1e-3
# cfg.train.params.config.minibatch_size = 64
# run(cfg)
#
# # lr
# cfg.train.params.model.actor_mlp = [256, 256]
# cfg.train.params.model.critic_mlp = [256, 256]
# cfg.train.params.config.learning_rate = 1e-2
# cfg.train.params.config.minibatch_size = 512
# run(cfg)
# ppo
cfg.train.name = 'PPOAgent'
cfg.train.params.model.actor_mlp = [256, 256]
cfg.train.params.model.critic_mlp = [256, 256]
cfg.train.params.config.learning_rate = 1e-3
cfg.train.params.config.minibatch_size = 512
run(cfg)
@hydra.main(config_name="config", config_path="./cfg")
def parse_hydra_configs(cfg: DictConfig):
if cfg.debug:
cfg.wandb = cfg.debug == "wandb"
cfg.save = cfg.debug == "save"
cfg.task.env.num_envs = 1
runs(cfg)
elif cfg.test:
cfg.wandb = False
cfg.save = False
cfg.task.env.num_envs = 1
cfg.train.params.config.minibatch_size = 1
runs(cfg)
else:
runs(cfg)
Runner.close()
if __name__ == "__main__":
parse_hydra_configs()
| 2,138 |
Python
| 23.872093 | 77 | 0.606642 |
BeanSamuel/Exchange-Rate-Prediction-RL/README.md
|
# 2023 Artificial Intelligence Hw2 -- Multi-Currency Exchange-Rate Prediction (RL)
## Task
Use reinforcement learning to make foreign-exchange investment decisions.
## Environment rules
- There are 9 currencies in total: 8 foreign currencies plus NTD
- At any time step 𝑖 only one currency can be held
- Initial capital = 1 NTD
- **Observation**: exchange-rate data from day 𝑖 − 10 to day 𝑖 (𝑖 ≥ 10)
- **Action**: buy one of the 8 foreign currencies or buy NTD, 9 actions in total
- Buying a new currency first sells the currently held currency and then buys the new one (both at cash rates)
- Buys use the bank's "cash sell" rate and sells use the bank's "cash buy" rate; the spread between them is the bank's fee (both rates are quoted from the bank's side)
- If the currency to buy is the same as the currency held, nothing happens
- **Position**: the currency currently held, one of 9
- **Total Reward**: 0 while holding NTD; otherwise the percentage change of the held currency's rate between day 𝑖 - 1 and day 𝑖, accumulated over all days
- **Total Profit**: capital growth ratio = final capital
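For example, each round trip (buy a foreign currency, later sell it back) multiplies the capital by the ratio of the two bank-side rates. A minimal sketch of this bookkeeping, with made-up rates rather than actual data:
```python
# Buy USD with all capital at the bank's sell rate, exit at the bank's buy rate.
bank_sell_at_entry = 31.50  # bank sells USD -> investor buys (hypothetical rate)
bank_buy_at_exit = 31.80    # bank buys USD -> investor sells (hypothetical rate)

total_profit = 1.0  # initial capital: 1 NTD
total_profit = (total_profit / bank_sell_at_entry) * bank_buy_at_exit
print(total_profit)  # ~1.0095: about +0.95% growth after the bank's spread
```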
## Environment
Python version: Python 3.10.9
```cmd!
pip install -r requirements.txt
```
## Model Training
Run the following command in train.ipynb:
```cmd!
!python run.py
```
## Model Prediction
Run the following command in train.ipynb:
```cmd!
!python run.py test=True test_data='./test.csv'
```
| 757 |
Markdown
| 19.486486 | 74 | 0.690885 |
BeanSamuel/Exchange-Rate-Prediction-RL/learning/actor_critic_model.py
|
from copy import deepcopy
import torch
from torch import nn
from torch.distributions import Categorical
from .utils import neg_log_p, eval_no_grad, Identity, RunningMeanStd
class Mlp(nn.Module):
def __init__(
self,
in_size, hidden_size, out_size=None,
activation: nn.Module = nn.ReLU(),
output_activation: nn.Module = nn.Identity()
):
super().__init__()
model = []
self.sizes = sizes = [in_size] + hidden_size
for x, y in zip(sizes[:-1], sizes[1:]):
model.append(nn.Linear(x, y))
model.append(deepcopy(activation))
if out_size is not None:
model.append(nn.Linear(sizes[-1], out_size))
self.model = nn.Sequential(*model)
self.out_act = output_activation
def forward(self, x):
return self.out_act(self.model(x))
def set_spectral_norm(self):
for i, layer in enumerate(self.model):
if isinstance(layer, nn.Linear):
self.model[i] = nn.utils.spectral_norm(layer)
class ActorCriticModel(nn.Module):
def __init__(self, config):
super().__init__()
self.obs_size = config['num_obs']
self.action_size = config['num_actions']
self.value_size = config['num_values']
self.actor = self.Actor(self.obs_size, config['actor_mlp'], self.action_size)
self.critic = self.Critic(self.obs_size, config['critic_mlp'], self.value_size)
normalize = lambda x: (x - x.mean()) / (x.std() + 1e-8)
self.normalize_obs = RunningMeanStd(self.obs_size) if config['normalize_obs'] else Identity()
self.normalize_value = RunningMeanStd(self.value_size) if config['normalize_value'] else Identity()
self.normalize_advantage = normalize if config['normalize_advantage'] else Identity()
self.preproc_advantage = lambda x: self.normalize_advantage(x.mean(dim=-1))
class Actor(nn.Module):
def __init__(self, obs_size, mlp_size, action_size):
super().__init__()
            self.mu = Mlp(obs_size, mlp_size, 9, output_activation=nn.Softmax(dim=-1))  # 9 outputs: one probability per currency/action
def forward(self, x):
return self.mu(x)
class Critic(nn.Module):
def __init__(self, obs_size, mlp_size, value_size):
super().__init__()
self.value = Mlp(obs_size, mlp_size, value_size)
def forward(self, x):
return self.value(x)
@eval_no_grad
def get_action(self, obs, train=False, test=False):
obs = self.normalize_obs(obs)
mu = self.actor(obs)
if train:
return mu
elif test:
return torch.argmax(mu, dim=-1)
else:
action_dist = Categorical(mu)
action = action_dist.sample()
return action, -action_dist.log_prob(action)
@eval_no_grad
def get_value(self, obs, train=False):
obs = self.normalize_obs(obs)
value = self.critic(obs)
if train:
return value
else:
return self.normalize_value(value, unnorm=True)
| 3,072 |
Python
| 33.144444 | 107 | 0.58724 |
BeanSamuel/Exchange-Rate-Prediction-RL/learning/pg_agent.py
|
import torch
from .ppo_agent import PPOAgent
torch.autograd.set_detect_anomaly(True)
class PGAgent(PPOAgent):
def _actor_loss(self, _, neglogp, reward):
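        # REINFORCE-style policy-gradient objective: weight each sampled action's
        # negative log-probability by its return and minimize the sum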
return (neglogp * reward).sum()
def _critic_loss(self, old_value, value, return_batch):
return 0
| 278 |
Python
| 20.461537 | 59 | 0.679856 |
BeanSamuel/Exchange-Rate-Prediction-RL/learning/experience.py
|
import gym
import torch
import numpy as np
class ExperienceBuffer:
def __init__(self, shape, env_info, device):
self.shape = tuple(shape)
self.num_obs = env_info['num_obs']
self.num_actions = env_info['num_actions']
self.num_values = env_info['num_values']
self.device = device
self.datas = {}
self.create_buffer()
def create_buffer(self):
self.add_buffer('obs', self.num_obs)
self.add_buffer('reward', self.num_values)
self.add_buffer('return', self.num_values)
self.add_buffer('value', self.num_values)
self.add_buffer('action', self.num_actions)
self.add_buffer('neglogp')
self.add_buffer('done', dtype=torch.long)
self.add_buffer('next_obs', self.num_obs)
self.add_buffer('next_value', self.num_values)
# def create_buffer(self):
# self.datas['obs'] = torch.zeros([*self.shape, self.num_obs], device=self.device)
# self.datas['reward'] = torch.zeros([*self.shape, self.num_values], device=self.device)
# self.datas['return'] = torch.zeros([*self.shape, self.num_values], device=self.device)
# self.datas['value'] = torch.zeros([*self.shape, self.num_values], device=self.device)
# self.datas['action'] = torch.zeros([*self.shape, self.num_actions], device=self.device)
# self.datas['neglogp'] = torch.zeros([*self.shape], device=self.device)
# self.datas['done'] = torch.zeros([*self.shape], dtype=torch.long, device=self.device)
# self.datas['next_obs'] = torch.zeros([*self.shape, self.num_obs], device=self.device)
# self.datas['next_value'] = torch.zeros([*self.shape, self.num_values], device=self.device)
def add_buffer(self, name, shape=(), dtype=torch.float):
shape = (shape,) if isinstance(shape, int) else tuple(shape)
self.datas[name] = torch.zeros(self.shape + shape, dtype=dtype, device=self.device)
def update_data(self, *args, **kwargs):
raise NotImplementedError
def get_data(self, *args, **kwargs):
raise NotImplementedError
class VecEnvExperienceBuffer(ExperienceBuffer):
def update_data(self, key, idx, value):
self.datas[key][idx] = value
def get_data(self):
batch_dict = {}
for k, v in self.datas.items():
s = v.shape
batch_dict[k] = v.transpose(0, 1).reshape(s[0] * s[1], *s[2:])
return batch_dict
class AsyncExperienceBuffer(ExperienceBuffer):
def __init__(self, num_actors, env_info, max_size, device):
super().__init__([max_size * 2], env_info, device)
self.size = max_size
self.run_idx = torch.zeros([num_actors], dtype=torch.long, device=self.device)
def create_buffer(self):
super().create_buffer()
self.status = torch.zeros(self.shape, dtype=torch.long, device=self.device)
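        # status per slot: 0 = free, -1 = rollout in flight, >= 1 = completed
        # transition whose value grows with age; get_data() pops the oldest ones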
self.datas['steps'] = torch.zeros([*self.shape], dtype=torch.long, device=self.device)
def update_data(self, **kwargs):
raise NotImplementedError
def pre_update_data(self, env_ids, datas: dict):
idx = (self.status == 0).nonzero().squeeze(-1)[:len(env_ids)]
self.run_idx[env_ids] = idx
for k, v in datas.items():
self.datas[k][idx] = v
self.status[idx] = -1
def post_update_data(self, env_ids, datas: dict):
idx = self.run_idx[env_ids]
for k, v in datas.items():
self.datas[k][idx] = v
self.status[self.status > 0] += 1
self.status[idx] = 1
        # TODO: verify whether this reset is actually needed
self.status[idx[datas['steps'] <= 0]] = 0
def full(self):
return torch.sum(self.status > 0) >= self.size
def get_data(self):
        if not self.full():
            raise RuntimeError('get_data() called before the buffer is full')
idx = self.status.topk(self.size, sorted=False)[1]
data = {k: v[idx] for k, v in self.datas.items()}
self.status[idx] = 0
return data
if __name__ == '__main__':
T = torch.Tensor
TL = lambda x: T(x).to(dtype=torch.long)
Z = torch.zeros
R = torch.rand
    env_info = {'num_obs': 3, 'num_actions': 2, 'num_values': 1}
buf = AsyncExperienceBuffer(5, env_info, 5, 'cpu')
buf.pre_update_data(TL([1, 3]), {'obs': T([[1, 1, 1], [2, 2, 2]])})
buf.pre_update_data(TL([2, 0]), {'obs': T([[3, 3, 3], [4, 4, 4]])})
buf.post_update_data(TL([2, 0]), {'action': T([[3, 3], [4, 4]])})
buf.post_update_data(TL([1, 3]), {'action': T([[1, 1], [2, 2]])})
buf.pre_update_data(TL([2, 0]), {'obs': T([[3, 3, 3], [4, 4, 4]])})
buf.post_update_data(TL([2, 0]), {'action': T([[3, 3], [4, 4]])})
print(buf.run_idx)
print(buf.datas['obs'], buf.datas['action'])
print(buf.status)
print(buf.get_data())
print(buf.status)
| 4,782 |
Python
| 38.204918 | 100 | 0.587411 |
BeanSamuel/Exchange-Rate-Prediction-RL/learning/utils.py
|
import numpy as np
import torch
from torch import nn
from utils.torch_utils import to_torch_size
def eval_no_grad(func):
def _eval_no_grad(self, *args, **kwargs):
if not self.training:
with torch.no_grad():
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return _eval_no_grad
class Identity(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, x, **kwargs):
return x
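# Negative log-likelihood of x under a diagonal Gaussian with parameters
# (mean, log_std), summed over the last dimension.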
def neg_log_p(x, mean, log_std):
return 0.5 * (((x - mean) / torch.exp(log_std)) ** 2).sum(dim=-1) \
+ 0.5 * np.log(2.0 * np.pi) * x.size()[-1] \
+ log_std.sum(dim=-1)
class RunningMeanStd(nn.Module):
def __init__(self, in_size, eps=1e-05):
super().__init__()
self.in_size = to_torch_size(in_size)
self.eps = eps
self.register_buffer("mean", torch.zeros(in_size, dtype=torch.float64))
self.register_buffer("var", torch.ones(in_size, dtype=torch.float64))
self.register_buffer("count", torch.ones((), dtype=torch.float64))
def _update(self, batch_mean, batch_var, batch_count):
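        # merge batch statistics into the running mean/var using the parallel
        # (Chan et al.) variance update formula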
delta = batch_mean - self.mean
m_a = self.var * self.count
m_b = batch_var * batch_count
m2 = m_a + m_b + delta**2 * self.count * batch_count / (self.count + batch_count)
self.count += batch_count
self.mean[:] = self.mean + delta * batch_count / self.count
self.var[:] = m2 / self.count
def forward(self, x, unnorm=False):
if x.nelement() == 0:
return x
if self.training and not unnorm:
axis = list(range(x.ndim - len(self.in_size)))
mean = x.mean(axis)
var = x.var(axis, correction=0)
count = x.shape[:-1].numel()
self._update(mean, var, count)
if unnorm:
y = torch.clamp(x, min=-5.0, max=5.0)
y = torch.sqrt(self.var.float() + self.eps) * y + self.mean.float()
else:
y = (x - self.mean.float()) / torch.sqrt(self.var.float() + self.eps)
y = torch.clamp(y, min=-5.0, max=5.0)
return y
| 2,193 |
Python
| 29.472222 | 89 | 0.545372 |
BeanSamuel/Exchange-Rate-Prediction-RL/learning/dataset.py
|
import torch
class Dataset:
def __init__(self, batch_size, minibatch_size, device):
self.batch_size = batch_size
self.minibatch_size = minibatch_size
self.device = device
# self.size = self.batch_size // self.minibatch_size
self._idx_buf = torch.randperm(batch_size)
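        # random permutation over the flattened batch, consumed minibatch by
        # minibatch and reshuffled once every sample has been served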
def update(self, datas):
self.datas = datas
def __len__(self):
return self.batch_size // self.minibatch_size
def __getitem__(self, idx):
start = idx * self.minibatch_size
end = (idx + 1) * self.minibatch_size
sample_idx = self._idx_buf[start:end]
data_dict = {}
for k, v in self.datas.items():
if v is not None:
data_dict[k] = v[sample_idx].detach()
if end >= self.batch_size:
self._shuffle_idx_buf()
return data_dict
def _shuffle_idx_buf(self):
self._idx_buf[:] = torch.randperm(self.batch_size)
| 969 |
Python
| 26.714285 | 60 | 0.55934 |
BeanSamuel/Exchange-Rate-Prediction-RL/learning/ppo_agent.py
|
import os
import shutil
import time
import torch
from torch import optim
from torch.distributions import Categorical
from .utils import neg_log_p
from .dataset import Dataset
from .experience import VecEnvExperienceBuffer
from .actor_critic_model import ActorCriticModel
from utils.runner import Runner
torch.autograd.set_detect_anomaly(True)
class PPOAgent:
def __init__(self, params, env):
print(f'\n------------------------------------ {self.__class__.__name__} ------------------------------------')
self.config = config = params['config']
self.device = config.get('device', 'cuda:0')
# save
self.save_freq = config.get('save_frequency', 0)
# normalize
self.normalize_obs = self.config['normalize_obs']
self.normalize_value = self.config.get('normalize_value', False)
self.normalize_advantage = config['normalize_advantage']
# learning
self.lr = config['learning_rate']
self.num_actors = env.num_envs
self.horizon_length = config['horizon_length']
self.seq_len = self.config.get('seq_length', 4)
self.max_epochs = self.config.get('max_epochs', -1)
self.mini_epochs_num = self.config['mini_epochs']
self.minibatch_size = self.config.get('minibatch_size')
self.batch_size = self.horizon_length * self.num_actors
assert (self.batch_size % self.minibatch_size == 0)
self.e_clip = config['e_clip']
self.clip_action = self.config.get('clip_actions', True)
self.clip_value = config['clip_value']
self.tau = self.config['tau']
self.gamma = self.config['gamma']
self.critic_loss_coef = config['critic_loss_coef']
self.bounds_loss_coef = self.config.get('bounds_loss_coef', None)
# env
self.env = env
self.build_env_info()
# model
self.build_model(params['model'])
self.optimizer = optim.AdamW(self.model.parameters(), self.lr, eps=1e-08, weight_decay=0)
# buffers
self.dataset = Dataset(self.batch_size, self.minibatch_size, self.device)
self.experience_buffer = VecEnvExperienceBuffer([self.horizon_length, self.num_actors], self.env_info, self.device)
# counter
self.epoch_num = 0
self.env.agent = self
def build_env_info(self):
self.env_info = dict(
num_obs=self.env.num_obs,
num_actions=self.env.num_actions,
num_values=self.env.num_values,
)
def build_model(self, config):
model = config.get('model', ActorCriticModel)
config['normalize_obs'] = self.normalize_obs
config['normalize_value'] = self.normalize_value
config['normalize_advantage'] = self.normalize_advantage
config.update(self.env_info)
self.model = model(config).to(self.device)
print(self.model)
def set_eval(self):
self.model.eval()
def set_train(self):
self.model.train()
def preproc_action(self, action):
return action.clone()
def env_step(self, action):
_action = self.preproc_action(action)
obs, reward, done, infos = self.env.step(_action)
obs = obs.to(self.device)
reward = reward.to(self.device)
done = done.to(self.device)
for k in infos.keys():
if isinstance(infos[k], torch.Tensor):
infos[k] = infos[k].to(self.device)
return obs, reward, done, infos
def env_reset_done(self):
obs = self.env.reset_done()
return obs.to(self.device)
def play_steps(self):
for n in range(self.horizon_length):
obs = self.env_reset_done()
self.experience_buffer.update_data('obs', n, obs)
value = self.model.get_value(obs)
action, neglogp = self.model.get_action(obs)
obs, reward, done, infos = self.env_step(action)
next_value = self.model.get_value(obs)
self.experience_buffer.update_data('value', n, value)
self.experience_buffer.update_data('action', n, action)
self.experience_buffer.update_data('neglogp', n, neglogp)
self.experience_buffer.update_data('reward', n, reward)
self.experience_buffer.update_data('next_obs', n, obs)
self.experience_buffer.update_data('done', n, done)
self.experience_buffer.update_data('next_value', n, next_value)
self.post_step(n, infos)
mb_done = self.experience_buffer.datas['done']
mb_value = self.experience_buffer.datas['value']
mb_next_value = self.experience_buffer.datas['next_value']
mb_reward = self.experience_buffer.datas['reward']
mb_value, mb_return, mb_adv = self.compute_return(mb_done, mb_value, mb_reward, mb_next_value)
self.experience_buffer.datas['value'] = mb_value
self.experience_buffer.datas['return'] = mb_return
self.experience_buffer.datas['advantage'] = mb_adv
batch_dict = self.experience_buffer.get_data()
return batch_dict
def train_epoch(self):
self.set_eval()
play_time_start = time.time()
batch_dict = self.play_steps()
play_time_end = time.time()
update_time_start = time.time()
self.set_train()
self.curr_frames = self.batch_size
self.dataset.update(batch_dict)
for mini_ep in range(0, self.mini_epochs_num):
for i in range(len(self.dataset)):
self.update(self.dataset[i])
self.post_epoch()
update_time_end = time.time()
play_time = play_time_end - play_time_start
update_time = update_time_end - update_time_start
total_time = update_time_end - play_time_start
return play_time, update_time, total_time
def train(self):
self.last_mean_rewards = -100500
total_time = 0
self.frame = 0
while True:
self.epoch_num += 1
play_time, update_time, epoch_time = self.train_epoch()
total_time += epoch_time
scaled_time = epoch_time
scaled_play_time = play_time
curr_frames = self.curr_frames
self.frame += curr_frames
fps_step = curr_frames / scaled_play_time
fps_total = curr_frames / scaled_time
print(f'fps step: {fps_step:.1f} fps total: {fps_total:.1f}')
if self.save_freq > 0:
if self.epoch_num % self.save_freq == 0:
Runner.save_model('Epoch' + str(self.epoch_num))
if self.epoch_num > self.max_epochs:
print('MAX EPOCHS NUM!')
return
def test(self):
self.set_eval()
score = self.env.test()
print('total profit:', score)
def post_step(self, n, infos):
pass
def post_epoch(self):
Runner.logger.upload()
if self.epoch_num % 10 == 0:
self.env.test()
def compute_return(self, done, value, reward, next_value):
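        # Generalized Advantage Estimation: sweep the rollout backwards,
        # bootstrapping from next_value and cutting the recursion at episode
        # boundaries via the done mask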
last_gae_lam = 0
adv = torch.zeros_like(reward)
done = done.float()
for t in reversed(range(self.horizon_length)):
not_done = 1.0 - done[t]
not_done = not_done.unsqueeze(1)
delta = reward[t] + self.gamma * next_value[t] - value[t]
last_gae_lam = delta + self.gamma * self.tau * not_done * last_gae_lam
adv[t] = last_gae_lam
returns = self.model.normalize_value(value + adv)
value = self.model.normalize_value(value)
adv = self.model.preproc_advantage(adv)
return value, returns, adv
def update(self, input_dict):
obs = input_dict['obs']
action = input_dict['action']
old_value = input_dict['value']
old_neglogp = input_dict['neglogp']
advantage = input_dict['advantage']
returns = input_dict['return']
mu = self.model.get_action(obs, train=True)
neglogp = -Categorical(mu).log_prob(action.squeeze(-1))
value = self.model.get_value(obs, train=True)
a_loss = self._actor_loss(old_neglogp, neglogp, advantage)
c_loss = self._critic_loss(old_value, value, returns)
b_loss = self._bound_loss(mu)
loss = a_loss + self.critic_loss_coef * c_loss + self.bounds_loss_coef * b_loss
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
Runner.logger.log({
'loss/total': loss,
'loss/actor': a_loss,
'loss/critic': c_loss,
'value/': value,
})
def log_results(self, **kwargs):
pass
def _actor_loss(self, old_neglogp, neglogp, advantage):
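        # PPO clipped surrogate objective; ratio = exp(old_neglogp - neglogp)
        # equals pi_new(a|s) / pi_old(a|s)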
ratio = torch.exp(old_neglogp - neglogp).clamp_max(2) # prevent too large loss
surr1 = advantage * ratio
surr2 = advantage * torch.clamp(ratio, 1.0 - self.e_clip, 1.0 + self.e_clip)
a_loss = torch.max(-surr1, -surr2)
return a_loss.mean()
def _critic_loss(self, old_value, value, return_batch):
if self.clip_value:
value_pred_clipped = old_value + (value - old_value).clamp(-self.e_clip, self.e_clip)
value_losses = (value - return_batch) ** 2
value_losses_clipped = (value_pred_clipped - return_batch)**2
c_loss = torch.max(value_losses, value_losses_clipped)
else:
c_loss = (return_batch - value) ** 2
return c_loss.mean()
def _bound_loss(self, mu):
if self.bounds_loss_coef is not None:
soft_bound = 1.0
mu_loss_high = torch.maximum(mu - soft_bound, torch.tensor(0, device=self.device)) ** 2
mu_loss_low = torch.minimum(mu + soft_bound, torch.tensor(0, device=self.device)) ** 2
b_loss = (mu_loss_low + mu_loss_high).sum(axis=-1)
        else:
            b_loss = torch.zeros(1, device=self.device)
return b_loss.mean()
def save(self):
return self.model.state_dict()
def load(self, datas):
self.model.load_state_dict(datas)
| 10,238 |
Python
| 33.708474 | 123 | 0.582926 |
BeanSamuel/Exchange-Rate-Prediction-RL/cfg/config.yaml
|
experiment: ''
num_envs: ''
seed: 42
torch_deterministic: False
rl_device: 'cpu'
# set the maximum number of learning iterations to train for. overrides default per-environment setting
max_iterations: ''
# RLGames Arguments
# test - if set, run policy in inference mode (requires setting checkpoint to load)
test: False
# used to set checkpoint path
checkpoint: ''
# disables rendering
headless: False
# enables native livestream
enable_livestream: False
# timeout for MT script
mt_timeout: 30
# set default task and default training config based on task
defaults:
- task: Noob
- train: ${task}PPO
- hydra/job_logging: disabled
# set the directory where the output files get saved
hydra:
output_subdir: null
run:
dir: .
render: False
debug: False
wandb: True
save: True
profile: False
test_data: ''
| 821 |
YAML
| 18.116279 | 103 | 0.74056 |
BeanSamuel/Exchange-Rate-Prediction-RL/cfg/task/Noob.yaml
|
# used to create the object
name: Noob
# if given, will override the device setting in gym.
env:
num_envs: ${resolve_default:1,${...num_envs}}
train_data: './train.csv'
test_data: ${resolve_default:'./test.csv',${...test_data}}
window_size: 10
# frame_bound: [100, 1000]
frame_bound: [1850, 2850]
# frame_bound: [10, 800]
| 337 |
YAML
| 23.142855 | 60 | 0.643917 |
BeanSamuel/Exchange-Rate-Prediction-RL/cfg/train/NoobPPO.yaml
|
name: PPOAgent
params:
seed: ${...seed}
model:
actor_mlp: [256, 256]
critic_mlp: [256, 256]
load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
load_path: ${...checkpoint} # path to the checkpoint to load
config:
device: ${....rl_device}
save_frequency: 10
normalize_obs: True
normalize_value: False
normalize_advantage: True
horizon_length: 2048
max_epochs: ${resolve_default:200,${....max_iterations}}
mini_epochs: 6
minibatch_size: 512
tau: 0.9
gamma: 0.9
e_clip: 0.2
clip_value: False
learning_rate: 1e-3
critic_loss_coef: 1
bounds_loss_coef: 10
grad_penalty_coef: 0
| 712 |
YAML
| 19.970588 | 101 | 0.627809 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/utils/torch_utils.py
|
from typing import Optional, Sequence
import torch
def to_torch_size(*size) -> torch.Size:
if len(size) == 1 and isinstance(size[0], Sequence):
torch_size = size[0]
else:
torch_size = list(size)
return torch.Size(torch_size)
| 255 |
Python
| 22.272725 | 56 | 0.647059 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/utils/wandb_logger.py
|
import torch
import wandb
# class WandbLogger:
# def __init__(self, project, run_name, log=True):
# self.data = {}
# self.data_cnt = {}
# self.is_log = log
# if log:
# wandb.init(project=project)
# wandb.run.name = run_name
# wandb.run.save()
# def stop(self):
# wandb.finish()
# def log(self, datas: dict):
# if self.is_log:
# for k, v in datas.items():
# if isinstance(v, torch.Tensor):
# if v.nelement == 0:
# v = torch.nan
# v = v.mean().item()
# n = self.data_cnt.get(k, 0)
# x = self.data.get(k, 0)
# self.data_cnt[k] = n + 1
# self.data[k] = x * n / (n+1) + v / (n+1)
# def upload(self):
# if self.is_log:
# wandb.log(self.data)
# self.data.clear()
# self.data_cnt.clear()
class WandbLogger:
def __init__(self, project, run_name, log=True):
pass
def log(self, datas: dict):
pass
def upload(self):
pass
def stop(self):
pass
| 1,176 |
Python
| 25.155555 | 58 | 0.443027 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/utils/materials.py
|
import numpy as np
from pxr import Sdf
from omni.isaac.core.materials import omni_pbr
class OmniPBR(omni_pbr.OmniPBR):
def __init__(self, name, prim_path=None, color: list = None, opacity=None, reflection=None):
if prim_path is None:
prim_path = '/World/Looks/' + name
super().__init__(prim_path, name, color=color)
if reflection is not None:
self.set_reflection_roughness(1 - reflection)
if opacity is not None:
self.set_opacity(opacity)
def set_opacity(self, value: float):
enable_opacity = value < 1
if self.shaders_list[0].GetInput("enable_opacity").Get() is None:
self.shaders_list[0].CreateInput("enable_opacity", Sdf.ValueTypeNames.Bool).Set(enable_opacity)
else:
self.shaders_list[0].GetInput("enable_opacity").Set(enable_opacity)
if self.shaders_list[0].GetInput("opacity_constant").Get() is None:
self.shaders_list[0].CreateInput("opacity_constant", Sdf.ValueTypeNames.Float).Set(value)
else:
self.shaders_list[0].GetInput("opacity_constant").Set(value)
def set_color(self, color) -> None:
super().set_color(np.array(color))
| 1,216 |
Python
| 39.566665 | 107 | 0.63898 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/utils/runner.py
|
import os
import time
import torch
import shutil
import random
import numpy as np
from datetime import datetime
from utils.hydra_cfg.hydra_utils import *
from utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict
from utils.wandb_logger import WandbLogger
class StopException(Exception):
pass
class _Runner:
def __init__(self):
pass
def init(self, cfg):
self.cfg_dict = omegaconf_to_dict(cfg)
self.test = cfg.test
self.checkpoint = cfg.checkpoint
self.clear_cmd()
self.task_name = cfg.task.name
self.start_time = datetime.now().strftime('%Y%m%d-%H%M%S')
# create save dir
self.save = cfg.save
self.run_name = self.start_time
self.task_dir = os.path.join('./runs', self.task_name)
if self.save:
self.run_dir = os.path.join(self.task_dir, self.run_name)
os.makedirs(self.run_dir, exist_ok=True)
# set seed
        cfg.seed = 42  # NOTE: hard-coded, overrides the seed from the config
torch.manual_seed(cfg.seed)
torch.cuda.manual_seed_all(cfg.seed)
np.random.seed(cfg.seed)
random.seed(cfg.seed)
# logger
self.logger = WandbLogger(self.task_name, self.start_time, cfg.wandb)
# backup code
if self.save:
code_path = './learning'
if code_path is not None:
shutil.copytree(code_path, os.path.join(self.run_dir, 'codes'))
# dump config dict
if self.save:
with open(os.path.join(self.run_dir, 'config.yaml'), 'w') as f:
f.write(OmegaConf.to_yaml(cfg))
# get env & agent
from utils.task_util import get_env_agent
self.env, self.agent = get_env_agent(self.cfg_dict)
if self.test:
if self.checkpoint == '':
self.checkpoint = max(os.listdir(self.task_dir))
# load checkpoint
if self.checkpoint:
self.load_model(self.checkpoint)
if cfg.render:
self.write_cmd('render')
def run(self):
try:
if self.test:
self.agent.test()
else:
self.agent.train()
self.stop()
except StopException:
pass
def stop(self):
self.save_model('FinalEpoch')
self.logger.stop()
raise StopException
def read_cmd(self):
try:
with open('./controller', 'r') as f:
return f.read().rstrip()
        except OSError:
            return ''
def write_cmd(self, cmd):
try:
with open('./controller', 'w') as f:
return f.write(cmd)
        except OSError:
            pass
def clear_cmd(self):
open('./controller', 'w').close()
def close(self):
pass
def control(self):
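        # poll the ./controller file for one-shot user commands
        # (save / stop / record / close / render)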
cmd = self.read_cmd()
if cmd == 'save':
self.clear_cmd()
self.save_model(f'Epoch{self.agent.epoch_num}')
elif cmd == 'stop':
self.stop()
elif cmd == 'record':
self.clear_cmd()
self.env.record(f'Epoch{self.agent.epoch_num}')
elif cmd == 'close':
self.stop()
self.close()
self.env.render = cmd == 'render'
def get_save_dir(self, sub_dir, epoch_dir=False):
if epoch_dir:
save_dir = os.path.join(self.run_dir, sub_dir, f'Epoch{self.agent.epoch_num}')
else:
save_dir = os.path.join(self.run_dir, sub_dir)
os.makedirs(save_dir, exist_ok=True)
return save_dir
def save_model(self, name):
if self.save:
path = os.path.join(self.get_save_dir('model'), name)
torch.save({'agent': self.agent.save(), 'env': self.env.save()}, path)
print(f'Save model to {path}')
def load_model(self, name, epoch=None):
epoch = 'FinalEpoch' if epoch is None else f'Epoch{epoch}'
model_dir = os.path.join(self.task_dir, name, 'model', epoch)
datas = torch.load(model_dir)
self.agent.load(datas['agent'])
self.env.load(datas['env'])
Runner = _Runner()
| 4,120 |
Python
| 26.291391 | 90 | 0.546359 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/utils/rotation_utils.py
|
import sys
from torch.autograd import Variable
import torch.distributed.algorithms
sys.path.append('/home/hardy/.local/share/ov/pkg/isaac_sim-2022.2.1/exts/omni.isaac.core')
import numpy as np
from numpy import pi, sin, cos
import plotly.express as px
import plotly.io as pio
from utils.torch_utils import *
pio.renderers.default = "browser"
# auto-shaping
def ash(func, x, in_size):
shape = x.shape[:-1]
return func(x.view(shape + (-1, in_size))).view(shape + (-1,))
@torch.jit.script
def normalize(x, eps: float = 1e-9):
return x / x.norm(p=2, dim=-1).clamp(min=eps, max=None).unsqueeze(-1)
@torch.jit.script
def normalize_angle(x):
return torch.atan2(torch.sin(x), torch.cos(x))
def rad2deg(radian_value, device=None):
return torch.rad2deg(radian_value).float().to(device)
def deg2rad(degree_value, device=None):
return torch.deg2rad(degree_value).float().to(device)
def zero_pos(shape, device=None):
return torch.zeros(to_torch_size(shape) + (3,), device=device)
def zero_pos_like(x):
return zero_pos(x.shape[:-1], x.device)
def full_pos(shape, value, device=None):
x = torch.zeros(to_torch_size(shape) + (3,), device=device)
x[:] = torch.tensor(value, device=device)
return x
def full_pos_like(x, value):
return full_pos(x.shape[:-1], value, x.device)
def identity_quat(shape, device=None):
q = torch.zeros(to_torch_size(shape) + (4,), device=device)
q[..., 0] = 1
return q
def identity_quat_like(x):
return identity_quat(x.shape[:-1], x.device)
@torch.jit.script
def quat_unit(a):
return normalize(a)
# @torch.jit.script
# def quat_mul_unnorm(a, b):
# shape = a.shape
# a = a.reshape(-1, 4)
# b = b.reshape(-1, 4)
#
# w1, x1, y1, z1 = a[:, 0], a[:, 1], a[:, 2], a[:, 3]
# w2, x2, y2, z2 = b[:, 0], b[:, 1], b[:, 2], b[:, 3]
# ww = (z1 + x1) * (x2 + y2)
# yy = (w1 - y1) * (w2 + z2)
# zz = (w1 + y1) * (w2 - z2)
# xx = ww + yy + zz
# qq = 0.5 * (xx + (z1 - x1) * (x2 - y2))
# w = qq - ww + (z1 - y1) * (y2 - z2)
# x = qq - xx + (x1 + w1) * (x2 + w2)
# y = qq - yy + (w1 - x1) * (y2 + z2)
# z = qq - zz + (z1 + y1) * (w2 - x2)
# quat = torch.stack([w, x, y, z], dim=-1).view(shape)
#
# return quat
# @torch.jit.script
# def quat_inverse(a):
# shape = a.shape
# a = a.reshape(-1, 4)
# return torch.cat((a[..., 0:1], -a[..., 1:]), dim=-1).view(shape)
@torch.jit.script
def quat_mul_unnorm(a, b):
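    # quaternion product in (w, x, y, z) convention via a reduced-
    # multiplication arrangement; the result is not re-normalized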
w1, x1, y1, z1 = a[..., 0], a[..., 1], a[..., 2], a[..., 3]
w2, x2, y2, z2 = b[..., 0], b[..., 1], b[..., 2], b[..., 3]
ww = (z1 + x1) * (x2 + y2)
yy = (w1 - y1) * (w2 + z2)
zz = (w1 + y1) * (w2 - z2)
xx = ww + yy + zz
qq = 0.5 * (xx + (z1 - x1) * (x2 - y2))
w = qq - ww + (z1 - y1) * (y2 - z2)
x = qq - xx + (x1 + w1) * (x2 + w2)
y = qq - yy + (w1 - x1) * (y2 + z2)
z = qq - zz + (z1 + y1) * (w2 - x2)
quat = torch.stack([w, x, y, z], dim=-1)
return quat
@torch.jit.script
def quat_inverse(a):
a = a.clone()
a[..., 1:] *= -1
return a
@torch.jit.script
def quat_rotate(q, v):
q_w = q[..., 0:1]
q_vec = q[..., 1:]
a = v * (2.0 * q_w ** 2 - 1.0)
b = torch.cross(q_vec, v, dim=-1) * q_w * 2.0
c = q_vec * torch.sum(q_vec * v, dim=-1, keepdim=True) * 2.0
return a + b + c
@torch.jit.script
def quat_rotate_inverse(q, v):
q_w = q[..., 0].unsqueeze(-1)
q_vec = q[..., 1:]
a = v * (2.0 * q_w ** 2 - 1.0)
b = torch.cross(q_vec, v, dim=-1) * q_w * 2.0
c = q_vec * torch.sum(q_vec * v, dim=-1, keepdim=True) * 2.0
return a - b + c
@torch.jit.script
def quat_mul(q0, q1):
return quat_unit(quat_mul_unnorm(q0, q1))
@torch.jit.script
def quat_div(x, y):
return quat_mul(quat_inverse(y), x)
@torch.jit.script
def quat_diff_rad(a, b):
eps = 1e-5
b_conj = quat_inverse(b)
mul = quat_mul_unnorm(a, b_conj)
# 2 * torch.acos(torch.abs(mul[..., -1]))
return 2.0 * torch.asin(torch.clamp(torch.norm(mul[..., 1:], p=2, dim=-1), max=1-eps, min=eps-1))
@torch.jit.script
def quat_to_angle_axis(q):
# computes axis-angle representation from quaternion q
# q must be normalized
min_theta = 1e-5
qw, qx, qy, qz = 0, 1, 2, 3
sin_theta = torch.sqrt(1 - q[..., qw] * q[..., qw])
angle = 2 * torch.acos(q[..., qw])
angle = normalize_angle(angle)
sin_theta_expand = sin_theta.unsqueeze(-1)
axis = q[..., qx:] / sin_theta_expand
mask = sin_theta > min_theta
default_axis = torch.zeros_like(axis)
default_axis[..., qw] = 1
angle = torch.where(mask, angle, torch.zeros_like(angle))
mask_expand = mask.unsqueeze(-1)
axis = torch.where(mask_expand, axis, default_axis)
return angle, axis
@torch.jit.script
def quat_from_angle_axis(angle, axis):
theta = (angle / 2).unsqueeze(-1)
xyz = normalize(axis) * theta.sin()
w = theta.cos()
return quat_unit(torch.cat([w, xyz], dim=-1))
@torch.jit.script
def angle_axis_to_exp_map(angle, axis):
# compute exponential map from axis-angle
angle_expand = angle.unsqueeze(-1)
exp_map = angle_expand * axis
return exp_map
@torch.jit.script
def quat_to_exp_map(q):
eps = 1e-5
qw = q[..., 0, None].clamp(-1+eps, 1-eps)
q_axis = q[..., 1:]
angle = normalize_angle(2 * qw.acos())
axis = q_axis / torch.sqrt(1 - qw ** 2)
return angle * axis
# @torch.jit.script
# def quat_to_exp_map(q):
# # compute exponential map from quaternion
# # q must be normalized
# angle, axis = quat_to_angle_axis(q)
# exp_map = angle_axis_to_exp_map(angle, axis)
# return exp_map
# @torch.jit.script
# def exp_map_to_angle_axis(exp_map):
# min_theta = 1e-5
#
# angle = torch.norm(exp_map, dim=-1)
# angle_exp = torch.unsqueeze(angle, dim=-1)
# axis = exp_map / angle_exp
# angle = normalize_angle(angle)
#
# default_axis = torch.zeros_like(exp_map)
# default_axis[..., -1] = 1
#
# mask = angle > min_theta
# angle = torch.where(mask, angle, torch.zeros_like(angle))
# mask_expand = mask.unsqueeze(-1)
# axis = torch.where(mask_expand, axis, default_axis)
#
# return angle, axis
# @torch.jit.script
# def exp_map_to_quat(exp_map):
# angle, axis = exp_map_to_angle_axis(exp_map)
# q = quat_from_angle_axis(angle, axis)
# return q
@torch.jit.script
def exp_map_to_quat(exp_map):
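    # exponential map -> quaternion: the rotation angle is |exp_map| and the
    # axis its direction; eps guards the zero-rotation (identity) case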
eps = 1e-12
angle = torch.norm(exp_map, dim=-1, keepdim=True)
axis = exp_map / (angle + eps)
theta = normalize_angle(angle) / 2
xyz = normalize(axis) * theta.sin()
w = theta.cos()
return quat_unit(torch.cat([w, xyz], dim=-1))
@torch.jit.script
def quat_to_tan_norm(q):
# represents a rotation using the tangent and normal vectors
ref_tan = torch.zeros_like(q[..., 0:3])
ref_tan[..., 0] = 1
tan = quat_rotate(q, ref_tan)
ref_norm = torch.zeros_like(q[..., 0:3])
ref_norm[..., -1] = 1
norm = quat_rotate(q, ref_norm)
norm_tan = torch.cat([tan, norm], dim=len(tan.shape) - 1)
return norm_tan
@torch.jit.script
def quat_from_rotation_matrix(m):
m = m.unsqueeze(0)
diag0 = m[..., 0, 0]
diag1 = m[..., 1, 1]
diag2 = m[..., 2, 2]
    # candidate magnitudes of each quaternion component (Shepperd-style extraction)
w = (((diag0 + diag1 + diag2 + 1.0) / 4.0).clamp(0.0, None)) ** 0.5
x = (((diag0 - diag1 - diag2 + 1.0) / 4.0).clamp(0.0, None)) ** 0.5
y = (((-diag0 + diag1 - diag2 + 1.0) / 4.0).clamp(0.0, None)) ** 0.5
z = (((-diag0 - diag1 + diag2 + 1.0) / 4.0).clamp(0.0, None)) ** 0.5
# Only modify quaternions where w > x, y, z.
c0 = (w >= x) & (w >= y) & (w >= z)
x[c0] *= (m[..., 2, 1][c0] - m[..., 1, 2][c0]).sign()
y[c0] *= (m[..., 0, 2][c0] - m[..., 2, 0][c0]).sign()
z[c0] *= (m[..., 1, 0][c0] - m[..., 0, 1][c0]).sign()
# Only modify quaternions where x > w, y, z
c1 = (x >= w) & (x >= y) & (x >= z)
w[c1] *= (m[..., 2, 1][c1] - m[..., 1, 2][c1]).sign()
y[c1] *= (m[..., 1, 0][c1] + m[..., 0, 1][c1]).sign()
z[c1] *= (m[..., 0, 2][c1] + m[..., 2, 0][c1]).sign()
# Only modify quaternions where y > w, x, z.
c2 = (y >= w) & (y >= x) & (y >= z)
w[c2] *= (m[..., 0, 2][c2] - m[..., 2, 0][c2]).sign()
x[c2] *= (m[..., 1, 0][c2] + m[..., 0, 1][c2]).sign()
z[c2] *= (m[..., 2, 1][c2] + m[..., 1, 2][c2]).sign()
# Only modify quaternions where z > w, x, y.
c3 = (z >= w) & (z >= x) & (z >= y)
w[c3] *= (m[..., 1, 0][c3] - m[..., 0, 1][c3]).sign()
x[c3] *= (m[..., 2, 0][c3] + m[..., 0, 2][c3]).sign()
y[c3] *= (m[..., 2, 1][c3] + m[..., 1, 2][c3]).sign()
return quat_unit(torch.stack([w, x, y, z], dim=-1)).squeeze(0)
@torch.jit.script
def quat_from_dir(v):
u = torch.zeros_like(v)
u[..., 2] = 1
xyz = torch.cross(u, v, dim=-1)
w = torch.sqrt((u ** 2).sum(-1) * (v ** 2).sum(-1)) + (u * v).sum(-1)
q = quat_unit(torch.cat([w.unsqueeze(-1), xyz], dim=-1))
q[q.abs().sum(-1) < 1e-6, [1]] = 1
return q
@torch.jit.script
def exp_map_mul(e0, e1):
shape = e0.shape[:-1] + (-1,)
q0 = exp_map_to_quat(e0.reshape(-1, 3))
q1 = exp_map_to_quat(e1.reshape(-1, 3))
return quat_to_exp_map(quat_mul(q0, q1)).view(shape)
@torch.jit.script
def exp_map_div(e0, e1):
shape = e0.shape[:-1] + (-1,)
q0 = exp_map_to_quat(e0.reshape(-1, 3))
q1 = exp_map_to_quat(e1.reshape(-1, 3))
return quat_to_exp_map(quat_div(q0, q1)).view(shape)
@torch.jit.script
def exp_map_diff_rad(e0, e1):
return quat_diff_rad(exp_map_to_quat(e0), exp_map_to_quat(e1))
@torch.jit.script
def lerp(p0, p1, t):
return (1 - t) * p0 + t * p1
# @torch.jit.script
def slerp(q0, q1, t):
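    # spherical linear interpolation between unit quaternions: flips q1 to the
    # shorter arc and falls back to a plain average when sin(half_theta) ~ 0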
qw, qx, qy, qz = 0, 1, 2, 3
cos_half_theta = q0[..., qw] * q1[..., qw] \
+ q0[..., qx] * q1[..., qx] \
+ q0[..., qy] * q1[..., qy] \
+ q0[..., qz] * q1[..., qz]
neg_mask = cos_half_theta < 0
q1 = q1.clone()
q1[neg_mask] = -q1[neg_mask]
cos_half_theta = torch.abs(cos_half_theta)
cos_half_theta = torch.unsqueeze(cos_half_theta, dim=-1)
half_theta = torch.acos(cos_half_theta)
sin_half_theta = torch.sqrt(1.0 - cos_half_theta * cos_half_theta)
ratioA = torch.sin((1 - t) * half_theta) / sin_half_theta
ratioB = torch.sin(t * half_theta) / sin_half_theta
new_q_w = ratioA * q0[..., qw:qw + 1] + ratioB * q1[..., qw:qw + 1]
new_q_x = ratioA * q0[..., qx:qx + 1] + ratioB * q1[..., qx:qx + 1]
new_q_y = ratioA * q0[..., qy:qy + 1] + ratioB * q1[..., qy:qy + 1]
new_q_z = ratioA * q0[..., qz:qz + 1] + ratioB * q1[..., qz:qz + 1]
cat_dim = len(new_q_w.shape) - 1
new_q = torch.cat([new_q_w, new_q_x, new_q_y, new_q_z], dim=cat_dim)
new_q = torch.where(torch.abs(sin_half_theta) < 0.001, 0.5 * q0 + 0.5 * q1, new_q)
new_q = torch.where(torch.abs(cos_half_theta) >= 1, q0, new_q)
return new_q
@torch.jit.script
def calc_heading(q):
# calculate heading direction from quaternion
# the heading is the direction on the xy plane
# q must be normalized
ref_dir = torch.zeros_like(q[..., 0:3])
ref_dir[..., 0] = 1
rot_dir = quat_rotate(q, ref_dir)
heading = torch.atan2(rot_dir[..., 1], rot_dir[..., 0])
return heading
@torch.jit.script
def calc_heading_quat(q):
# calculate heading rotation from quaternion
# the heading is the direction on the xy plane
# q must be normalized
heading = calc_heading(q)
axis = torch.zeros_like(q[..., 0:3])
axis[..., 2] = 1
heading_q = quat_from_angle_axis(heading, axis)
return heading_q
@torch.jit.script
def calc_heading_quat_inv(q):
# calculate heading rotation from quaternion
# the heading is the direction on the xy plane
# q must be normalized
heading = calc_heading(q)
axis = torch.zeros_like(q[..., 0:3])
axis[..., 2] = 1
heading_q = quat_from_angle_axis(-heading, axis)
return heading_q
@torch.jit.script
def normalize_pos(pos):
z = torch.zeros_like(pos)
z[..., 2] = 1
return z * pos.norm(p=2, dim=-1, keepdim=True)
def draw_exp_map(e):
draw_quaternion(exp_map_to_quat(e))
def draw_quaternion(q):
v = torch.Tensor([0, 0, 1]).repeat(len(q), 1)
v = quat_rotate(q, v)
fig = px.scatter_3d(x=v[:, 0], y=v[:, 1], z=v[:, 2])
fig.update_layout(
scene=dict(
xaxis=dict(range=[-1, 1]),
yaxis=dict(range=[-1, 1]),
zaxis=dict(range=[-1, 1]),
)
)
fig.update_scenes(aspectmode='cube')
fig_add_sphere(fig)
fig.show()
def random_quaternion(size):
return exp_map_to_quat((torch.rand([size, 3]) - 0.5) * 2 * torch.pi)
def fig_add_sphere(fig):
theta = np.linspace(0, 2 * pi, 120)
phi = np.linspace(0, pi, 60)
u, v = np.meshgrid(theta, phi)
xs = cos(u) * sin(v)
ys = sin(u) * sin(v)
zs = cos(v)
x, y, z = [], [], []
for t in [theta[10 * k] for k in range(12)]: # meridians:
x.extend(list(cos(t) * sin(phi)) + [None]) # None is inserted to mark the end of a meridian line
y.extend(list(sin(t) * sin(phi)) + [None])
z.extend(list(cos(phi)) + [None])
for s in [phi[6 * k] for k in range(10)]: # parallels
x.extend(list(cos(theta) * sin(s)) + [None]) # None is inserted to mark the end of a parallel line
y.extend(list(sin(theta) * sin(s)) + [None])
z.extend([cos(s)] * 120 + [None])
fig.add_surface(x=xs, y=ys, z=zs,
colorscale=[[0, '#ffffff'], [1, '#ffffff']],
showscale=False, opacity=0.5) # or opacity=1
fig.add_scatter3d(x=x, y=y, z=z, mode='lines', line_width=3, line_color='rgb(10,10,10)')
def _test_exp_map_diff_rad_grad():
n = 10000
print('testing...')
for _ in range(1000):
x = Variable(torch.rand([n, 3]) * 1000, requires_grad=True)
y = exp_map_diff_rad(x, torch.rand([n, 3])).mean()
y.backward()
if x.grad.isnan().any():
print(y)
print('finish')
def _test_exp_map_to_quat_grad():
n = 10000
print('testing...')
for _ in range(1):
x = Variable(torch.rand([n, 3]) * 1000, requires_grad=True)
y = exp_map_to_quat(x).mean()
y.backward()
print(x.grad)
# if x.grad.isnan().any():
# print(y)
print('finish')
def _test_quat_to_exp_map_grad():
n = 10000
print('testing...')
for _ in range(1):
x = Variable(torch.rand([n, 3]), requires_grad=True)
y = exp_map_to_quat(x)
y = quat_to_exp_map(y)
y.mean().backward()
print((y - x).sum())
print(x.grad)
# if x.grad.isnan().any():
# print(y)
print('finish')
def _test_slerp():
n = 15
q0 = random_quaternion(1).repeat(n, 1)
q1 = random_quaternion(1).repeat(n, 1)
t = torch.arange(n).float() / n
q = slerp(q0, q1, t.unsqueeze(-1))
draw_quaternion(q)
if __name__ == '__main__':
_test_quat_to_exp_map_grad()
| 15,284 |
Python
| 26.84153 | 107 | 0.537817 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/utils/control_panel.py
|
import omni.ui as ui
def _preproc_kwargs(kwargs):
for k in kwargs.keys():
if k in ['width', 'height']:
kwargs[k] = ui.Length(kwargs[k])
return kwargs
class ControlPanel:
def __init__(self, name):
self._window = ui.Window(name, auto_resize=True)
self._components = dict()
def __getitem__(self, name):
if isinstance(name, (list, tuple)):
return [self.__getitem__(x) for x in name]
item = self._components.get(name)
if isinstance(item, ui.FloatSlider):
return item.model.get_value_as_float()
elif isinstance(item, ui.CheckBox):
return item.model.get_value_as_bool()
else:
raise IndexError
def __setitem__(self, key, value):
if isinstance(key, (list, tuple)):
for k, v in zip(key, value):
self.__setitem__(k, v)
return
item = self._components.get(key)
if isinstance(item, ui.FloatField):
item.model.set_value(value)
else:
raise IndexError
def add_slider(self, name, **kwargs):
self._components[name] = lambda: ui.FloatSlider(**_preproc_kwargs(kwargs))
def add_float(self, name, **kwargs):
self._components[name] = lambda: ui.FloatField(**_preproc_kwargs(kwargs))
def add_check_box(self, name, **kwargs):
self._components[name] = lambda: ui.CheckBox(**_preproc_kwargs(kwargs))
def build(self):
with self._window.frame:
with ui.VStack(width=150):
for k, v in self._components.items():
ui.Label(k)
self._components[k] = v()
| 1,685 |
Python
| 29.654545 | 82 | 0.558457 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/utils/task_util.py
|
from env import ForexEnv
from learning.ppo_agent import PPOAgent
from learning.pg_agent import PGAgent
def get_env_agent(config):
env_map = {
'Noob': ForexEnv,
}
agent_map = {
'PPOAgent': PPOAgent,
'PGAgent': PGAgent
}
env = env_map[config['task']['name']](config)
agent = agent_map[config['train']['name']](params=config['train']['params'], env=env)
return env, agent
| 426 |
Python
| 20.349999 | 89 | 0.617371 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/utils/dash/live_plot.py
|
import os
import socket
import logging
import threading
import numpy as np
import time
import torch
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import plotly.io as pio
from dash import Dash, html, dcc, Output, Input
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
pio.renderers.default = "browser"
callback_dict = dict()
def callback(*args, **kwargs):
def wrapped(func):
global callback_dict
callback_dict[func.__name__] = (args, kwargs)
return func
return wrapped
class LivePlot:
def __init__(self, name, titles, steps):
self.name = name
self.titles = titles
self.dim_names = list(titles.keys())
self.dim_labels = list(titles.values())
self.num_dims = len(self.dim_names)
for i, labels in enumerate(self.dim_labels):
if isinstance(labels, list):
self.dim_labels[i] = ['All'] + labels
if isinstance(labels, int):
self.dim_labels[i] = ['All'] + list(map(str, range(labels)))
self.steps = 0
self.size = steps
self.time_axis = np.arange(steps)
self.free_dim = -1
self.datas = np.full([steps] + [len(x) - 1 for x in self.dim_labels], np.nan)
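        # rolling window over the most recent `steps` samples; NaN marks
        # slots that have not been filled yet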
self._build_app()
self._create_thread()
def _build_app(self):
dropdowns = []
for name, labels in zip(self.dim_names, self.dim_labels):
dropdowns.append(name)
options = {str(i): label for i, label in enumerate(labels)}
dropdowns.append(dcc.Dropdown(id=name, options=options, value='0'))
app = Dash(__name__)
app.layout = html.Div([
html.H1(children=self.name, style={'textAlign': 'center'}),
html.Div(dropdowns),
html.Div([
dcc.Graph(id='live-graph'),
dcc.Interval(
id='interval-component',
interval=16,
n_intervals=0
)
])
])
for func_name, (args, kwargs) in callback_dict.items():
func = getattr(self, func_name)
app.callback(*args, **kwargs)(func)
app.callback(
[Output(i, 'value') for i in self.dim_names],
[Input(i, 'value') for i in self.dim_names]
)(self._update_figure)
self._update_figure(*(['1'] * self.num_dims))
self._app = app
def _create_thread(self):
port = 8050
while True:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
if s.connect_ex(('localhost', port)):
break
else:
port += 1
run_server = lambda: self._app.run(host='0.0.0.0', port=port)
thread = threading.Thread(target=run_server)
thread.daemon = True
thread.start()
time.sleep(0.1)
print('live plot:', self.name, f'http://localhost:{port}')
self._thread = thread
def _update_figure(self, *values, save_path=None):
values = [str(v) for v in values]
idx = [slice(None)]
titles = [' ']
free_dim = -1
for i, v in enumerate(values):
if v == '0':
if free_dim == -1:
free_dim = i
else:
values[i] = '1'
if free_dim != self.free_dim and self.free_dim != -1:
values[self.free_dim] = '1'
self.free_dim = free_dim
for i in range(self.num_dims):
if values[i] == '0':
titles = self.dim_labels[i][1:]
idx.append(slice(None))
else:
idx.append(int(values[i]) - 1)
self.idx = tuple(idx)
self._updating = True
self.fig = go.FigureWidget(make_subplots(rows=len(titles), cols=1, subplot_titles=titles))
for i, data in enumerate(self._get_plot_data()):
self.fig.add_trace(go.Scatter(name='', x=self.time_axis, y=data), row=i+1, col=1)
self.fig.update_layout(height=200*len(titles)+100, template='plotly')
self._updating = False
if save_path is not None:
self.fig.write_html(save_path)
return values
def _get_plot_data(self):
datas = self.datas[self.idx]
return np.expand_dims(datas, 0) if datas.ndim == 1 else np.swapaxes(datas, 0, 1)
def update(self, datas):
if isinstance(datas, torch.Tensor):
datas = datas.detach().cpu().numpy()
if self.steps >= self.size:
self.time_axis += 1
self.datas[:-1] = self.datas[1:]
self.datas[-1] = datas
else:
self.datas[self.steps] = datas
self.steps += 1
while self._updating:
time.sleep(0.01)
for i, data in enumerate(self._get_plot_data()):
self.fig.data[i]['x'] = self.time_axis
self.fig.data[i]['y'] = data
@callback(
Output('live-graph', 'figure'),
Input('interval-component', 'n_intervals')
)
def _update_graph(self, n):
return self.fig
def select_labels(self, *labels):
        # TODO: update the selector labels as well
self._update_figure(*labels)
def snapshot(self, dir_path, free_dim=0):
def export(labels, names):
dim = len(labels)
if dim == self.num_dims:
name = self.name + ': ' + ' '.join(names) if names else self.name
save_path = os.path.join(dir_path, name) + '.html'
self._update_figure(*labels, save_path=save_path)
else:
if dim == free_dim:
export(labels + [0], names)
else:
for i, s in enumerate(self.dim_labels[dim][1:]):
export(labels + [i+1], names + [s])
export([], [])
def save(self, dir_path):
state = self.__dict__.copy()
state.pop('_app')
state.pop('_thread')
torch.save(state, os.path.join(dir_path, self.name + '.liveplot'))
@staticmethod
def load(path):
plot = LivePlot.__new__(LivePlot)
plot.__dict__ = torch.load(path)
plot._build_app()
plot._create_thread()
return plot
if __name__ == '__main__':
plot = LivePlot('1', {'1': ['a', 'b'], '2': 5}, 30)
plot2 = LivePlot('2', {'1': ['a', 'b'], '2': 5}, 30)
import time
for i in range(10):
plot.update(np.random.random([2, 5]))
plot2.update(np.random.random([2, 5]))
time.sleep(0.1)
| 6,737 |
Python
| 29.488688 | 98 | 0.523081 |
BeanSamuel/Exchange-Rate-Prediction-RL/NoobRL/runs/Noob/20231116-200419/config.yaml
|
task:
name: Noob
env:
num_envs: ${resolve_default:1,${...num_envs}}
train_data: ./train.csv
test_data: ${resolve_default:'./test.csv',${...test_data}}
window_size: 10
frame_bound:
- 1850
- 2850
train:
name: PPOAgent
params:
seed: ${...seed}
model:
actor_mlp:
- 256
- 256
critic_mlp:
- 256
- 256
load_checkpoint: ${if:${...checkpoint},True,False}
load_path: ${...checkpoint}
config:
device: ${....rl_device}
save_frequency: 10
normalize_obs: true
normalize_value: false
normalize_advantage: true
horizon_length: 2048
max_epochs: ${resolve_default:200,${....max_iterations}}
mini_epochs: 6
minibatch_size: 512
tau: 0.75
gamma: 0.75
e_clip: 0.2
clip_value: false
learning_rate: 0.001
critic_loss_coef: 1
bounds_loss_coef: 10
grad_penalty_coef: 0
experiment: ''
num_envs: ''
seed: 42
torch_deterministic: false
rl_device: cpu
max_iterations: ''
test: false
checkpoint: ''
headless: false
enable_livestream: false
mt_timeout: 30
render: false
debug: false
wandb: true
save: true
profile: false
test_data: ''
| 1,197 |
YAML
| 19.305084 | 62 | 0.593985 |
DimensionLab/fmmr-water-tank/server.py
|
from typing import List, Union
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from siml.siml_inferencer import WaterTankSimulator
class WaterTankSimulatorParameters(BaseModel):
inlet_velocity: float
class SimulatorSettings(BaseModel):
parameters: Union[WaterTankSimulatorParameters, None] = None
eco_mode: bool = False
class SimulatorInput(BaseModel):
parameters: Union[WaterTankSimulatorParameters, None] = None
resolution: List[int] = [32, 32, 32]
app = FastAPI()
FAKE_SIMULATORS_DB = {
"simulator123": WaterTankSimulator
}
LOADED_SIMULATORS = {}
@app.post("/init_simulator/{id}")
def init_simulator(id: str, settings: SimulatorSettings):
if id not in FAKE_SIMULATORS_DB:
raise HTTPException(status_code=404, detail="Simulator not found")
simulator_loader = FAKE_SIMULATORS_DB.get(id)
LOADED_SIMULATORS[id] = simulator_loader()
LOADED_SIMULATORS[id].eco = settings.eco_mode
LOADED_SIMULATORS[id].load_geometry()
LOADED_SIMULATORS[id].load_inferencer()
return {"message": "Simulator loaded."}
@app.post("/simulate/{id}")
def simulate(id: str, props: SimulatorInput):
if id not in LOADED_SIMULATORS:
raise HTTPException(status_code=404, detail="Simulator not loaded")
simulator = LOADED_SIMULATORS[id]
json_output = simulator.run_inference(props.parameters.inlet_velocity, props.resolution)
return json_output
# kept for testing the endpoint
@app.get("/hello")
def read_root():
return {"hello": "world"}
| 1,557 |
Python
| 24.129032 | 92 | 0.725755 |
DimensionLab/fmmr-water-tank/README.md
|
# Water tank simulator

Project commissioned for the Faculty of Materials, Metallurgy and Recycling (FMMR) at the Technical University of Košice.
This simulator mainly showcases the capabilities of a parametrized AI-based physics simulator leveraging scientific deep learning methods (physics-informed neural networks, PINNs). The simple geometry has one inlet at the top and two outlets at the bottom.
| 509 |
Markdown
| 62.749992 | 251 | 0.821218 |
DimensionLab/fmmr-water-tank/siml/siml_inferencer.py
|
import sys, os
import json
import torch
import modulus
from sympy import Symbol, Eq, Abs, tanh
import numpy as np
import logging
from typing import List, Dict, Union
from pathlib import Path
from modulus.hydra.utils import compose
from modulus.hydra import to_yaml, to_absolute_path, instantiate_arch, ModulusConfig
from modulus.models.fully_connected import FullyConnectedArch
from modulus.domain.inferencer import (
OVVoxelInferencer,
)
from modulus.key import Key
from modulus.eq.pdes.navier_stokes import NavierStokes
from modulus.eq.pdes.basic import NormalDotVec
from modulus.geometry.tessellation import Tessellation
from json import JSONEncoder
from water_tank.constants import bounds
from water_tank.src.geometry import WaterTank
class NumpyArrayEncoder(JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return JSONEncoder.default(self, obj)
class WaterTankSimulator(object):
"""Water tank Inference runner for OV scenario
Args:
cfg (ModulusConfig): Parsed Modulus config
"""
def __init__(
self,
mask_value: float = -100,
):
logging.getLogger().addHandler(logging.StreamHandler())
self.cfg = compose(config_path="../water_tank/conf", config_name="config_eval", job_name="water_tank_inference")
print(to_yaml(self.cfg))
##############################
# Nondimensionalization Params
##############################
# fluid params
# Water at 20°C (https://wiki.anton-paar.com/en/water/)
# https://en.wikipedia.org/wiki/Viscosity#Kinematic_viscosity
self.nu = 1.787e-06 # m2 * s-1
self.inlet_vel = Symbol("inlet_velocity")
self.rho = 1
self.scale = 1.0
self._eco = False
self._inferencer = None
self.bounds = bounds
self.mask_value = mask_value
@property
def eco(self):
return self._eco
@eco.setter
def eco(self, value: bool):
self._eco = value
if self._inferencer:
self._inferencer.eco = value
def load_inferencer(self, checkpoint_dir: Union[str, None] = None):
"""Create Modulus Water Tank inferencer object. This can take time since
it will initialize the model
Parameters
----------
checkpoint_dir : Union[str, None], optional
Directory to modulus checkpoint
"""
# make list of nodes to unroll graph on
ns = NavierStokes(nu=self.nu * self.scale, rho=self.rho, dim=3, time=False)
normal_dot_vel = NormalDotVec(["u", "v", "w"])
# self.progress_bar.value = 0.025
equation_nodes = (
ns.make_nodes()
+ normal_dot_vel.make_nodes()
)
# determine inputs outputs of the network
input_keys = [Key("x"), Key("y"), Key("z")]
input_keys += [Key("inlet_velocity")]
output_keys = [Key("u"), Key("v"), Key("w"), Key("p")]
# select the network and the specific configs
flow_net = FullyConnectedArch(
input_keys=input_keys,
output_keys=output_keys,
)
self.flow_nodes = equation_nodes + [
flow_net.make_node(name="flow_network", jit=self.cfg.jit)
]
invar_keys = [
Key.from_str("x"),
Key.from_str("y"),
Key.from_str("z"),
Key.from_str("inlet_velocity"),
]
outvar_keys = [
Key.from_str("u"),
Key.from_str("v"),
Key.from_str("w"),
Key.from_str("p"),
]
self._inferencer = OVVoxelInferencer(
nodes=self.flow_nodes,
input_keys=invar_keys,
output_keys=outvar_keys,
mask_value=self.mask_value,
requires_grad=False,
eco=False,
# progress_bar=self.progress_bar, # TODO: implement setting progress
)
# Load checkpointed model
if checkpoint_dir is not None:
absolute_checkpoint_dir = Path(__file__).parent / checkpoint_dir
if absolute_checkpoint_dir.resolve().is_dir():
self._inferencer.load_models(absolute_checkpoint_dir.resolve())
else:
print("Could not find checkpointed model")
# Set eco
self._inferencer.eco = self.eco
def load_geometry(self):
# normalize meshes
def normalize_mesh(mesh, center, scale):
mesh = mesh.translate([-c for c in center])
mesh = mesh.scale(scale)
return mesh
stl_path = Path(self.data_path) / Path("stl_files")
self.interior_mesh = Tessellation.from_stl(
Path(stl_path) / Path("water_tank_closed.stl"), airtight=True
)
center = (0, 0, 0)
scale = 1.0
self.interior_mesh = normalize_mesh(self.interior_mesh, center, scale)
def run_inference(
self,
inlet_velocity: float,
resolution: List[int] = [256, 256, 256],
) -> Dict[str, np.array]:
"""Runs inference for Water Tank
Args:
resolution (List[int], optional): Voxel resolution. Defaults to [256, 256, 256].
Returns:
Dict[str, np.array]: Predicted output variables
"""
if self._inferencer is None:
print("Loading inferencer")
self.load_inferencer(checkpoint_dir="./checkpoints")
print("Loading geometry")
self.load_geometry()
# Eco mode settings
if self._inferencer.eco:
batch_size = 512
memory_fraction = 0.1
else:
vram_gb = torch.cuda.get_device_properties(0).total_memory / 10**9
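            # heuristic: roughly 16k sample points per batch for every 6 GB of VRAM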
batch_size = int((vram_gb // 6) * 16 * 1024)
memory_fraction = 1.0
mask_fn = (
lambda x, y, z: self.interior_mesh.sdf({"x": x, "y": y, "z": z}, {})["sdf"]
< 0
)
sp_array = np.ones((np.prod(resolution), 1))
specific_params = {
"inlet_velocity": inlet_velocity * sp_array,
}
# Set up the voxel sample domain
self._inferencer.setup_voxel_domain(
bounds=self.bounds,
npoints=resolution,
invar=specific_params,
batch_size=batch_size,
mask_fn=mask_fn,
)
# Perform inference
invar, predvar = self._inferencer.query(memory_fraction)
return self._to_json(predvar)
@property
def data_path(self):
data_dir = Path(os.path.dirname(__file__)) / Path("../data")
return str(data_dir)
# Process ['u', 'v', 'w'] output, add 'a' as "alpha" and flatten to 1D array
def _flatten_to_uvwa(self, data):
lst = []
for i in range(len(data['u'])):
for index, item in enumerate(['u', 'v', 'w']):
pts = data[item][i]
lst.append(float(pts))
if item == 'w':
lst.append(float(1))
return lst
def _to_json(self, data):
        data['u'] = np.reshape(data['u'], (-1, 1))
        data['v'] = np.reshape(data['v'], (-1, 1))
        data['w'] = np.reshape(data['w'], (-1, 1))
numpyData = {"array": [], "uvw": self._flatten_to_uvwa(data)}
return json.dumps(numpyData, cls=NumpyArrayEncoder)
# def run_inference(self):
# self.inf_button.text = "Running Inference..."
# print("Water tank inferencer started")
# if self.simulator_runner.eco:
# resolution_x = 64
# resolution_y = 32
# resolution_z = 64
# else:
# resolution_x = 128
# resolution_y = 128
# resolution_z = 128
# if (resolution_x, resolution_y, resolution_z) != self.resolution:
# print(
# f"Initializing inferencer with a resolution of {resolution_x}*{resolution_y}*{resolution_z}"
# )
# self.resolution = [resolution_x, resolution_y, resolution_z]
# print(
# f"Will run inferencing for inlet_velocity={self.inlet_velocity}"
# )
# pred_vars = self.simulator_runner.run_inference(
# inlet_velocity=self.inlet_velocity,
# resolution=list(self.resolution),
# )
# shape = tuple(self.resolution)
# u = pred_vars["u"].reshape(shape)
# v = pred_vars["v"].reshape(shape)
# w = pred_vars["w"].reshape(shape)
# velocity = np.stack([u, v, w], axis=-1)
# if velocity.dtype != np.float32:
# velocity = velocity.astype(np.float32)
# if velocity.shape != shape + (3,):
# raise RuntimeError(f"expected shape: {shape + (3,)}; got: {velocity.shape}")
# # Change to z axis first for VTK input (not sure why)
# # Tensor comes out of inferencer in ij index form
# velocity = np.ascontiguousarray(velocity.transpose(2, 1, 0, 3))
# self.inf_progress.value = 0.95
# np.seterr(invalid="ignore")
# mask = np.where(velocity == self.simulator_runner.mask_value)
# velocity[mask] = 0.0
# velmag = np.linalg.norm(velocity, axis=3)
# # velmag = velmag / np.amax(velmag)
# minval = np.amin(velmag)
# maxval = np.amax(velmag)
# print("Test", maxval, minval)
# self._velocity = velocity
# self._velmag = velmag
# # self._mask = spatial_mask
# self._vel_mask = mask
# self._bounds = np.array(self.simulator_runner.bounds).flatten()
# print("WaterTankScenario inference ended")
# self._eval_complete = True
# self.inf_progress.value = 1.0
# self.inf_button.text = "Inference"
| 10,248 |
Python
| 31.536508 | 120 | 0.556401 |
DimensionLab/fmmr-water-tank/water_tank/src/geometry.py
|
import numpy as np
from sympy import sqrt, Max
from modulus.hydra import to_absolute_path
from modulus.geometry.tessellation import Tessellation
class WaterTank:
"""Water tank geometry"""
inlet_area = None
def __init__(self):
# read stl files to make geometry
point_path = to_absolute_path("./data/stl_files")
inlet_mesh = Tessellation.from_stl(
point_path + "/inlet.stl", airtight=False
)
outlet_left_mesh = Tessellation.from_stl(
point_path + "/outlet_left.stl", airtight=False
)
outlet_right_mesh = Tessellation.from_stl(
point_path + "/outlet_right.stl", airtight=False
)
noslip_mesh = Tessellation.from_stl(
point_path + "/water_tank_noslip.stl", airtight=False
)
interior_mesh = Tessellation.from_stl(
point_path + "/water_tank_closed.stl", airtight=True
)
# scale and normalize mesh and openfoam data
self.center = (0, 0, 0)
self.scale = 1.0
self.inlet_mesh = self.normalize_mesh(inlet_mesh, self.center, self.scale)
self.outlet_left_mesh = self.normalize_mesh(outlet_left_mesh, self.center, self.scale)
self.outlet_right_mesh = self.normalize_mesh(outlet_right_mesh, self.center, self.scale)
self.noslip_mesh = self.normalize_mesh(noslip_mesh, self.center, self.scale)
self.interior_mesh = self.normalize_mesh(interior_mesh, self.center, self.scale)
# geom params
self.inlet_normal = (0.0, 0.0, -2.0)
self.inlet_center = (0.0, 0.0, 3.0)
s = inlet_mesh.sample_boundary(nr_points=10000)
self.inlet_area = np.sum(s["area"])
print("Surface Area: {:.3f}".format(self.inlet_area))
self.inlet_radius = np.sqrt(self.inlet_area / np.pi)
s = self.interior_mesh.sample_interior(nr_points=10000, compute_sdf_derivatives=True)
print("Volume: {:.3f}".format(np.sum(s["area"])))
# inlet velocity profile
def circular_parabola(self, x, y, z, center, normal, radius, max_vel):
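        # Poiseuille-style inlet profile: max_vel at `center`, decaying
        # parabolically to zero at `radius` and clamped to zero outside it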
centered_x = x - center[0]
centered_y = y - center[1]
centered_z = z - center[2]
distance = sqrt(centered_x ** 2 + centered_y ** 2 + centered_z ** 2)
parabola = max_vel * Max((1 - (distance / radius) ** 2), 0)
return normal[0] * parabola, normal[1] * parabola, normal[2] * parabola
# normalize meshes
def normalize_mesh(self, mesh, center, scale):
mesh = mesh.translate([-c for c in center])
mesh = mesh.scale(scale)
return mesh
# normalize invars
def normalize_invar(self, invar, center, scale, dims=2):
invar["x"] -= center[0]
invar["y"] -= center[1]
invar["z"] -= center[2]
invar["x"] *= scale
invar["y"] *= scale
invar["z"] *= scale
if "area" in invar.keys():
invar["area"] *= scale ** dims
return invar
| 2,962 |
Python
| 37.480519 | 96 | 0.597232 |
DimensionLab/fmmr-water-tank/water_tank/src/water_tank.py
|
from sympy import Symbol
from modulus.hydra import instantiate_arch, ModulusConfig
from modulus.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
IntegralBoundaryConstraint,
)
from modulus.key import Key
from modulus.eq.pdes.navier_stokes import NavierStokes
from modulus.eq.pdes.basic import NormalDotVec
# params
# Water at 20°C (https://wiki.anton-paar.com/en/water/)
# https://en.wikipedia.org/wiki/Viscosity#Kinematic_viscosity
nu = 1.787e-06 # m2 * s-1
inlet_vel = Symbol("inlet_velocity")
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
# parameterization
inlet_vel_range = (0.05, 10.0)
inlet_vel_params = {inlet_vel: inlet_vel_range}
def network(cfg: ModulusConfig, scale):
# make list of nodes to unroll graph on
ns = NavierStokes(nu=nu * scale, rho=1.0, dim=3, time=False)
normal_dot_vel = NormalDotVec(["u", "v", "w"])
flow_net = instantiate_arch(
input_keys=[Key("x"), Key("y"), Key("z"), Key("inlet_velocity")],
output_keys=[Key("u"), Key("v"), Key("w"), Key("p")],
cfg=cfg.arch.fully_connected,
)
return (
ns.make_nodes()
+ normal_dot_vel.make_nodes()
+ [flow_net.make_node(name="flow_network", jit=cfg.jit)]
)
def constraints(cfg: ModulusConfig, geo, nodes, domain):
# add constraints to solver
# inlet
u, v, w = geo.circular_parabola(
x,
y,
z,
center=geo.inlet_center,
normal=geo.inlet_normal,
radius=geo.inlet_radius,
max_vel=inlet_vel,
)
inlet = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo.inlet_mesh,
outvar={"u": u, "v": v, "w": w},
batch_size=cfg.batch_size.inlet,
parameterization=inlet_vel_params
)
domain.add_constraint(inlet, "inlet")
# outlet
outlet_left = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo.outlet_left_mesh,
outvar={"p": 0},
batch_size=cfg.batch_size.outlet,
parameterization=inlet_vel_params
)
domain.add_constraint(outlet_left, "outlet_left")
outlet_right = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo.outlet_right_mesh,
outvar={"p": 0},
batch_size=cfg.batch_size.outlet,
parameterization=inlet_vel_params
)
domain.add_constraint(outlet_right, "outlet_right")
# no slip
no_slip = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo.noslip_mesh,
outvar={"u": 0, "v": 0, "w": 0},
batch_size=cfg.batch_size.no_slip,
parameterization=inlet_vel_params
)
domain.add_constraint(no_slip, "no_slip")
# interior
interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=geo.interior_mesh,
outvar={"continuity": 0, "momentum_x": 0, "momentum_y": 0, "momentum_z": 0},
batch_size=cfg.batch_size.interior,
parameterization=inlet_vel_params
)
domain.add_constraint(interior, "interior")
# Integral Continuity 1
# TODO: add integral plane somewhere into the geometry
# Integral Continuity 2
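    # each IntegralBoundaryConstraint below pins the net flow (integral of normal_dot_vel) through an outlet plane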
integral_continuity_outlet_left = IntegralBoundaryConstraint(
nodes=nodes,
geometry=geo.outlet_left_mesh,
outvar={"normal_dot_vel": 2.540},
batch_size=1,
integral_batch_size=cfg.batch_size.integral_continuity,
lambda_weighting={"normal_dot_vel": 0.1},
parameterization=inlet_vel_params
)
domain.add_constraint(integral_continuity_outlet_left, "integral_continuity_2")
# Integral Continuity 3
integral_continuity_outlet_right = IntegralBoundaryConstraint(
nodes=nodes,
geometry=geo.outlet_right_mesh,
outvar={"normal_dot_vel": 2.540},
batch_size=1,
integral_batch_size=cfg.batch_size.integral_continuity,
lambda_weighting={"normal_dot_vel": 0.1},
parameterization=inlet_vel_params
)
domain.add_constraint(integral_continuity_outlet_right, "integral_continuity_3")
| 4,051 |
Python
| 30.905512 | 84 | 0.643051 |
DimensionLab/fmmr-water-tank/water_tank/outputs/water_tank/wandb/run-20221010_114502-21ugz615/files/config.yaml
|
wandb_version: 1
_wandb:
desc: null
value:
cli_version: 0.11.2
framework: torch
is_jupyter_run: false
is_kaggle_kernel: false
python_version: 3.8.10
t:
1:
- 1
2:
- 1
- 3
4: 3.8.10
5: 0.11.2
8:
- 5
| 280 |
YAML
| 12.380952 | 27 | 0.464286 |
DimensionLab/fmmr-water-tank/water_tank/conf/config.yaml
|
defaults:
- modulus_default
- arch:
- fully_connected
- scheduler: tf_exponential_lr
- optimizer: adam
- loss: sum
- _self_
save_filetypes: "vtk,np"
custom:
external_monitor_platform:
name: wandb
entity: "michaltakac"
project: "water-tank"
api_key: "e67b70a695e41d3d00689deba4e87c6b6d4a7cdc" # get your api key from Neptune.ai
scheduler:
decay_rate: 0.95
decay_steps: 15000
training:
  rec_results_freq: 200
  rec_constraint_freq: 50000
  max_steps: 1500000
batch_size:
inlet: 650
outlet: 650
no_slip: 5200
interior: 6000
integral_continuity: 310
| 604 |
YAML
| 17.333333 | 90 | 0.688742 |
DimensionLab/fmmr-water-tank/docs/CHANGELOG.md
|
# Water tank example
## [1.0.0] - 2022-10-14
Initial version, working with Modulus 22.09
| 90 |
Markdown
| 17.199997 | 43 | 0.7 |
iMAPSRoboticsTeam/Issac_Sim_Template/SimulationModule.py
|
import omni.isaac.core.utils.stage as stage
from omni.isaac.core.robots import Robot
import numpy as np
import os
class SimulationModule(): # Rename to unique module name
def setup_scene(self, sim):
usd_path = "Path_to_file"
world = sim.get_world()
world.scene.add_default_ground_plane()
stage.add_reference_to_stage(usd_path=usd_path,
prim_path="/World/Robot")
world.scene.add(Robot(
prim_path="/World/Robot",
name="Robot",
scale=np.array([0.01,0.01,0.01]),
orientation=np.array([0.0, 0.0, 0.0, 1.0]),
position=np.array([0.0, 0.0, 0.3]),)
)
return
def setup_post_load(self, sim, func):
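        # cache the world/robot handles and register func() to run on every physics step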
sim._world = sim.get_world()
sim._robot = sim._world.scene.get_object("Robot")
sim._world.add_physics_callback("sending_actions", callback_fn=func)
return
def simulation(self):
pass
| 980 |
Python
| 29.656249 | 76 | 0.570408 |
iMAPSRoboticsTeam/Issac_Sim_Template/Extension.py
|
from abc import abstractmethod
import omni.ext
from omni.isaac.ui.ui_utils import setup_ui_headers, get_style, btn_builder, state_btn_builder, cb_builder
import omni.ui as ui
from omni.kit.menu.utils import add_menu_items, remove_menu_items, MenuItemDescription
from omni.isaac.core import World
from .Simulation import Simulation
import weakref
import os
import asyncio
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class Extension(omni.ext.IExt, Simulation):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id: str):
self._menuItems = None
self._buttons = None
self._resetButtons = None
        self._GlobalReset = None
self._ext_id = ext_id
self.name = "Robot"
self.robot = Simulation()
self.extra_frames = []
self.start_Extension()
return
def start_Extension(
self,
menu_name = None,
submenu_name = None,
name = "Simulation Suite",
title = "Simulation",
doc_link = None,
overview = "This is a complete simulation package",
file_path = os.path.abspath(__file__),
number_of_extra_frames = 1,
):
menu_items = [MenuItemDescription(name=name, onclick_fn=lambda a=weakref.proxy(self): a._menu_callback())]
if menu_name == "" or menu_name is None:
self._menu_items = menu_items
elif submenu_name == "" or submenu_name is None:
self._menu_items = [MenuItemDescription(name=menu_name, sub_menu=menu_items)]
else:
self._menu_items = [
MenuItemDescription(
name=menu_name, sub_menu=[MenuItemDescription(name=submenu_name, sub_menu=menu_items)]
)
]
add_menu_items(self._menu_items, name)
self._buttons = dict()
self._resetButtons = dict()
        self._GlobalReset = dict()
self._build_ui(
name=name,
title=title,
doc_link=doc_link,
overview=overview,
file_path=file_path,
number_of_extra_frames=number_of_extra_frames,
ext_id=self._ext_id,
)
"""
USER INTERFACE
"""
def _build_ui(self,
name,
title,
doc_link,
overview,
file_path,
number_of_extra_frames,
ext_id,
):
self._window = ui.Window(name, width=360, height=0, visible=True, dockPreference=ui.DockPreference.LEFT_BOTTOM)
with self._window.frame:
with ui.VStack(spacing=5, height=10):
#title = ("StakeBot Simulation")
#doc_link = ("https://docs.omniverse.nvidia.com/py/isaacsim/source/extensions/omni.isaac.ui/docs/index.html")
# overview = (
# "This is a complete control pannel for simulating and testing the StakeBot\n"
# )
setup_ui_headers(ext_id, file_path, title, doc_link, overview)
frame = ui.CollapsableFrame(
title="Simulation Name",
height=0,
                    collapsed=False,
style=get_style(),
style_type_name_override="CollapsableFrame",
horizontal_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_AS_NEEDED,
vertical_scrollbar_policy=ui.ScrollBarPolicy.SCROLLBAR_ALWAYS_ON,
)
with frame:
with ui.VStack(style=get_style(), spacing=5, height=0):
dict = {
"label": "Load Simulation", ## Give Simulation a Name
"type": "button",
"text": "Load",
"tooltip": "Edit ToolTip",
"on_clicked_fn": self._load_new_world,
}
self._buttons["Load New Simulator"] = btn_builder(**dict) # Give button a unique name
self._buttons["Load New Simulator"].enabled = True
dict = {
"label": "Sim Reset", # Give reset button a name
"type": "button",
"text": "Reset",
"tooltip": "Reset Simulation",
"on_clicked_fn": self._on_reset,
}
self._resetButtons["Sim Reset"] = btn_builder(**dict) # Create a unique name for button
self._resetButtons["Sim Reset"].enabled = False
dict = {
"label": "Reset Simulation",
"type": "button",
"text": "Reset Simulation",
"tooltip": "Reset EtherBot Simulation",
"on_clicked_fn": self._reset_all,
}
self._GolbalReset["Reset"] = btn_builder(**dict)
"""
"""
def _load_new_world(self): # rename function to unique world name
self.robot._loadWorld = "Simulation" # Rename this to unique world name
async def _on_load_world_async():
await self.robot.load_world_async()
await omni.kit.app.get_app().next_update_async()
self.robot._world.add_stage_callback("stage_event_1", self.on_stage_event)
self._enable_all_buttons(self._buttons, False)
self._resetButtons["Sim Reset"].enabled = True
self.post_load_button_event()
self.robot._world.add_timeline_callback("stop_reset_event", self._reset_on_stop_event)
asyncio.ensure_future(_on_load_world_async())
return
def _menu_callback(self):
self._window.visible = not self._window.visible
return
    def _enable_all_buttons(self, buttons, flag):
        for btn_name, btn in buttons.items():
if isinstance(btn, omni.ui._ui.Button):
btn.enabled = flag
return
def on_stage_event(self, event):
if event.type == int(omni.usd.StageEventType.CLOSED):
if World.instance() is not None:
self.robot._world_cleanup()
self.robot._world.clear_instance()
if hasattr(self, "_buttons"):
if self._buttons is not None:
self._enable_all_buttons(self._buttons, False)
return
def _reset_on_stop_event(self, e):
if e.type == int(omni.timeline.TimelineEventType.STOP):
try:
self.post_clear_button_event()
            except Exception:
pass
return
def _on_reset(self):
async def _on_reset_async():
await self.robot.reset_async()
await omni.kit.app.get_app().next_update_async()
self.post_reset_button_event()
self.robot._world.clear_instance()
asyncio.ensure_future(_on_reset_async())
return
@abstractmethod
def post_reset_button_event(self):
return
@abstractmethod
def post_load_button_event(self):
return
@abstractmethod
def post_clear_button_event(self):
return
def _sample_window_cleanup(self):
remove_menu_items(self._menu_items, self.name)
self._window = None
self._menu_items = None
self._buttons = None
self._resetButtons = None
        self._GlobalReset = None
return
def shutdown_cleanup(self):
return
def on_shutdown(self):
if self.robot._world is not None:
self.robot._world_cleanup()
if self._menu_items is not None:
self._sample_window_cleanup()
if self._buttons is not None:
self._enable_all_buttons(self._buttons, False)
if self._resetButtons is not None:
self._enable_all_buttons(self._resetButtons, False)
        if self._GlobalReset is not None:
            self._GlobalReset["Reset"].enabled = False
self.shutdown_cleanup()
if self.robot._world is not None:
self.robot._world.clear_instance()
self.robot._world.clear()
return
def _reset_all(self):
async def _on_reset_all_async():
try:
if self.robot._world is not None:
await self.robot._world.stop_async()
await omni.kit.app.get_app().next_update_async()
self.robot._world.clear_instance()
self.robot._world.clear()
self._enable_all_buttons(self._buttons, True)
self._enable_all_buttons(self._resetButtons, False)
            except Exception:
pass
asyncio.ensure_future(_on_reset_all_async())
return
| 9,508 |
Python
| 36.58498 | 125 | 0.523138 |
iMAPSRoboticsTeam/Issac_Sim_Template/__init__.py
|
from .Extension import *
from .SimulationBase import SimulationBase
from .Simulation import Simulation
| 102 |
Python
| 33.333322 | 42 | 0.852941 |
iMAPSRoboticsTeam/Issac_Sim_Template/Simulation.py
|
from .SimulationBase import SimulationBase
from .SimulationModule import SimulationModule
from omni.isaac.core.utils.types import ArticulationAction
import numpy as np
class Simulation(SimulationBase):
def __init__(self) -> None:
super().__init__()
self._loadWorld = None
self.simMod = SimulationModule()
## Add new Simulation Modules
return
# Setup Scene
def setup_scene(self, loadWorld):
if loadWorld == "Simulation":
            self.simMod.setup_scene(self)
# elif loadWorld == "NewSim":
# load new sim module
return
# Setup Post Load
async def setup_post_load(self, loadWorld):
if loadWorld == "Simulation":
self.simMod.setup_post_load(self, self.send_actions)
# elif loadWorld == "NewSim":
# load new sim module
return
def send_actions(self, step_size):
self.simMod.simulation()
self._robot.apply_action(ArticulationAction(joint_positions=None,
joint_efforts=None,
joint_velocities=np.array([5,5,5,5])*-1))
return
| 1,257 |
Python
| 28.95238 | 73 | 0.567224 |
iMAPSRoboticsTeam/Issac_Sim_Template/README.md
|
# Issac_Sim_Template
This template helps you get started with developing simulations in Isaac Sim.
# How To Use
## Create An Extension
First create a new extension by opening the extension window
Window >> Extensions
Next click the green plus sign on the top left of the window, Select "New Extension Template Project"
Save this to any location
Give it a project name (No Spaces) and use iMAPS for the ext.id name
VS Code should now open.
## Clone The Issac_Sim_Template
Next, open a git bash terminal
Click on the three dots in the upper left corner of VS Code
Click "New Terminal" a terminal will open on the bottom of VS Code window. In this window, click on the down arrow next to the white plus sign, click Git Bash.
> Assuming you have git installed on your windows machine click source control in VS Code to downolad.
This is a git bash terminal, here we can clone the repo into the extension by typing.
```
git clone https://github.com/iMAPSRoboticsTeam/Issac_Sim_Template.git
```
## Run the Setup Script
After the repo has been cloned, open File Explorer, then locate and double-click the setup.bat file inside the repo. It should be inside your extension folder.
After you run this script, the code template is ready to use, and Isaac Sim should show a new extension window.
For Linux, run the following command:
```
./Issac_Sim_Template/setup.sh
```
# Getting Started
This template sets up the developer with all the necessary functions to create and build simulations through a GUI.
Inside the Extension.py file is the code for the GUI. Here you can copy the code below while changing the names of the assets that are stored in their respective dictionaries.
```
dict = {
"label": "Load Simulation", ## Give Simulation a Name
"type": "button",
"text": "Load",
"tooltip": "Edit ToolTip",
"on_clicked_fn": self._load_new_world,
}
self._buttons["Load New Simulator"] = btn_builder(**dict) # Give button a unique name
self._buttons["Load New Simulator"].enabled = True
dict = {
"label": "Sim Reset", # Give reset button a name
"type": "button",
"text": "Reset",
"tooltip": "Reset Simulation",
"on_clicked_fn": self._on_reset,
}
self._resetButtons["Sim Reset"] = btn_builder(**dict) # Create a unique name for button
self._resetButtons["Sim Reset"].enabled = False
```
The simulations are created within their own respective Python files. SimulationModule.py is an example of how to set up a simulation module for a specific purpose. This file is where the robot and world are loaded into Isaac Sim and where the simulation function (the function used to control the simulation) is placed.
This file stores a class that is called from the Simulation.py file, which is where the simulation runs; it calls the simulation function of the respective simulation module file. The ArticulationAction class is used inside Simulation.py to drive the joints of the loaded robot.
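As a minimal sketch of that dispatch (the `NewSim` world name and `newSimMod` attribute are illustrative, not part of the template), wiring a second module into Simulation.py could look like:
```
    def setup_scene(self, loadWorld):
        if loadWorld == "Simulation":
            self.simMod.setup_scene(self)
        elif loadWorld == "NewSim":            # illustrative world name
            self.newSimMod.setup_scene(self)   # a second SimulationModule instance
        return
```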
# Good Luck
If you get stuck or need help understanding the code better, please email me
```
[email protected]
```
or text me. Isaac Sim can be very complicated, but hopefully this template will help you better understand the process of robotic simulation development.
Good Luck!
| 3,305 |
Markdown
| 33.8 | 335 | 0.750983 |
elharirymatteo/RANS/README.md
|

## About this repository
This repo is an extension of the Isaac Gym Envs library present at https://github.com/NVIDIA-Omniverse/OmniIsaacGymEnvs. There, you can find further details and instructions regarding the default tasks (`AllegroHand`, `Ant`, `Anymal`, `AnymalTerrain`, `BallBalance`, `Cartpole`, `Crazyflie`, `FrankaCabinet`, `Humanoid`, `Ingenuity`, `Quadcopter`, `ShadowHand`, `ShadowHandOpenAI_FF`, `ShadowHandOpenAI_LSTM`).
The main additions to the Reinforcement Learning examples provided by Omniverse Isaac Gym are environments related to **Space Robotics**.
Firstly, we provide a 2D environment, which serves as a simpler version of a realistic spacecraft. The modelled 2D system can be tested with a real rigid structure floating on top of an extremely flat and smooth surface using air bearings. This setup is a common solution to emulate free-floating and free-flying satellite motion. This intermediate step is especially important for demonstrating sim-to-real transfer of the DRL policies trained within Omniverse.
Secondly, we provide a full 3D environment to simulate free-flying spacecraft in space.
| 3DoF go to XY | 3DoF go to Pose | 6DoF go to XYZ |
| :-: | :-: | :-: |
|  |  |  |
---
## Task Description
Currently we provide two primary environments, each tailored to simulate distinct robotic systems:
1. **3 Degrees of Freedom (3DoF) Robot Simulation:**
The simulator replicates the behavior of the 3DoF robot situated in the ZeroG Lab of the University of Luxembourg (SpaceR group). The system is equipped with 8 thrusters.
In this environment, the following tasks are defined:
- **GoToXY:** Task for position control.
- **GoToPose-2D:** Task for position-attitude control.
- **TrackXYVelocity:** Agent learns to track linear velocities in the xy plane.
- **TrackXYOVelocity:** Agent learns to track both linear and angular velocities.
2. **6 Degrees of Freedom (6DoF) Robot Simulation:**
The simulator emulates spacecraft maneuvers in space, featuring a 6DoF robot configuration with 16 thrusters.
The tasks defined for this environment are:
- **GoToXYZ:** Task for precise spatial positioning.
- **GoToPose-3D:** Task for accurate spatial positioning and orientation.
#### Thrusters Configuration
The default thrusters configuration for both 3DoF and 6DoF scenarios is depicted in the following images, showing the direction of forces applied by the thrusters mounted on the systems.
| 3DoF Thrusters Configuration | 6DoF Thrusters Configuration |
| :-: | :-: |
| <img src="omniisaacgymenvs/images/config3Dof.png" width="200"/> | <img src="omniisaacgymenvs/images/config6Dof.png" width="200"/> |
---
## Installation
Follow the Isaac Sim [documentation](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_workstation.html) to install the latest Isaac Sim release.
*Examples in this repository rely on features from the most recent Isaac Sim release. Please make sure to update any existing Isaac Sim build to the latest release version, 2022.2.0, to ensure examples work as expected.*
### OmniverseIsaacGymEnvs
Once installed, this repository can be used as a python module, `omniisaacgymenvs`, with the python executable provided in Isaac Sim.
To install `omniisaacgymenvs`, first clone this repository:
```bash
git clone https://github.com/elharirymatteo/RANS.git
```
Once cloned, locate the [python executable in Isaac Sim](https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_python.html). By default, this should be `python.sh`. We will refer to this path as `PYTHON_PATH`.
To set a `PYTHON_PATH` variable in the terminal that links to the python executable, we can run a command that resembles the following. Make sure to update the paths to your local path.
```
For Linux: alias PYTHON_PATH=~/.local/share/ov/pkg/isaac_sim-*/python.sh
For Windows: doskey PYTHON_PATH=C:\Users\user\AppData\Local\ov\pkg\isaac_sim-*\python.bat $*
For IsaacSim Docker: alias PYTHON_PATH=/isaac-sim/python.sh
```
Install `omniisaacgymenvs` as a python module for `PYTHON_PATH`:
```bash
PYTHON_PATH -m pip install -e .
```
### RL Games
We use the [rl-games](https://pypi.org/project/rl-games/1.0.2/) library as a starting point to rework the PPO implementation for the agents we train.
To install the appropriate version of rl-games, clone this repository **INSIDE** RANS:
```bash
git clone https://github.com/AntoineRichard/rl_games
```
Make sure to install the rl_games library as a dependency using the Isaac Sim Python executable:
```
cd rl_games
PYTHON_PATH -m pip install --upgrade pip
PYTHON_PATH -m pip install -e .
```
## Running the examples
*Note: All commands should be executed from `OmniIsaacGymEnvs/omniisaacgymenvs`.*
<details>
<summary><span style="font-size: 1.3em; font-weight: bold;">Training new agents</span></summary>
To train your first policy, run:
```bash
PYTHON_PATH scripts/rlgames_train_mfp.py task=virtual_floating_platform/MFP2D_Virtual_GoToXY train=virtual_floating_platform/MFP2D_PPOmulti_dict_MLP
```
You should see an Isaac Sim window pop up. Once Isaac Sim initialization completes, the FloatingPlatform scene will be constructed and simulation will start running automatically. The process will terminate once training finishes.
Here's another example - GoToPose - using the multi-threaded training script:
```bash
PYTHON_PATH scripts/rlgames_train_mfp.py task=virtual_floating_platform/MFP2D_Virtual_GoToPose train=virtual_floating_platform/MFP2D_PPOmulti_dict_MLP
```
Note that by default, we show a Viewport window with rendering, which slows down training. You can choose to close the Viewport window during training for better performance. The Viewport window can be re-enabled by selecting `Window > Viewport` from the top menu bar.
To achieve maximum performance, launch training in `headless` mode as follows:
```bash
PYTHON_PATH scripts/rlgames_train_mfp.py task=virtual_floating_platform/MFP2D_Virtual_GoToPose train=virtual_floating_platform/MFP2D_PPOmulti_dict_MLP headless=True
```
#### A Note on the Startup Time of the Simulation
Some of the examples could take a few minutes to load because the startup time scales based on the number of environments. The startup time will continually
be optimized in future releases.
</details>
<details>
<summary><span style="font-size: 1.3em; font-weight: bold;">Loading trained models (or checkpoints)</span></summary>
Checkpoints are saved in the folder `runs/EXPERIMENT_NAME/nn` where `EXPERIMENT_NAME`
defaults to the task name, but can also be overridden via the `experiment` argument.
To load a trained checkpoint and continue training, use the `checkpoint` argument:
```bash
PYTHON_PATH scripts/rlgames_train_mfp.py task=virtual_floating_platform/MFP2D_Virtual_GoToPose train=virtual_floating_platform/MFP2D_PPOmulti_dict_MLP checkpoint=runs/MFP2D_Virtual_GoToPose/nn/MFP2D_Virtual_GoToPose.pth
```
To load a trained checkpoint and only perform inference (no training), pass `test=True`
as an argument, along with the checkpoint name. To avoid rendering overhead, you may
also want to run with fewer environments using `num_envs=64`:
```bash
PYTHON_PATH scripts/rlgames_train_mfp.py task=virtual_floating_platform/MFP2D_Virtual_GoToPose train=virtual_floating_platform/MFP2D_PPOmulti_dict_MLP checkpoint=runs/MFP2D_Virtual_GoToPose/nn/MFP2D_Virtual_GoToPose.pth test=True num_envs=64
```
Note that if there are special characters such as `[` or `=` in the checkpoint names,
you will need to escape them and put quotes around the string. For example,
`checkpoint="runs/Ant/nn/last_Antep\=501rew\[5981.31\].pth"`
</details>
## Training Scripts
All scripts provided in `omniisaacgymenvs/scripts` can be launched directly with `PYTHON_PATH`.
<details>
<summary><span style="font-size: 1.3em; font-weight: bold;">Random policy</span></summary>
To test out a task without RL in the loop, run the random policy script with:
```bash
PYTHON_PATH scripts/random_policy.py task=virtual_floating_platform/MFP2D_Virtual_GoToXY
```
This script will sample random actions from the action space and apply these actions to your task without running any RL policies. Simulation should start automatically after launching the script, and will run indefinitely until terminated.
</details>
<details>
<summary><span style="font-size: 1.3em; font-weight: bold;">Train on single GPU</span></summary>
To run a simple form of PPO from `rl_games`, use the single-threaded training script:
```bash
PYTHON_PATH scripts/rlgames_train_mfp.py task=virtual_floating_platform/MFP2D_Virtual_GoToXY
```
This script creates an instance of the PPO runner in `rl_games` and automatically launches training and simulation. Once training completes (the total number of iterations have been reached), the script will exit. If running inference with `test=True checkpoint=<path/to/checkpoint>`, the script will run indefinitely until terminated. Note that this script will have limitations on interaction with the UI.
</details>
<details>
<summary><span style="font-size: 1.3em; font-weight: bold;">Train on multiple GPUs</span></summary>
Lastly, we provide a multi-threaded training script that executes the RL policy on a separate thread from the main thread used for simulation and rendering:
```bash
PYTHON_PATH scripts/rlgames_train_mfp.py task=virtual_floating_platform/MFP2D_Virtual_GoToXY
```
This script uses the same RL Games PPO policy as the above, but runs the RL loop on a new thread. Communication between the RL thread and the main thread happens on threaded Queues. Simulation will start automatically, but the script will **not** exit when training terminates, except when running in headless mode. Simulation will stop when training completes or can be stopped by clicking on the Stop button in the UI. Training can be launched again by clicking on the Play button. Similarly, if running inference with `test=True checkpoint=<path/to/checkpoint>`, simulation will run until the Stop button is clicked, or the script will run indefinitely until the process is terminated.
</details>
<details>
<summary><span style="font-size: 1.3em; font-weight: bold;">Configuration and command line arguments</span></summary>
We use [Hydra](https://hydra.cc/docs/intro/) to manage the config.
Common arguments for the training scripts are:
* `task=TASK` - Selects which task to use. Any of `MFP2D_Virtual_GoToXY`, `MFP2D_Virtual_GoToPose`, `MFP2D_Virtual_TrackXYVelocity`, `MFP2D_Virtual_TrackXYOVelocity`, `MFP3D_Virtual_GoToXYZ`, `MFP3D_Virtual_GoToPose`, (these correspond to the config for each environment in the folder `omniisaacgymenvs/cfg/task/virtual_floating_platform`)
* `train=TRAIN` - Selects which training config to use. Will automatically default to the correct config for the environment (ie. `<TASK>PPO`).
* `num_envs=NUM_ENVS` - Selects the number of environments to use (overriding the default number of environments set in the task config).
* `seed=SEED` - Sets a seed value for randomization, and overrides the default seed in the task config
* `pipeline=PIPELINE` - Which API pipeline to use. Defaults to `gpu`, can also set to `cpu`. When using the `gpu` pipeline, all data stays on the GPU. When using the `cpu` pipeline, simulation can run on either CPU or GPU, depending on the `sim_device` setting, but a copy of the data is always made on the CPU at every step.
* `sim_device=SIM_DEVICE` - Device used for physics simulation. Set to `gpu` (default) to use GPU and to `cpu` for CPU.
* `device_id=DEVICE_ID` - Device ID for GPU to use for simulation and task. Defaults to `0`. This parameter will only be used if simulation runs on GPU.
* `rl_device=RL_DEVICE` - Which device / ID to use for the RL algorithm. Defaults to `cuda:0`, and follows PyTorch-like device syntax.
* `test=TEST`- If set to `True`, only runs inference on the policy and does not do any training.
* `checkpoint=CHECKPOINT_PATH` - Path to the checkpoint to load for training or testing.
* `headless=HEADLESS` - Whether to run in headless mode.
* `experiment=EXPERIMENT` - Sets the name of the experiment.
* `max_iterations=MAX_ITERATIONS` - Sets how many iterations to run for. Reasonable defaults are provided for the provided environments.
* `warp=WARP` - If set to True, launch the task implemented with Warp backend (Note: not all tasks have a Warp implementation).
* `kit_app=KIT_APP` - Specifies the absolute path to the kit app file to be used.
Hydra also allows setting variables inside config files directly as command line arguments. As an example, to set the minibatch size for a rl_games training run, you can use `train.params.config.minibatch_size=64`. Similarly, variables in task configs can also be set. For example, `task.env.episodeLength=100`.
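For instance, both styles of override can be combined in a single launch; the values below are illustrative:
```bash
PYTHON_PATH scripts/rlgames_train_mfp.py task=virtual_floating_platform/MFP2D_Virtual_GoToXY train.params.config.minibatch_size=64 task.env.episodeLength=100 headless=True
```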
#### Hydra Notes
Default values for each of these are found in the `omniisaacgymenvs/cfg/config.yaml` file.
The way that the `task` and `train` portions of the config works are through the use of config groups.
You can learn more about how these work [here](https://hydra.cc/docs/tutorials/structured_config/config_groups/)
The actual configs for `task` are in `omniisaacgymenvs/cfg/task/<TASK>.yaml` and for `train` in `omniisaacgymenvs/cfg/train/<TASK>PPO.yaml`.
In some places in the config you will find other variables referenced (for example,
`num_actors: ${....task.env.numEnvs}`). Each `.` represents going one level up in the config hierarchy.
This is documented fully [here](https://omegaconf.readthedocs.io/en/latest/usage.html#variable-interpolation).
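As a sketch of how such an interpolation reads in a train config (this excerpt is illustrative, not copied from a specific file):
```yaml
params:
  config:
    # each leading "." climbs one level up the merged config tree,
    # here resolving to the top-level task.env.numEnvs value
    num_actors: ${....task.env.numEnvs}
```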
</details>
### Tensorboard
Tensorboard can be launched during training via the following command:
```bash
PYTHON_PATH -m tensorboard.main --logdir runs/EXPERIMENT_NAME/summaries
```
## WandB support
You can run [WandB](https://wandb.ai/) with OmniIsaacGymEnvs by setting the `wandb_activate=True` flag from the command line. You can set the group, name, entity, and project for the run by setting the `wandb_group`, `wandb_name`, `wandb_entity` and `wandb_project` arguments. Make sure you have WandB installed in the Isaac Sim Python executable with `PYTHON_PATH -m pip install wandb` before activating.
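For example, assuming placeholder entity and project names:
```bash
PYTHON_PATH scripts/rlgames_train_mfp.py task=virtual_floating_platform/MFP2D_Virtual_GoToXY wandb_activate=True wandb_entity=my_entity wandb_project=my_project
```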
## Citation
If you use the current repository in your work, we suggest citing the following papers:
```bibtex
@article{el2023drift,
title={DRIFT: Deep Reinforcement Learning for Intelligent Floating Platforms Trajectories},
author={El-Hariry, Matteo and Richard, Antoine and Muralidharan, Vivek and Yalcin, Baris Can and Geist, Matthieu and Olivares-Mendez, Miguel},
journal={arXiv preprint arXiv:2310.04266},
year={2023}
}
@article{el2023rans,
title={RANS: Highly-Parallelised Simulator for Reinforcement Learning based Autonomous Navigating Spacecrafts},
author={El-Hariry, Matteo and Richard, Antoine and Olivares-Mendez, Miguel},
journal={arXiv preprint arXiv:2310.07393},
year={2023}
}
```
## Directory Structure
```bash
.
├── cfg
│ ├── controller # Optimal Controllers configurations
│ ├── hl_task # High-level task configurations
│ ├── task # Task configurations
│ │ └── virtual_floating_platform # Virtual floating platform task configurations
│ └── train # Training configurations
│ └── virtual_floating_platform # Virtual floating platform training configurations
├── checkpoints # Checkpoints for saved models
├── conf_runs # Configuration runs for training
├── demos # Demonstration files (gifs)
├── envs
│ └── BuoyancyPhysics # Environment related to buoyancy physics
├── images # Image files
├── mujoco_envs
│ ├── controllers # Controllers for Mujoco environments
│ ├── environments # Mujoco environments
│ └── legacy # Legacy Mujoco environment files
├── notebooks # Jupyter notebooks
├── robots
│ ├── articulations # Articulation-related files
│ │ ├── utils # Utilities for articulations
│ │ └── views # Articulation views
│ └── usd # USD-related files
├── ros # ROS-related files
├── scripts # Utility scripts
├── skrl # Reinforcement learning utilities
├── tasks
│ ├── base # Base task implementations
│ ├── buoyancy # Buoyancy-related tasks
│ ├── factory # Factory task configurations
│ │ └── yaml # YAML configurations for factory tasks
│ ├── shared # Shared task implementations
│ ├── utils # Task utility functions
│ └── virtual_floating_platform # Task implementations for virtual floating platform
├── utils
│ ├── config_utils # Configuration utilities
│ ├── domain_randomization # Domain randomization utilities
│ ├── hydra_cfg # Hydra configuration utilities
│ ├── rlgames # Utilities for rlgames
│ ├── terrain_utils # Terrain-related utilities
│ └── usd_utils # USD-related utilities
└── videos # Video files
```
| 17,693 |
Markdown
| 56.635179 | 688 | 0.730911 |
elharirymatteo/RANS/omniisaacgymenvs/ros/ros_node.py
|
from typing import Callable, NamedTuple, Optional, Union, List, Dict
from collections import deque
import numpy as np
import datetime
import torch
import os
import rospy
from std_msgs.msg import ByteMultiArray
from geometry_msgs.msg import PoseStamped, Point, Pose
from omniisaacgymenvs.ros.ros_utills import derive_velocities
from omniisaacgymenvs.mujoco_envs.controllers.hl_controllers import (
PoseController,
PositionController,
VelocityTracker,
DockController,
)
from omniisaacgymenvs.mujoco_envs.environments.disturbances import (
RandomKillThrusters,
Disturbances,
)
class RLPlayerNode:
def __init__(
self,
hl_controller: Union[PositionController, PoseController, VelocityTracker, DockController],
cfg: dict,
map: List[int] = [2, 5, 4, 7, 6, 1, 0, 3],
debug: bool = False,
) -> None:
"""
        Initializes the RL player node.
        Args:
            hl_controller (Union[PositionController, PoseController, VelocityTracker, DockController]): The high-level controller.
            cfg (dict): The configuration dictionary for the task, platform, and disturbances.
            map (List[int], optional): The map of the thrusters. Defaults to [2, 5, 4, 7, 6, 1, 0, 3].
            debug (bool, optional): Whether to print the logs at each step. Defaults to False.
"""
platform = cfg["task"]["env"]["platform"]
disturbances = cfg["task"]["env"]["disturbances"]
self.play_rate = 1 / (
cfg["task"]["env"]["controlFrequencyInv"] * cfg["task"]["sim"]["dt"]
)
self.run_time = cfg["task"]["env"]["maxEpisodeLength"] / self.play_rate
self.DR = Disturbances(disturbances, platform["seed"])
self.TK = RandomKillThrusters(
{
"num_thrusters_to_kill": platform["randomization"]["max_thruster_kill"]
* platform["randomization"]["kill_thrusters"],
"seed": platform["seed"],
}
)
# Initialize variables
self.buffer_size = 30 # Number of samples for differentiation
self.pose_buffer = deque(maxlen=self.buffer_size)
self.time_buffer = deque(maxlen=self.buffer_size)
self.debug = debug
self.map = map
self.hl_controller = hl_controller
self.reset()
# Initialize Subscriber and Publisher
self.pose_sub = rospy.Subscriber(
"/vrpn_client_node/FP_exp_RL/pose", PoseStamped, self.pose_callback
)
self.goal_sub = rospy.Subscriber(
"/spacer_floating_platform/goal", Point, self.goal_callback
)
self.action_pub = rospy.Publisher(
"/spacer_floating_platform/valves/input", ByteMultiArray, queue_size=1
)
# Initialize ROS message for thrusters
self.thruster_msg = ByteMultiArray()
rospy.on_shutdown(self.shutdown)
def getObs(self) -> Dict[str, np.ndarray]:
"""
        Returns an up-to-date observation buffer.
Returns:
Dict[str, np.ndarray]: A dictionary containing the state of the simulation.
"""
state = {}
state["angular_velocity"] = self.DR.noisy_observations.add_noise_on_vel(
self.ang_vel
)
state["linear_velocity"] = self.DR.noisy_observations.add_noise_on_vel(
self.lin_vel
)
state["position"] = self.DR.noisy_observations.add_noise_on_pos(self.pos)
state["quaternion"] = self.quat
return state
def reset(self) -> None:
"""
Resets the goal and the buffers."""
self.ready = False
self.hl_controller.initializeLoggers()
self.hl_controller.time = 0
self.count = 0
def shutdown(self) -> None:
"""
        Shuts down the node and kills the thrusters while leaving the air bearing on."""
self.thruster_msg.data = [1, 0, 0, 0, 0, 0, 0, 0, 0]
self.action_pub.publish(self.thruster_msg)
rospy.sleep(1)
self.thruster_msg.data = [0, 0, 0, 0, 0, 0, 0, 0, 0]
self.action_pub.publish(self.thruster_msg)
def remap_actions(self, actions: torch.Tensor) -> List[float]:
"""
Remaps the actions from the RL algorithm to the thrusters of the platform.
Args:
actions (torch.Tensor): The actions from the RL algorithm.
Returns:
List[float]: The actions for the thrusters."""
return [actions[i] for i in self.map]
def pose_callback(self, msg: Pose) -> None:
"""
Callback for the pose topic. It updates the state of the agent.
Args:
msg (Pose): The pose message."""
# current_time = rospy.Time.now()
current_time = msg.header.stamp
# Add current pose and time to the buffer
self.pose_buffer.append(msg)
self.time_buffer.append(current_time)
# Calculate velocities if buffer is filled
if len(self.pose_buffer) == self.buffer_size:
self.get_state_from_optitrack(msg)
self.ready = True
def get_state_from_optitrack(self, msg: Pose) -> None:
"""
Converts a ROS message to an observation.
Args:
msg (Pose): The pose message."""
pos = msg.pose.position
quat = msg.pose.orientation
self.pos = [pos.x, pos.y, pos.z]
self.quat = [quat.w, quat.x, quat.y, quat.z]
self.lin_vel, self.ang_vel = derive_velocities(
self.time_buffer, self.pose_buffer
)
def goal_callback(self, msg: Point) -> None:
"""
Callback for the goal topic. It updates the task data with the new goal data.
Args:
msg (Point): The goal message."""
self.hl_controller.setGoal(np.array([msg.x, msg.y, msg.z]))
def get_action(self, run_time: float, lifting_active: int = 1) -> None:
"""
Gets the action from the RL algorithm and publishes it to the thrusters.
Args:
            run_time (float): The time elapsed since the start of the run, in seconds.
            lifting_active (int, optional): Whether or not the lifting thruster is active. Defaults to 1.
"""
self.state = self.getObs()
self.action = self.hl_controller.getAction(self.state, time=run_time)
# self.action = self.action * self.thruster_mask
action = self.remap_actions(self.action)
lifting_active = 1
action.insert(0, lifting_active)
self.thruster_msg.data = action
# self.action_pub.publish(self.thruster_msg)
def print_logs(self) -> None:
"""
Prints the logs."""
print("=========================================")
for key, value in self.hl_controller.logs.items():
print(f"{key}: {value[-1]}")
def run(self) -> None:
"""
Runs the RL algorithm."""
self.update_once = True
self.rate = rospy.Rate(self.play_rate)
start_time = rospy.Time.now()
run_time = rospy.Time.now() - start_time
while (not rospy.is_shutdown()) and (run_time.to_sec() < self.run_time):
if self.ready:
self.get_action(run_time.to_sec())
self.count += 1
if self.debug:
self.print_logs()
run_time = rospy.Time.now() - start_time
self.rate.sleep()
# Kills the thrusters once done
self.shutdown()
| 7,395 |
Python
| 33.082949 | 125 | 0.588235 |
elharirymatteo/RANS/omniisaacgymenvs/ros/run_ros_from_Isaac.py
|
__author__ = "Antoine Richard, Matteo El Hariry"
__copyright__ = (
"Copyright 2023, Space Robotics Lab, SnT, University of Luxembourg, SpaceR"
)
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Antoine Richard"
__email__ = "[email protected]"
__status__ = "development"
from omni.isaac.kit import SimulationApp
from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict
from omniisaacgymenvs.utils.hydra_cfg.hydra_utils import *
from omegaconf import DictConfig, OmegaConf
import hydra
import os
from omniisaacgymenvs.mujoco_envs.controllers.discrete_LQR_controller import (
DiscreteController,
parseControllerConfig,
)
from omniisaacgymenvs.mujoco_envs.controllers.RL_games_model_4_mujoco import (
RLGamesModel,
)
from omniisaacgymenvs.mujoco_envs.environments.mujoco_base_env import (
MuJoCoFloatingPlatform,
parseEnvironmentConfig,
)
from omniisaacgymenvs.mujoco_envs.controllers.hl_controllers import hlControllerFactory
from omniisaacgymenvs.ros.ros_utills import enable_ros_extension
@hydra.main(config_name="config_mujoco", config_path="../cfg")
def run(cfg: DictConfig):
""" "
Run the simulation.
Args:
cfg (DictConfig): A dictionary containing the configuration of the simulation.
"""
# print_dict(cfg)
cfg_dict = omegaconf_to_dict(cfg)
simulation_app = SimulationApp({"headless": True})
enable_ros_extension()
from omniisaacgymenvs.ros.ros_node import RLPlayerNode
import rospy
rospy.init_node("RL_player")
# Create the environment
env = MuJoCoFloatingPlatform(**parseEnvironmentConfig(cfg_dict))
# Get the low-level controller
if cfg_dict["use_rl"]:
assert os.path.exists(
cfg_dict["checkpoint"]
), "A correct path to a neural network must be provided to infer an RL agent."
ll_controller = RLGamesModel(
config=cfg_dict["train"], model_path=cfg_dict["checkpoint"]
)
else:
ll_controller = DiscreteController(**parseControllerConfig(cfg_dict, env))
dt = cfg_dict["task"]["sim"]["dt"]
# Get the high-level controller
hl_controller = hlControllerFactory(cfg_dict, ll_controller, dt)
node = RLPlayerNode(
hl_controller,
cfg=cfg_dict,
debug=True,
)
# Run the node.
node.run()
hl_controller.saveSimulationData()
hl_controller.plotSimulation()
# Close the simulationApp.
simulation_app.close()
if __name__ == "__main__":
# Initialize ROS node
run()
| 2,546 |
Python
| 27.943182 | 87 | 0.694423 |