file_path (string, 20-202 chars) | content (string, 9-3.85M chars) | size (int64, 9-3.85M) | lang (9 classes) | avg_line_length (float64, 3.33-100) | max_line_length (int64, 8-993) | alphanum_fraction (float64, 0.26-0.93)
---|---|---|---|---|---|---
michaltakac/nerf-toy-car-aerodynamics/toy-car/constants.py
|
# Extension constants
bounds = [[-2, 2], [-1, 1], [-3, 3]]
resolution = (200, 200, 200)
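# A minimal, illustrative sketch (not used by the extension) of how these
# constants are consumed: the visualizer spans each [min, max] pair in `bounds`
# with a per-axis sample count to build voxel-grid coordinates
# (see Visualizer.update_data in visualizer.py).
if __name__ == "__main__":
    import numpy as np

    coords = [np.linspace(lo, hi, n) for (lo, hi), n in zip(bounds, resolution)]
    Z, Y, X = np.meshgrid(coords[2], coords[1], coords[0], indexing="ij")
    print(X.shape)  # (200, 200, 200): one coordinate array per grid axis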
| 88 | Python | 21.249995 | 36 | 0.556818 |
michaltakac/nerf-toy-car-aerodynamics/toy-car/extension.py
|
import os
import torch
import shutil
import asyncio
import traceback
import omni.ext
import omni.usd
import omni.ui as ui
import numpy as np
from pathlib import Path
from modulus.hydra.utils import compose
from modulus_ext.ui.scenario import (
ModulusOVScenario,
ModulusOVButton,
ModulusOVFloatSlider,
ModulusOVIntSlider,
ModulusOVToggle,
ModulusOVRow,
ModulusOVText,
ModulusOVProgressBar,
)
from .visualizer import Visualizer
from .toy_car_runner import ModulusToyCarRunner
from .constants import bounds
from .src.toy_car import inlet_vel_range
class ToyCarScenario(ModulusOVScenario):
def __init__(self):
self._init_task = asyncio.ensure_future(self.deferred_init())
async def deferred_init(self):
super().__init__(name="Toy car aerodynamics simulator Omniverse Extension")
# Need to be a few frames in before init can occur.
# This is required for auto-loading of the extension
for i in range(15):
await omni.kit.app.get_app().next_update_async()
self.solver_train_initialized = False
self.solver_eval_initialized = False
self._eval_complete = False
self.resolution = [128, 128, 128]
vram_gb = torch.cuda.get_device_properties(0).total_memory / 10**9
eco = vram_gb < 13 # 12 Gb and below GPUs, turn on eco mode
self.inlet_velocity = 1.5
self.visualizer = Visualizer()
self._usd_context = omni.usd.get_context()
if self._usd_context.is_new_stage():
self.load_template()
param_text = ModulusOVText(
desc="Input Parameters",
)
self.add(param_text)
height_slider = ModulusOVFloatSlider(
name="Inlet Velocity",
desc="Inlet velocity from the top for Inference",
default_value=self.inlet_velocity,
bounds=inlet_vel_range,
update_func=self.update_inlet_velocity,
)
self.add(height_slider)
# Inference controls
self.inf_button = ModulusOVButton(
name="Inference",
desc="Perform Inference",
update_func=self.run_inference,
)
self.inf_button.run_in_main_thread = False
self.add(self.inf_button)
self.inf_progress = ModulusOVProgressBar(
desc="Inference Progress", default_value=0.0
)
self.inf_progress.inference_scale = 0.7
self.add(self.inf_progress)
# Visualization actions
isosurfaceButton = ModulusOVButton(
name="Isosurface",
desc="Generate Isosurface Visualization",
update_func=self.generate_isosurface,
)
streamlineButton = ModulusOVButton(
name="Streamline",
desc="Generate Streamline Visualization",
update_func=self.generate_streamlines,
)
sliceButton = ModulusOVButton(
name="Slice",
desc="Generate Slice Visualization",
update_func=self.generate_slices,
)
button_row = ModulusOVRow(
elements=[isosurfaceButton, streamlineButton, sliceButton]
)
self.add(button_row)
# Isosuface controls
control_text = ModulusOVText(
desc="Isosurface Controls",
)
self.add(control_text)
slider = ModulusOVFloatSlider(
name="Isovalue",
desc="Isosurface visualization isovalue",
default_value=0.001,
bounds=(0.001, 1.0),
update_func=self.update_isovalue,
)
self.add(slider)
# Streamline controls
control_text = ModulusOVText(
desc="Streamline Controls",
)
self.add(control_text)
slider = ModulusOVIntSlider(
name="Streamline Count",
desc="Streamline visualization count",
default_value=200,
bounds=(1, 400),
update_func=self.update_streamline_count,
)
self.add(slider)
slider = ModulusOVFloatSlider(
name="Streamline Step Size",
desc="Step Size used for Calculating Streamlines",
default_value=0.01,
bounds=(0.001, 0.1),
update_func=self.update_streamline_step_size,
)
self.add(slider)
slider = ModulusOVIntSlider(
name="Streamline Step Count",
desc="Number of Integration Steps to Calculate Streamlines",
default_value=1000,
bounds=(1, 2000),
update_func=self.update_streamline_step_count,
)
self.add(slider)
slider = ModulusOVFloatSlider(
name="Streamline Radius",
desc="Radius of Streamline Tubes",
default_value=0.02,
bounds=(0.0001, 0.1),
update_func=self.update_streamline_radius,
)
self.add(slider)
# Slice controls
control_text = ModulusOVText(
desc="Slice Controls",
)
self.add(control_text)
slider = ModulusOVFloatSlider(
name="Slice X Offset",
desc="Contour slice X offset from domain center",
default_value=0.0,
bounds=[bounds[0][0], bounds[0][1]],
update_func=self.update_slice_x_offset,
)
self.add(slider)
slider = ModulusOVFloatSlider(
name="Slice Y Offset",
desc="Contour slice Y offset from domain center",
default_value=0.0,
bounds=[bounds[1][0], bounds[1][1]],
update_func=self.update_slice_y_offset,
)
self.add(slider)
slider = ModulusOVFloatSlider(
name="Slice Z Offset",
desc="Contour slice Z offset from domain center",
default_value=0.0,
bounds=[bounds[2][0], bounds[2][1]],
update_func=self.update_slice_z_offset,
)
self.add(slider)
eco_toggle = ModulusOVToggle(
name="Eco Mode",
desc="For cards with limited memory",
default_value=eco,
update_func=self.toggle_eco,
)
self.add(eco_toggle)
self.register()
cfg = compose(config_name="config", config_path="conf", job_name="ToyCar")
self.simulator_runner = ModulusToyCarRunner(
cfg, progress_bar=self.inf_progress
)
self.simulator_runner.eco = eco
def load_template(self):
print("loading template")
usd_context = omni.usd.get_context()
template_file = Path(os.path.dirname(__file__)) / Path(
"../data/toy_car_template.usda"
)
self.template_temp_file = str(
Path(os.path.dirname(__file__))
/ Path("../data/toy_car_template_temp.usda")
)
shutil.copyfile(template_file, self.template_temp_file)
usd_context.open_stage(self.template_temp_file)
def toggle_eco(self, value):
print(f"Eco mode set to {value}")
self.simulator_runner.eco = value
def run_inference(self):
self.inf_button.text = "Running Inference..."
print("Toy car simulator inferencer started")
if self.simulator_runner.eco:
resolution_x = 64
resolution_y = 32
resolution_z = 64
else:
resolution_x = 128
resolution_y = 128
resolution_z = 128
if [resolution_x, resolution_y, resolution_z] != self.resolution:
print(
f"Initializing inferencer with a resolution of {resolution_x}*{resolution_y}*{resolution_z}"
)
self.resolution = [resolution_x, resolution_y, resolution_z]
print(
f"Will run inferencing for inlet_velocity={self.inlet_velocity}"
)
pred_vars = self.simulator_runner.run_inference(
inlet_velocity=self.inlet_velocity,
resolution=list(self.resolution),
)
shape = tuple(self.resolution)
u = pred_vars["u"].reshape(shape)
v = pred_vars["v"].reshape(shape)
w = pred_vars["w"].reshape(shape)
velocity = np.stack([u, v, w], axis=-1)
if velocity.dtype != np.float32:
velocity = velocity.astype(np.float32)
if velocity.shape != shape + (3,):
raise RuntimeError(f"expected shape: {shape + (3,)}; got: {velocity.shape}")
# The inferencer returns the field in ij (x, y, z) index order; reorder to
# z-first (z, y, x, 3) and make it C-contiguous so that x varies fastest in the
# flattened array, which is the ordering the vtk-m regular grid expects.
velocity = np.ascontiguousarray(velocity.transpose(2, 1, 0, 3))
self.inf_progress.value = 0.95
np.seterr(invalid="ignore")
mask = np.where(velocity == self.simulator_runner.mask_value)
velocity[mask] = 0.0
velmag = np.linalg.norm(velocity, axis=3)
# velmag = velmag / np.amax(velmag)
minval = np.amin(velmag)
maxval = np.amax(velmag)
print("Test", maxval, minval)
self._velocity = velocity
self._velmag = velmag
# self._mask = spatial_mask
self._vel_mask = mask
self._bounds = np.array(self.simulator_runner.bounds).flatten()
print("ToyCarScenario inference ended")
self._eval_complete = True
self.inf_progress.value = 1.0
self.inf_button.text = "Inference"
def update_vis_data(self):
if not all(v is not None for v in [self._velocity, self._velmag, self._bounds]):
return
self.visualizer.update_data(
self._velocity, self._velmag, self._bounds, self._vel_mask, self.resolution
)
def update_inlet_velocity(self, value: float):
self.inlet_velocity = value
def update_isovalue(self, isovalue):
print(f"Updating isovalue: {isovalue}")
self.visualizer.parameters.isovalue = isovalue
self.visualizer.update_generated()
def update_streamline_count(self, streamline_count):
print(f"Updating streamline_count: {streamline_count}")
self.visualizer.parameters.streamline_count = streamline_count
self.visualizer.update_generated()
def update_streamline_step_size(self, streamline_step_size):
print(f"Updating streamline_step_size: {streamline_step_size}")
self.visualizer.parameters.streamline_step_size = streamline_step_size
self.visualizer.update_generated()
def update_streamline_step_count(self, streamline_step_count):
print(f"Updating streamline_step_count: {streamline_step_count}")
self.visualizer.parameters.streamline_step_count = streamline_step_count
self.visualizer.update_generated()
def update_streamline_radius(self, streamline_radius):
print(f"Updating streamline_radius: {streamline_radius}")
self.visualizer.parameters.streamline_radius = streamline_radius
self.visualizer.update_generated()
def update_slice_x_offset(self, slice_x_offset):
print(f"Updating slice_x_offset: {slice_x_offset}")
self.visualizer.parameters.slice_x_pos = slice_x_offset
self.visualizer.update_generated()
def update_slice_y_offset(self, slice_y_offset):
print(f"Updating slice_y_offset: {slice_y_offset}")
self.visualizer.parameters.slice_y_pos = slice_y_offset
self.visualizer.update_generated()
def update_slice_z_offset(self, slice_z_offset):
print(f"Updating slice_z_offset: {slice_z_offset}")
self.visualizer.parameters.slice_z_pos = slice_z_offset
self.visualizer.update_generated()
def generate_isosurface(self):
if not self._eval_complete:
print("Need to run inferencer first!")
return
self.update_vis_data()
self.visualizer.generate_isosurface()
def generate_streamlines(self):
if not self._eval_complete:
print("Need to run inferencer first!")
return
self.update_vis_data()
self.visualizer.generate_streamlines()
def generate_slices(self):
if not self._eval_complete:
print("Need to run inferencer first!")
return
self.update_vis_data()
self.visualizer.generate_slices()
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class ToyCarExt(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
print("[modulus.scenario.ToyCar] Toy car aerodynamics scenario startup")
self.scenario = ToyCarScenario()
def on_shutdown(self):
self.scenario.__del__()
print("[modulus.scenario.ToyCar] Toy car aerodynamics scenario shutdown")
| 13,049 | Python | 33.8 | 119 | 0.609242 |
michaltakac/nerf-toy-car-aerodynamics/toy-car/inferencer.py
|
from modulus.hydra import to_yaml
from modulus.hydra.utils import compose
from modulus.solver import Solver
from modulus.domain import Domain
from modulus.domain.inferencer import PointwiseInferencer, VoxelInferencer
from src.geometry import ToyCarDomain
from src.toy_car import network, constraints, inlet_vel
from src.plotter import generate_velocity_profile_3d, InferencerSlicePlotter2D
cfg = compose(config_path="conf", config_name="config_eval", job_name="toy_car_inference")
print(to_yaml(cfg))
def run():
geo = ToyCarDomain()
domain = Domain()
nodes = network(cfg, scale=geo.scale)
constraints(cfg, geo=geo, nodes=nodes, domain=domain)
inlet_vel_inference = 0.1
inferencer = PointwiseInferencer(
nodes=nodes,
invar=geo.interior_mesh.sample_interior(1000000, parameterization={inlet_vel: inlet_vel_inference}),
output_names=["u", "v", "w", "p"],
batch_size=1024,
requires_grad=False,
plotter=InferencerSlicePlotter2D()
)
domain.add_inferencer(inferencer, "simulation")
# add meshgrid inferencer
mask_fn = lambda x, y, z: geo.interior_mesh.sdf({"x": x, "y": y, "z": z})[0] < 0
voxel_inference = VoxelInferencer(
bounds=[[-3, 3], [-3, 3], [-3, 3]],
npoints=[128, 128, 128],
nodes=nodes,
output_names=["u", "v", "w", "p"],
export_map={"u": ["u", "v", "w"], "p": ["p"]},
mask_fn=mask_fn,
batch_size=1024,
requires_grad=False,
)
domain.add_inferencer(voxel_inference, "simulation_voxel")
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
# generate velocity profile with magnitude (it has V = [u, v, w] in one array)
generate_velocity_profile_3d()
if __name__ == "__main__":
run()
| 1,808 | Python | 29.15 | 108 | 0.642146 |
michaltakac/nerf-toy-car-aerodynamics/toy-car/visualizer.py
|
import omni.usd
import omni.timeline
# Import the HPC visualization pipeline
from hpcvis.vtkm_bridge.core import get_bridge_interface
import numpy as np
from pxr import Sdf, Usd, UsdGeom, UsdUtils
import types
from dataclasses import dataclass
from typing import List
from .constants import bounds
# Put interface object publicly to use in our API
_vtkm_bridge = None
class VisParameters:
def __init__(self):
self.bounds = np.array(bounds).flatten()
self.isovalue = 0.001
self.streamline_count = 200
self.streamline_step_size = 0.01
self.streamline_step_count = 750
self.streamline_radius = 0.02
self.streamline_height = 0.0
self._slice_x_pos = 0.5
self._slice_y_pos = 0.5
self._slice_z_pos = 0.5
@property
def slice_x_pos(self):
return self._slice_x_pos
@slice_x_pos.setter
def slice_x_pos(self, offset):
self._slice_x_pos = max(
min((offset - self.bounds[0]) / (self.bounds[1] - self.bounds[0]), 1), 0
)
@property
def slice_y_pos(self):
return self._slice_y_pos
@slice_y_pos.setter
def slice_y_pos(self, offset):
self._slice_y_pos = max(
min((offset - self.bounds[2]) / (self.bounds[3] - self.bounds[2]), 1), 0
)
@property
def slice_z_pos(self):
return self._slice_z_pos
@slice_z_pos.setter
def slice_z_pos(self, offset):
self._slice_z_pos = max(
min((offset - self.bounds[4]) / (self.bounds[5] - self.bounds[4]), 1), 0
)
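# Worked example of the offset-to-position mapping used by the setters above
# (an illustrative, standalone check; `_norm_pos` is a hypothetical helper that
# mirrors the setter formula). With the x bounds [-2, 2] from constants.py, a
# world-space offset of 0.0 maps to (0.0 - (-2)) / (2 - (-2)) = 0.5, and
# out-of-range offsets are clamped to [0, 1].
def _norm_pos(offset, lo, hi):
    return max(min((offset - lo) / (hi - lo), 1), 0)

assert _norm_pos(0.0, -2, 2) == 0.5
assert _norm_pos(-5.0, -2, 2) == 0
assert _norm_pos(3.0, -2, 2) == 1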
class Visualizer:
def __init__(self):
# Get the vtkm bridge context
self._vtkm_bridge = None
print(
f"[modulus_ext.scenario.toy_car.visualizer]_vtkm_bridge interface: {self._vtkm_bridge}"
)
self.parameters = VisParameters()
self.velocity = None
self.all_points = None
self.bounds = None
self._stage_id = None
self._isosurface_primname = None
self._streamlines_primname = None
self._slice_primname_x = None
self._slice_primname_y = None
self._slice_primname_z = None
self._seedpoints = None
self._usd_context = None
self.timeline = omni.timeline.acquire_timeline_interface()
def get_geometry_prim(self, bridge_prim_name: str):
stage = self._usd_context.get_stage()
new_suffix = "_geometry"
prim_name = bridge_prim_name.rsplit("_", maxsplit=1)[0] + new_suffix
return stage.GetPrimAtPath(f"/RootClass/geometries/{prim_name}")
def focus_prim(self, prim: Usd.Prim):
if not prim.IsValid():
return
self._usd_context.get_selection().set_selected_prim_paths(
[str(prim.GetPath())], True
)
try:
import omni.kit.viewport_legacy
viewport = omni.kit.viewport_legacy.get_viewport_interface()
if viewport:
viewport.get_viewport_window().focus_on_selected()
except Exception:
    # Focusing the viewport is best-effort; the legacy viewport interface may
    # not be available in newer Kit versions.
    pass
def update_data(
self,
velocity: np.ndarray,
velmag: np.ndarray,
bounds: List[int],
mask: np.ndarray = None,
resolution: List[int] = [190, 190, 190],
):
self.velocity = velocity
self.bounds = bounds
self.velmag = velmag
def nan_ptp(a):
return np.ptp(a[np.isfinite(a)])
self.velmag = (self.velmag - np.nanmin(self.velmag))/nan_ptp(self.velmag)
coords_x = np.linspace(self.bounds[0], self.bounds[1], resolution[0])
coords_y = np.linspace(self.bounds[2], self.bounds[3], resolution[1])
coords_z = np.linspace(self.bounds[4], self.bounds[5], resolution[2])
Z, Y, X = np.meshgrid(coords_z, coords_y, coords_x, indexing="ij")
self.all_points = np.array(
np.transpose([C.flatten() for C in [X, Y, Z]]),
copy=True,
order="C",
dtype=np.float32,
)
duplicated_velmag = np.expand_dims(self.velmag, axis=-1)
np.seterr(invalid="ignore")
self.normalized_velocity = self.velocity / duplicated_velmag
#self.normalized_velocity = self.velocity / np.amax(self.velocity)
self.normalized_velocity[mask] = 0
self.update_stage()
self._vtkm_bridge.set_field_data("toy_car_velocity", velocity, n_components=3)
self._vtkm_bridge.set_field_data(
"toy_car_normalized_velocity", self.normalized_velocity, n_components=3
)
self._vtkm_bridge.set_field_data("toy_car_velmag", velmag, n_components=1)
self._vtkm_bridge.set_regular_grid_bounds("toy_car", *bounds)
self._vtkm_bridge.set_regular_grid_extent(
"toy_car", *tuple(reversed(velmag.shape[:3]))
)
if self._seedpoints is not None:
self._vtkm_bridge.set_points("toy_car_points", self._seedpoints)
self.update_generated()
def update_generated(self):
if self._isosurface_primname:
self.generate_isosurface()
if self._streamlines_primname:
self.generate_streamlines()
if self._slice_primname_x or self._slice_primname_y or self._slice_primname_z:
self.generate_slices()
def update_stage(self):
if self._vtkm_bridge is None:
self._vtkm_bridge = get_bridge_interface()
# Use the bridge to generate an isosurface on the data
if self._usd_context is None:
self._usd_context = omni.usd.get_context()
stage = self._usd_context.get_stage()
stage_cache = UsdUtils.StageCache.Get()
stage_id = stage_cache.GetId(stage).ToLongInt()
if stage_id == self._stage_id:
return
self._stage_id = stage_id
self._vtkm_bridge.set_stage(stage_id)
def random_subset(self, points, values, npoints=25):
nonzero_selection = self.velmag.ravel() > 0.001 # Only points with some velocity
points_nonzero = points[nonzero_selection]
velmag_nonzero = self.velmag.ravel()[nonzero_selection]
print(f"points_nonzero: {points_nonzero[:10]}")
print(f"velmag_nonzero: {velmag_nonzero[:10]}")
# np.random.shuffle shuffles in place and returns None, so draw a shared
# permutation instead and keep the point and velocity-magnitude subsets aligned.
perm = np.random.permutation(len(points_nonzero))
points_subset = points_nonzero[perm[:npoints]]
velmag_subset = velmag_nonzero[perm[:npoints]]
return points_subset
def generate_streamlines(self):
self.update_stage()
# Use the bridge to generate streamlines on the data
np.random.seed(42)
self._seedpoints = self.random_subset(
self.all_points, self.velocity, npoints=self.parameters.streamline_count
)
self._vtkm_bridge.set_points("toy_car_points", self._seedpoints)
temp = self._streamlines_primname
self._streamlines_primname = self._vtkm_bridge.visualize_streamlines(
enabled=True,
streamline_name="toy_car_streamlines",
velocity_grid_name="toy_car",
velocity_data_array_name="toy_car_normalized_velocity",
sample_quantity_name="toy_car_velmag",
seed_points_name="toy_car_points",
step_size=self.parameters.streamline_step_size,
n_steps=int(self.parameters.streamline_step_count),
enable_tube_filter=True,
tube_radius=self.parameters.streamline_radius,
)
if not self._streamlines_primname:
print("Problem with streamline generation. Keeping old primname.")
self._streamlines_primname = temp
print(f"visualized streamlines: {self._streamlines_primname}")
if not temp and self._streamlines_primname:
prim = self.get_geometry_prim(self._streamlines_primname)
self.focus_prim(prim)
self.timeline.set_end_time(10)
def generate_isosurface(self):
self.update_stage()
# velocity magnitude isosurface
temp = self._isosurface_primname
self._isosurface_primname = self._vtkm_bridge.visualize_isosurface(
    enabled=True,
    isosurface_name="toy_car_isosurface",
    regular_grid_name="toy_car",
    field_data_name="toy_car_velmag",
    sample_quantity_name="toy_car_velmag",
    isovalue=self.parameters.isovalue,
)
if not self._isosurface_primname:
    print("Problem with isosurface generation. Keeping old primname.")
    self._isosurface_primname = temp
print(f"visualized isosurface: {self._isosurface_primname}")
if not temp and self._isosurface_primname:
    prim = self.get_geometry_prim(self._isosurface_primname)
    self.focus_prim(prim)
def generate_slices(self):
self.update_stage()
temp_x = self._slice_primname_x
temp_y = self._slice_primname_y
temp_z = self._slice_primname_z
# Use the bridge to generate slices for the data
self._slice_primname_x = self._vtkm_bridge.visualize_slice(
enabled=True,
slice_name="toy_car_slice_x",
regular_grid_name="toy_car",
field_data_name="toy_car_velmag",
az=90.,
el=0.0,
pos=self.parameters.slice_x_pos,
)
print(f"visualized slice: {self._slice_primname_x}")
self._slice_primname_y = self._vtkm_bridge.visualize_slice(
enabled=True,
slice_name="toy_car_slice_y",
regular_grid_name="toy_car",
field_data_name="toy_car_velmag",
az=0.0,
el=0.0,
pos=self.parameters.slice_y_pos,
)
print(f"visualized slice: {self._slice_primname_y}")
self._slice_primname_z = self._vtkm_bridge.visualize_slice(
enabled=True,
slice_name="toy_car_slice_z",
regular_grid_name="toy_car",
field_data_name="toy_car_velmag",
az=0.0,
el=90,
pos=self.parameters.slice_z_pos,
)
print(f"visualized slice: {self._slice_primname_z}")
if not self._slice_primname_x:
print("Problem with slice generation. Keeping old primname.")
self._slice_primname_x = temp_x
if not self._slice_primname_y:
print("Problem with slice generation. Keeping old primname.")
self._slice_primname_y = temp_y
if not self._slice_primname_z:
print("Problem with slice generation. Keeping old primname.")
self._slice_primname_z = temp_z
if not temp_z and self._slice_primname_z:
prim = self.get_geometry_prim(self._slice_primname_z)
self.focus_prim(prim)
| 10,751 | Python | 34.022801 | 99 | 0.596317 |
michaltakac/nerf-toy-car-aerodynamics/toy-car/trainer.py
|
from modulus.hydra import to_yaml
from modulus.hydra.utils import compose
from modulus.solver import Solver
from modulus.domain import Domain
from modulus.domain.inferencer import PointwiseInferencer
from src.geometry import ToyCarDomain
from src.toy_car import network, constraints, inlet_vel
cfg = compose(config_path="conf", config_name="config", job_name="toy_car_training")
print(to_yaml(cfg))
def run():
geo = ToyCarDomain()
domain = Domain()
nodes = network(cfg, scale=geo.scale)
constraints(cfg, geo=geo, nodes=nodes, domain=domain)
inlet_vel_inference = 1.5
# add inferencer
inferencer = PointwiseInferencer(
nodes=nodes,
invar=geo.interior_mesh.sample_interior(1000000, parameterization={inlet_vel: inlet_vel_inference}),
output_names=["u", "v", "w", "p"],
batch_size=4096,
)
domain.add_inferencer(inferencer, "inf_data")
inferencer = PointwiseInferencer(
nodes=nodes,
invar=geo.interior_mesh.sample_interior(5000, parameterization={inlet_vel: inlet_vel_inference}),
output_names=["u", "v", "w", "p"],
batch_size=256,
)
domain.add_inferencer(inferencer, "inf_data_small")
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| 1,325 | Python | 26.624999 | 108 | 0.672453 |
michaltakac/nerf-toy-car-aerodynamics/toy-car/src/geometry.py
|
import numpy as np
from sympy import sqrt, Max
from modulus.hydra import to_absolute_path
from modulus.geometry.tessellation import Tessellation
from modulus.geometry.primitives_3d import Box, Channel, Plane
class ToyCarDomain:
"""Toy car geometry inside channel"""
def __init__(self):
# read stl files to make geometry
point_path = to_absolute_path("./stl_files")
car_mesh = Tessellation.from_stl(
point_path + "/toy_bmw.stl", airtight=True
)
# scale and normalize mesh and openfoam data
self.center = (0, 0, 0)
self.scale = 1.0
self.car_mesh = self.normalize_mesh(car_mesh, self.center, self.scale)
# geometry params for domain
channel_origin = (-2.5, -0.5, -0.5625)
channel_dim = (5.0, 1.0, 1.125)
# channel
channel = Channel(
channel_origin,
(
channel_origin[0] + channel_dim[0],
channel_origin[1] + channel_dim[1],
channel_origin[2] + channel_dim[2],
),
)
# normalize meshes
def normalize_mesh(self, mesh, center, scale):
mesh = mesh.translate([-c for c in center])
mesh = mesh.scale(scale)
return mesh
| 1,267 | Python | 26.565217 | 78 | 0.5809 |
michaltakac/nerf-toy-car-aerodynamics/toy-car/src/toy_car.py
|
import numpy as np
import torch
from sympy import Symbol, Eq, tanh, Abs
from modulus.hydra import instantiate_arch, ModulusConfig
from modulus.domain.constraint import (
    PointwiseBoundaryConstraint,
    PointwiseInteriorConstraint,
    IntegralBoundaryConstraint,
)
from modulus.domain.monitor import PointwiseMonitor
from modulus.key import Key
from modulus.eq.pdes.navier_stokes import NavierStokes
from modulus.eq.pdes.turbulence_zero_eq import ZeroEquation
from modulus.eq.pdes.basic import NormalDotVec
# params for simulation
#############
# Real Params
#############
# fluid params
fluid_viscosity = 1.84e-05 # kg/m-s
fluid_density = 1.1614 # kg/m3
# boundary params
length_scale = 0.04 # m
inlet_velocity = 5.24386 # m/s
##############################
# Nondimensionalization Params
##############################
# fluid params
nu = fluid_viscosity / (fluid_density * inlet_velocity * length_scale)
rho = 1
normalize_inlet_vel = 1.0
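# Quick sanity check on the nondimensionalization (illustrative only; `Re` is
# not used by the constraints below): the nondimensional viscosity is the
# reciprocal of the Reynolds number formed from the real parameters above,
# Re = rho * U * L / mu ≈ 1.32e4, so nu ≈ 7.55e-05.
Re = fluid_density * inlet_velocity * length_scale / fluid_viscosity
assert abs(nu - 1.0 / Re) < 1e-12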
# heat params
D_solid = 0.1
D_fluid = 0.02
inlet_T = 0
source_grad = 1.5
source_area = source_dim[0] * source_dim[2]
u_profile = (
normalize_inlet_vel
* tanh((0.5 - Abs(y)) / 0.02)
* tanh((0.5625 - Abs(z)) / 0.02)
)
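# On the channel centerline (y = 0, z = 0) both tanh factors are ~1, so
# u ≈ normalize_inlet_vel; the profile falls to zero within roughly 0.02 of the
# channel walls at |y| = 0.5 and |z| = 0.5625.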
volumetric_flow = 1.0668 # value via integration of inlet profile
inlet_vel = Symbol("inlet_velocity")
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
# parameterization
inlet_vel_range = (0.05, 10.0)
inlet_vel_params = {inlet_vel: inlet_vel_range}
def network(cfg: ModulusConfig, scale):
# make list of nodes to unroll graph on
ze = ZeroEquation(nu=nu, dim=3, time=False, max_distance=0.5)
ns = NavierStokes(nu=ze.equations["nu"], rho=1.0, dim=3, time=False)
normal_dot_vel = NormalDotVec(["u", "v", "w"])
flow_net = instantiate_arch(
input_keys=[Key("x"), Key("y"), Key("z"), Key("inlet_velocity")],
output_keys=[Key("u"), Key("v"), Key("w"), Key("p")],
cfg=cfg.arch.fully_connected,
)
return (
ns.make_nodes()
+ normal_dot_vel.make_nodes()
+ [flow_net.make_node(name="flow_network", jit=cfg.jit)]
)
def constraints(cfg: ModulusConfig, geo, nodes, domain):
# add constraints to solver
# inlet
constraint_inlet = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo.inlet,
outvar={"u": u_profile, "v": 0, "w": 0},
batch_size=cfg.batch_size.inlet,
criteria=Eq(x, channel_origin[0]),
lambda_weighting={"u": 1.0, "v": 1.0, "w": 1.0},
batch_per_epoch=5000,
)
domain.add_constraint(constraint_inlet, "inlet")
# outlet
constraint_outlet = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=outlet,
outvar={"p": 0},
batch_size=cfg.batch_size.outlet,
criteria=Eq(x, channel_origin[0] + channel_dim[0]),
batch_per_epoch=5000,
)
domain.add_constraint(constraint_outlet, "outlet")
# no slip
no_slip = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo.noslip_mesh,
outvar={"u": 0, "v": 0, "w": 0},
batch_size=cfg.batch_size.no_slip,
parameterization=inlet_vel_params
)
domain.add_constraint(no_slip, "no_slip")
# interior
interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=geo.interior_mesh,
outvar={"continuity": 0, "momentum_x": 0, "momentum_y": 0, "momentum_z": 0},
batch_size=cfg.batch_size.interior,
parameterization=inlet_vel_params
)
domain.add_constraint(interior, "interior")
# integral continuity
def integral_criteria(invar, params):
sdf = geo.sdf(invar, params)
return np.greater(sdf["sdf"], 0)
integral_continuity = IntegralBoundaryConstraint(
nodes=nodes,
geometry=integral_plane,
outvar={"normal_dot_vel": volumetric_flow},
batch_size=cfg.batch_size.num_integral_continuity,
integral_batch_size=cfg.batch_size.integral_continuity,
criteria=integral_criteria,
lambda_weighting={"normal_dot_vel": 1.0},
parameterization={**x_pos_range, **param_ranges},
batch_per_epoch=5000,
)
domain.add_constraint(integral_continuity, "integral_continuity")
# add pressure monitor
invar_front_pressure = integral_plane.sample_boundary(
1024,
parameterization={
x_pos: heat_sink_base_origin[0] - 0.65,
**fixed_param_ranges,
},
)
pressure_monitor = PointwiseMonitor(
invar_front_pressure,
output_names=["p"],
metrics={"front_pressure": lambda var: torch.mean(var["p"])},
nodes=nodes,
)
domain.add_monitor(pressure_monitor)
invar_back_pressure = integral_plane.sample_boundary(
1024,
parameterization={
x_pos: heat_sink_base_origin[0] + 2 * 0.65,
**fixed_param_ranges,
},
)
pressure_monitor = PointwiseMonitor(
invar_back_pressure,
output_names=["p"],
metrics={"back_pressure": lambda var: torch.mean(var["p"])},
nodes=nodes,
)
domain.add_monitor(pressure_monitor)
| 5,031 | Python | 28.952381 | 84 | 0.619758 |
michaltakac/nerf-toy-car-aerodynamics/toy-car/src/plotter.py
|
import numpy as np
import scipy.interpolate
import matplotlib.pyplot as plt
from modulus.hydra import to_absolute_path
from modulus.utils.io.vtk import var_to_polyvtk
from modulus.utils.io import InferencerPlotter
def generate_velocity_profile_3d():
data = np.load(to_absolute_path("outputs/toy_car/inferencers/simulation.npz"), allow_pickle=True)
data = np.atleast_1d(data.f.arr_0)[0]
# velocity in 3D
pos = np.dstack((data["x"], data["y"], data["z"]))
V = np.dstack((data["u"], data["v"], data["w"]))
save_var = {
"x": data["x"],
"y": data["y"],
"z": data["z"],
"p": data["p"],
"pos": pos,
"V": V,
}
var_to_polyvtk(save_var, to_absolute_path("outputs/toy_car/inferencers/velocity_profile"))
class InferencerSlicePlotter2D(InferencerPlotter):
"Default plotter class for inferencer"
def __call__(self, invar, outvar):
"Default function for plotting inferencer data"
# get input variables
x, y = invar["x"][:, 0], invar["y"][:, 0]
bounds = (x.min(), x.max(), y.min(), y.max())
extent, outvar = self.interpolate_output(100, x, y, bounds, outvar)
# make plots
fs = []
for k in outvar:
f = plt.figure(figsize=(5, 4), dpi=144)
plt.imshow(outvar[k].T, origin="lower", extent=extent, cmap="jet")
plt.xlabel("x")
plt.ylabel("y")
plt.colorbar(location="bottom")
plt.title(k)
plt.tight_layout()
fs.append((f, k))
return fs
@staticmethod
def interpolate_output(size, x, y, extent, *outvars):
"Interpolates irregular points onto a mesh"
# define mesh to interpolate onto
xyi = np.meshgrid(
np.linspace(extent[0], extent[1], size),
np.linspace(extent[2], extent[3], size),
indexing="ij",
)
# interpolate outvars onto mesh
outvars_interp = []
for outvar in outvars:
outvar_interp = {}
for k in outvar:
outvar_interp[k] = scipy.interpolate.griddata(
(x, y), outvar[k][:, 0], tuple(xyi)
)
outvars_interp.append(outvar_interp)
return [extent] + outvars_interp
| 2,310 | Python | 29.407894 | 101 | 0.55974 |
michaltakac/nerf-toy-car-aerodynamics/toy-car/conf/config.yaml
|
defaults :
- modulus_default
- arch:
- fully_connected
- scheduler: tf_exponential_lr
- optimizer: adam
- loss: sum
- _self_
save_filetypes: "vtk,np"
scheduler:
decay_rate: 0.95
decay_steps: 15000
training:
rec_results_freq : 200
rec_constraint_freq: 50000
max_steps : 1500000
batch_size:
inlet: 650
outlet: 650
no_slip: 5200
interior: 6000
integral_continuity: 310
| 407 | YAML | 14.692307 | 32 | 0.670762 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/main.py
|
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
from robot.scene.task import Env
from UOC.app import Segmenter
from VM.matcher import VisualMatcher
from ui import WidgetsExtension
image_folder = "E:/workspace/visual_match/images"
def save_rgbs(rgbs, folder, prefix):
import cv2
import os
for i, rgb in enumerate(rgbs):
f = os.path.join(folder, f"{prefix}_{i}.png")
cv2.imwrite(f, rgb)
matcher = VisualMatcher()
seger = Segmenter()
env = Env(save_folder=image_folder)
ext = WidgetsExtension()
ext.init_window(env, image_folder)
if simulation_app.is_running():
env.reset()
env.move_to_init()
while not env.is_start:
env.idle(1)
env.scene.load_objects()
env.idle(200)
tg_data = env.save_images('tg')
ext.show_target_img()
print("Segment target")
tg_rgbs, tg_bbox = seger.segment_and_crop( tg_data[0], tg_data[1], tg_data[2] )
tg_pcs = seger.crop_point_cloud(tg_data[1], tg_data[2], tg_bbox)
# save_rgbs(tg_rgbs, image_folder, 'tg')
env.reset()
env.scene.load_objects_2()
env.idle(200)
sc_data = env.save_images('sc')
ext.show_source_img()
print("Segment source")
sc_rgbs, sc_bbox = seger.segment_and_crop( sc_data[0], sc_data[1], sc_data[2] )
sc_pcs = seger.crop_point_cloud(sc_data[1], sc_data[2], sc_bbox)
# save_rgbs(sc_rgbs, image_folder, 'sc')
print("Match objects")
s_list, t_list = matcher.match_images(sc_rgbs, tg_rgbs, env.scene.names)
print(s_list)
print(t_list)
# generate grasp
print("Compute pick poses")
sc_mats = env.get_pick_mat(sc_pcs)
tg_mats = env.get_pick_mat(tg_pcs)
min_num = min(len(s_list), len(t_list))
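# For each matched pair: pick up the source object, carry it to the matched
# target pose, and release it slightly above the target before retreating.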
for index in range(min_num):
env.move_to_mat(sc_mats[ index ], 0.3)
env.gripper_close()
env.move_up(0.3)
mat = tg_mats[ t_list[index] ]
mat[:3, 3][2] += 0.05
env.move_to_mat(mat, 0.3)
env.gripper_open()
env.idle(20)
env.move_up(0.3)
env.move_to_left()
env.idle(50)
fin_data = env.save_images('fin')
ext.show_final_img()
env.world.pause()
while simulation_app.is_running():
env.world.step(render=True)
simulation_app.close()
| 2,319 | Python | 23.946236 | 83 | 0.62182 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/evaluate_lvis.py
|
import os
import numpy as np
from VM.lvis import Lvis
from VM.matcher import VisualMatcher
if __name__ == "__main__":
dataset = Lvis()
matcher = VisualMatcher()
np.random.seed(6)
task_num = 1
num_per_task = 8
all_rate = []
test_mode = 'CLIP_V'
test_mode = 'CLIP_K'
# test_mode = 'CLIP_N'
all_test = ['CLIP_N', 'CLIP_K', 'CLIP_V']
all_test = ['CLIP_N']
for test_mode in all_test:
total_rate = []
for data in dataset.random_test(task_num, num_per_task):
source_list, target_list, label_list = data
use_text = True
if test_mode == 'CLIP_V':
use_text = False
elif test_mode == 'CLIP_K':
label_list = dataset.cat_names
source_ids, target_ids = matcher.match_images( source_list, target_list, label_list, use_text )
match_rate = sum(source_ids == target_ids) / num_per_task
total_rate.append(match_rate)
rate = np.mean(total_rate)
all_rate.append(rate)
print( f"Total categories: {dataset.cat_num}" )
print( f" Task num : {task_num}" )
print( f" Num pre task: {num_per_task}" )
print( "-"*20 )
for i in range(len(all_rate)):
print("%7s: %.3f" % (all_test[i], all_rate[i]))
A = 1
# Total categories: 414
# Task num : 200
# Num pre task: 20
# --------------------
# CLIP_N: 0.577
# CLIP_K: 0.541
# CLIP_V: 0.403
# Total categories: 414
# Task num : 500
# Num pre task: 8
# --------------------
# CLIP_N: 0.771
# CLIP_K: 0.724
# CLIP_V: 0.542
| 1,626 | Python | 21.915493 | 107 | 0.523985 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/test.py
|
import os, sys
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from UOC.app import Segmenter
from robot.tools import pv_tools
if __name__ == "__main__":
seg = Segmenter()
rgb = np.array(Image.open("./images/sc_rgb.png"))[:,:,:3][:,:,::-1].copy()
dep = np.load("./images/sc_dep.npy")
cam = np.load("./images/sc_cam.npy")
c2w = np.load("./images/sc_c2w.npy")
camera_params = {}
# camera_params['x_offset'] = cam[0,0]
# camera_params['y_offset'] = cam[1,1]
# camera_params['fx'] = cam[0,2]
# camera_params['fy'] = cam[1,2]
camera_params['c2w'] = c2w
camera_params['cam'] = cam
print(c2w)
print(cam)
all_rgb, all_bbox = seg.segment_and_crop(rgb, dep, camera_params)
all_points = seg.crop_point_cloud(dep, camera_params, all_bbox)
all_pc = []
for points in all_points:
all_pc.append(pv_tools.get_pc(points))
mat = np.eye(4)
mat[:3,3] = c2w[:3,3]
cam_ax = pv_tools.get_axis(mat)
ax = pv_tools.get_axis()
all_pc.append(cam_ax)
all_pc.append(ax)
pv_tools.show_mesh(all_pc)
a = 1
| 1,138 | Python | 22.244898 | 78 | 0.580844 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/README.md
|
## Reproduction project for the course "Frontier Technologies in Computer Science"
## Reproduced paper: Semantically Grounded Object Matching for Robust Robotic Scene Rearrangement [🧷](https://arxiv.org/abs/2111.07975)
### Object matching only:
> Environment dependencies
python >= 3.6
pytorch >= 1.7.1
clip (https://github.com/openai/CLIP)
> Core code
The reproduction code for object matching lives in the `VM` (visual matching) folder; [`matcher.py`](./VM/matcher.py) implements the matching algorithm. See `evaluate_lvis.py` for a usage reference.
```python
from VM.matcher import VisualMatcher
matcher = VisualMatcher()
source_list = xxx # source images
target_list = xxx # goal images
label_list = xxx # object labels
use_text = True
source_ids, target_ids = matcher.match_images( source_list, target_list, label_list, use_text )
```
> Test dataset
Please follow `VM/data/README.md` to download and preprocess the dataset.
### Robotic scene rearrangement experiment:
> Environment dependencies
python >= 3.6
pyvista
pytorch >= 1.7.1
clip (https://github.com/openai/CLIP)
> Simulation environment (an RTX GPU is required):
omniverse isaac sim (https://developer.nvidia.com/isaac-sim)
> Core code
`robot`: control code for the Isaac Sim simulation environment
`UOC`: adapted from [https://github.com/NVlabs/UnseenObjectClustering](https://github.com/NVlabs/UnseenObjectClustering); the added `app.py` exposes a `Segmenter` class used as the instance-segmentation module. Download their pretrained weights and place them under `UOC/data/checkpoints` before use.
`main.py`: main script for the robotic rearrangement
`ui.py`: code that sets up the Isaac Sim UI
`run.bat`: command that runs `main.py`
| 1,219 | Markdown | 18.365079 | 198 | 0.71534 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/ui.py
|
import omni.ext
import omni.ui
import numpy as np
import os
def add_separator(height=4, width=2, color=0xff202020):
omni.ui.Spacer(height=height)
omni.ui.Line(style={"border_width":width, "color":color})
omni.ui.Spacer(height=height)
def add_label(text):
# with omni.ui.HStack(width=0):
omni.ui.Spacer(width=8)
label = omni.ui.Label(text)
label.alignment = omni.ui._ui.Alignment.H_CENTER
omni.ui.Spacer(width=4)
return label
def add_btn(text, enabled=True, scale=1):
omni.ui.Spacer(height=4)
btn = omni.ui.Button(text)
btn.height *= scale
omni.ui.Spacer(height=4)
btn.enabled = enabled
return btn
# ----------------------------------------------------------.
class WidgetsExtension(omni.ext.IExt):
# ------------------------------------------------.
# Init window.
# ------------------------------------------------.
def init_window (self, env=None, image_folder=None):
self.env = env
self.image_folder = image_folder
# Create new window.
self._window = omni.ui.Window("Widgets Window", width=340, height=600)
# ------------------------------------------.
with self._window.frame:
# Create window UI.
with omni.ui.VStack(height=0):
self.btn = add_btn(" START ", scale=3)
self.btn.set_clicked_fn(self.onButtonClicked)
add_separator(8)
self.target_label = add_btn("Target", False)
self.target_image = omni.ui.Image("", width=280, height=180)
# Separator.
add_separator(8)
self.source_label = add_btn("Source", False)
self.source_image = omni.ui.Image("", width=280, height=180)
# Separator.
add_separator(8)
self.final_label = add_btn("Final", False)
self.final_image = omni.ui.Image("", width=280, height=180)
def onButtonClicked(self):
if self.env is not None:
self.env.is_start = True
self.btn.enabled = False
# index = np.random.random_integers(0,5)
# folder = f"E:/workspace/visual_match/images/sc_{index}.png"
# if index == 4:
# folder = f"E:/workspace/visual_match/images/sc_rgb.png"
# print( type(self.source_label.alignment) )
# self.source_image.source_url = folder
def show_source_img(self):
self.source_image.source_url = os.path.join(self.image_folder, "sc_rgb.png")
def show_target_img(self):
self.target_image.source_url = os.path.join(self.image_folder, "tg_rgb.png")
def show_final_img(self):
self.final_image.source_url = os.path.join(self.image_folder, "fin_rgb.png")
# ------------------------------------------------.
# Term window.
# ------------------------------------------------.
def term_window (self):
if self._window != None:
self._window = None
# ------------------------------------------------.
# Startup.
# ------------------------------------------------.
def on_startup(self, ext_id=0):
self.init_window()
# ------------------------------------------------.
# Shutdown.
# ------------------------------------------------.
def on_shutdown(self):
self.term_window()
| 3,385 | Python | 33.55102 | 84 | 0.49099 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/robot/test.py
|
from omni.isaac.kit import SimulationApp
simulation_app = SimulationApp({"headless": False})
# 28.53s: False
# 27.53s: True
import omni
from omni.isaac.core import World
import numpy as np
import os
from omni.isaac.core.objects import DynamicCuboid
from omni.isaac.core.utils.prims import create_prim
from pxr import Gf, UsdPhysics
from pxr import UsdPhysics, PhysxSchema, UsdGeom, Gf, UsdLux, Usd, Sdf
from omni.isaac.core.robots import Robot
from omni.isaac.manipulators.grippers import ParallelGripper
from omni.isaac.surface_gripper._surface_gripper import Surface_Gripper_Properties, Surface_Gripper
from omni.isaac.dynamic_control import _dynamic_control
from robot.tools.omni_tools import *
# from tools.ycb import get_random_name
from tools.graspnet import get_random_name
import time
from omni.isaac.core.utils.prims import is_prim_path_valid
start = time.time()
my_world = World(stage_units_in_meters=1.0, physics_prim_path="/World/physicsScene")
my_world.scene.add_default_ground_plane()
def test_gripper():
stage = omni.usd.get_context().get_stage()
prim_path = "/World/defaultGroundPlane/tmp"
tmp = stage.DefinePrim( prim_path, "Xform")
set_translate(tmp, [0, 0.5, 0])
prim_path = "/World/defaultGroundPlane/tmp/a"
tmp = stage.DefinePrim( prim_path, "Xform")
set_translate(tmp, [0, 0, 0])
add_box(stage, prim_path + "/vc", [0.04, 0.04, 0.04], [0, 0, 0.08], [1,0,0,0], [1,1,0], True, True )
add_box(stage, prim_path + "/obj", [0.04, 0.04, 0.04], [0.0, 0, 0.02], [1,0,0,0], [1,0,0], True, True )
# Gripper properties
sgp = Surface_Gripper_Properties()
sgp.d6JointPath = prim_path + "/vc/APP"
sgp.parentPath = prim_path + "/vc"
sgp.offset = _dynamic_control.Transform()
# sgp.offset.p.x = 0
sgp.offset.p.z = -0.0201
sgp.offset.r = [0.7071, 0, 0.7071, 0] # Rotate to point gripper in Z direction
sgp.gripThreshold = 0.02
sgp.forceLimit = 1.0e2
sgp.torqueLimit = 1.0e3
sgp.bendAngle = np.pi / 4
sgp.stiffness = 1.0e4
sgp.damping = 1.0e3
dc = _dynamic_control.acquire_dynamic_control_interface()
print(dc)
gripper = Surface_Gripper(dc)
gripper.initialize(sgp)
return gripper
from omni.isaac.dynamic_control import _dynamic_control
from omni.isaac.core.utils.types import ArticulationAction
from omni.isaac.core.controllers import BaseController
import numpy as np
class CoolController(BaseController):
def __init__(self):
super().__init__(name="my_cool_controller")
# An open loop controller that uses a unicycle model
return
def forward(self, command):
# A controller has to return an ArticulationAction
return ArticulationAction(joint_positions=command)
def set_drive_parameters(drive, target_type, target_value, stiffness=None, damping=None, max_force=None):
"""Enable velocity drive for a given joint"""
if target_type == "position":
if not drive.GetTargetPositionAttr():
drive.CreateTargetPositionAttr(target_value)
else:
drive.GetTargetPositionAttr().Set(target_value)
elif target_type == "velocity":
if not drive.GetTargetVelocityAttr():
drive.CreateTargetVelocityAttr(target_value)
else:
drive.GetTargetVelocityAttr().Set(target_value)
if stiffness is not None:
if not drive.GetStiffnessAttr():
drive.CreateStiffnessAttr(stiffness)
else:
drive.GetStiffnessAttr().Set(stiffness)
if damping is not None:
if not drive.GetDampingAttr():
drive.CreateDampingAttr(damping)
else:
drive.GetDampingAttr().Set(damping)
if max_force is not None:
if not drive.GetMaxForceAttr():
drive.CreateMaxForceAttr(max_force)
else:
drive.GetMaxForceAttr().Set(max_force)
class DataGenerate(object):
def __init__(self, world, ycb_folder="E:/dataset/ycb", save_folder="E:/dataset/tap/train", object_num=10, sample_range=[0,6], start_num=0) -> None:
self.world = world
self.save_folder = save_folder
self.ycb_folder = ycb_folder
self.object_num = object_num
self.sample_range = sample_range
self.all_prims = []
self.all_paths = []
self.names = []
self.state_num = start_num
# def set_controller(self):
# dc = _dynamic_control.acquire_dynamic_control_interface()
# articulation = dc.get_articulation(path)
# # Call this each frame of simulation step if the state of the articulation is changing.
# self.dc = dc
# self.articulation = articulation
# self.articulation = self.robot.get_articulation_controller()
def config(self):
stage = omni.usd.get_context().get_stage()
PhysxSchema.PhysxArticulationAPI.Get(stage, "/World/pusher").CreateSolverPositionIterationCountAttr(64)
PhysxSchema.PhysxArticulationAPI.Get(stage, "/World/pusher").CreateSolverVelocityIterationCountAttr(64)
self.gripper_left = UsdPhysics.DriveAPI.Get(stage.GetPrimAtPath( f"/World/pusher/center/c_left"), "linear")
self.gripper_right = UsdPhysics.DriveAPI.Get(stage.GetPrimAtPath( f"/World/pusher/center/c_right"), "linear")
self.gripper_top = UsdPhysics.DriveAPI.Get(stage.GetPrimAtPath( f"/World/pusher/center/c_top"), "linear")
self.gripper_down = UsdPhysics.DriveAPI.Get(stage.GetPrimAtPath( f"/World/pusher/center/c_down"), "linear")
def init_pusher(self):
set_drive_parameters(self.gripper_left, "position", 0 )
set_drive_parameters(self.gripper_right, "position", 0 )
set_drive_parameters(self.gripper_top, "position", 0 )
set_drive_parameters(self.gripper_down, "position", 0 )
def close(self):
pos = self.gripper_left.GetTargetPositionAttr().Get()
step = 0.001
pos = pos-step
min_value = -0.05
if pos < min_value:
pos = min_value
set_drive_parameters(self.gripper_left, "position", pos )
set_drive_parameters(self.gripper_right, "position", pos )
set_drive_parameters(self.gripper_top, "position", pos )
set_drive_parameters(self.gripper_down, "position", pos )
def open(self):
pos = self.gripper_left.GetTargetPositionAttr().Get()
step = 0.001
pos = pos + step
if pos > 0.3:
pos = 0.3
set_drive_parameters(self.gripper_left, "position", pos )
set_drive_parameters(self.gripper_right, "position", pos )
set_drive_parameters(self.gripper_top, "position", pos )
set_drive_parameters(self.gripper_down, "position", pos )
def add_model(self, obj_name, position=[0,0,0], orientation=[1, 0, 0, 0], scale=[1,1,1]):
YCB_DIRECTORY = "E:/dataset/ycb"
if 'ycb' in self.ycb_folder:
usd_path = f'{self.ycb_folder}/{obj_name}/google_16k/text.usd'
else:
usd_path = f'{self.ycb_folder}/{obj_name}/omni/simple.usd'
prim_path = f"/World/obj_{obj_name}"
prim = load_obj_usd(usd_path, prim_path, position, orientation, scale, set_rigid=True, kinematic=False)
return prim
def generate_ycb(self):
remove(self.all_paths)
w_range = 0.3
o_range = 0.25
container_weight = 0.05
support_translate = np.array([0,0,0])
stage = omni.usd.get_context().get_stage()
height = w_range * 4
half_height = height/2
pusher_path = "/World/pusher"
if not is_prim_path_valid(pusher_path):
# w1 = create_pusher( stage, "/World/W1", pos=support_translate + [ w_range/2 + container_weight*2, 0, half_height ], size=[ container_weight, w_range * 1, height] , axis='X')
# w2 = create_pusher( stage, "/World/W2", pos=support_translate - [ w_range/2 + container_weight*2, 0, -(half_height) ], size=[ container_weight, w_range * 1, height] , axis='X')
# w3 = create_pusher( stage, "/World/W3", pos=support_translate + [ 0, w_range/2 + container_weight*2, half_height ], size=[w_range * 1, container_weight, height] , axis='Y')
# w4 = create_pusher( stage, "/World/W4", pos=support_translate - [ 0, w_range/2 + container_weight*2, -(half_height) ], size=[w_range * 1, container_weight, height], axis='Y' )
# self.articulation = CoolController()
pusher = load_obj_usd("./assets/pusher/pusher/pusher.usd", pusher_path, scale=(1, 1, 1), translate=[0, 0, w_range])
# self.robot = self.world.scene.add(Robot(prim_path=pusher_path, name='pusher'))
self.config()
# self.walls = [w1,w2,w3,w4]
# add_box( stage, "/World/W1", position=support_translate + [ w_range/2 + container_weight*2, 0, half_height ], orientation=[1,0,0,0], size=[ container_weight, w_range*2, height], color=[0,0.1,0.7] )
# add_box( stage, "/World/W2", position=support_translate - [ w_range/2 + container_weight*2, 0, -(half_height) ], orientation=[1,0,0,0], size=[ container_weight, w_range*2, height], color=[0,0.1,0.7] )
# add_box( stage, "/World/W3", position=support_translate + [ 0, w_range/2 + container_weight*2, half_height ], orientation=[1,0,0,0], size=[w_range*2, container_weight, height], color=[0,0.1,0.7] )
# add_box( stage, "/World/W4", position=support_translate - [ 0, w_range/2 + container_weight*2, -(half_height) ], orientation=[1,0,0,0], size=[w_range*2, container_weight, height], color=[0,0.1,0.7] )
names = get_random_name( self.sample_range, self.object_num)
all_paths = []
all_prims = []
for i, name in enumerate(names):
rand_pos = (np.random.rand(3) - 0.5) * o_range
rand_pos[2] = 0.2 * i + 0.25
prim = self.add_model(name, rand_pos + support_translate)
all_prims.append(prim)
all_paths.append(prim.GetPrimPath())
self.all_prims = all_prims
self.all_paths = all_paths
self.names = names
def load_ycb(self, data_index, offset=[0,0,0]):
data_path = os.path.join(self.save_folder, "%d.npy" % data_index)
data = np.load(data_path, allow_pickle=True).item()
names = data['name']
mats = data['mat']
remove(self.all_paths)
all_paths = []
all_prims = []
for i, name in enumerate(names):
mat = mats[i]
mat[:3,3] += offset
prim = self.add_model(name)
set_transform(prim, mat)
all_prims.append(prim)
all_paths.append(prim.GetPrimPath())
self.all_prims = all_prims
self.all_paths = all_paths
self.names = names
def remove_walls(self):
remove(["/World/W1", "/World/W2", "/World/W3", "/World/W4"])
def record_state(self):
if not os.path.exists(self.save_folder):
os.makedirs(self.save_folder, exist_ok=True)
save_path = os.path.join(self.save_folder, "%s" % (self.state_num))
state = {}
state['mat'] = []
state['name'] = self.names
for i, prim in enumerate(self.all_prims):
mat = get_transform(prim)
state['mat'].append(mat)
np.save( save_path, state)
self.state_num += 1
train_num = 2
# data = DataGenerate("E:/dataset/ycb", "E:/dataset/tap/train/")
data = DataGenerate( my_world, "E:/dataset/grasp_net/models", "E:/dataset/tap/train", start_num=5)
my_world.reset()
is_load = False
i = 0
while simulation_app.is_running():
my_world.step(render=True)
if my_world.is_playing():
if my_world.current_time_step_index == 0:
my_world.reset()
if not is_load:
if train_num > 0:
i += 1
if i == 1:
data.generate_ycb()
data.init_pusher()
# my_world.pause()
elif i < 200:
continue
elif i < 500:
data.close()
elif i < 700:
data.open()
elif i < 1000:
data.close()
elif i < 1200:
data.open()
elif i == 1200:
print(train_num, ' ====')
# data.record_state()
# elif i == 2000:
i = 0
train_num -= 1
else:
data.load_ycb(0, [0, 0, 0])
print(time.time() - start, " s ---------------")
simulation_app.close()
| 12,713 | Python | 36.952239 | 214 | 0.599544 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/robot/tools/pv_tools.py
|
import pyvista as pv
from matplotlib.cm import get_cmap
import numpy as np
def bbox(mesh:pv.PolyData):
min_x, max_x, min_y, max_y, min_z, max_z = mesh.bounds
w = max_x - min_x
l = max_y - min_y
h = max_z - min_z
size = np.array([w,l,h])
return size
def get_pc(points):
pc = pv.PolyData(points)
pc['pos'] = points
return pc
def get_axis(mat=np.eye(4)):
axes = get_pc(np.zeros((3,3)) + mat[:3,3])
axes['norm'] = mat[:3,:3]
axes_arrows = axes.glyph(
orient='norm',
scale=False,
factor=0.08,
)
return axes_arrows
def get_color(value):
cmap = get_cmap("nipy_spectral")
colors = ( np.array(cmap(value))[:3] * 255.0).astype(np.uint8)
return colors
def show_mesh(meshes, colors=None):
plotter = pv.Plotter()
plotter.add_axes()
main_axes = get_axis()
for i, m in enumerate(meshes):
if colors is not None:
c = colors[i]
else:
c = get_color( np.random.rand() )
# plotter.add_mesh(m, scalars='pos')
plotter.add_mesh(m, color=c)
plotter.add_mesh(main_axes, color='red')
plotter.show()
| 1,174 | Python | 22.039215 | 66 | 0.561329 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/robot/tools/usd_convert.py
|
# --------------------------------------------------.
# obj to usd conversion.
# See : https://docs.omniverse.nvidia.com/app_create/prod_extensions/ext_asset-converter.html
# >>> https://github.com/ft-lab/omniverse_sample_scripts/blob/main/AssetConverter/importObjToUSD.py
# !!! run in script windows in isaac sim
# --------------------------------------------------.
import carb
import omni
import asyncio
import omni.kit.asset_converter
import os
# Progress of processing.
def progress_callback (current_step: int, total: int):
# Show progress
print(f"{current_step} of {total}")
# Convert an asset file (obj/fbx/glTF, etc.) to usd.
async def convert_asset_to_usd (input_asset: str, output_usd: str):
print("here")
# Input options are defaults.
converter_context = omni.kit.asset_converter.AssetConverterContext()
converter_context.ignore_materials = False
converter_context.ignore_camera = True
converter_context.ignore_animations = True
converter_context.ignore_light = True
converter_context.export_preview_surface = False
converter_context.use_meter_as_world_unit = False
converter_context.create_world_as_default_root_prim = True
converter_context.embed_textures = True
converter_context.convert_fbx_to_y_up = False
converter_context.convert_fbx_to_z_up = False
converter_context.merge_all_meshes = False
converter_context.use_double_precision_to_usd_transform_op = False
converter_context.ignore_pivots = False
converter_context.keep_all_materials = True
converter_context.smooth_normals = True
instance = omni.kit.asset_converter.get_instance()
task = instance.create_converter_task(input_asset, output_usd, progress_callback, converter_context)
# Wait for completion.
success = await task.wait_until_finished()
if not success:
print(input_asset)
carb.log_error(task.get_status(), task.get_detailed_error())
print("converting done")
def ycb():
YCB_DIRECTORY = "E:/dataset/ycb"
# usd_path = f'{YCB_DIRECTORY}/{obj_name}/google_16k/
for model_folder in os.listdir(YCB_DIRECTORY):
#for model_folder in ["007_tuna_fish_can"]:
mf = os.path.join(YCB_DIRECTORY, model_folder, "google_16k")
tf = os.path.join(YCB_DIRECTORY, model_folder, "google_16k", "textures")
if os.path.exists(mf) and not os.path.exists(tf):
print(model_folder)
input_obj = os.path.join(mf, "textured.obj")
output_usd = os.path.join(mf, "text.usd")
# convert_asset_to_usd(input_obj, output_usd)
asyncio.ensure_future(
convert_asset_to_usd(input_obj, output_usd))
GN_DIRECTORY = "E:/dataset/grasp_net/models"
for model_folder in os.listdir(GN_DIRECTORY):
#for model_folder in ["022"]:
mf = os.path.join(GN_DIRECTORY, model_folder)
if not os.path.isdir(mf):
continue
tf = os.path.join(mf, "omni", "textures")
if os.path.exists(mf) and not os.path.exists(tf):
print(model_folder)
input_obj = os.path.join(mf, "simple.dae")
output_usd = os.path.join(mf, "omni", "simple.usd")
# convert_asset_to_usd(input_obj, output_usd)
asyncio.ensure_future(
convert_asset_to_usd(input_obj, output_usd))
| 3,310 | Python | 37.5 | 104 | 0.655287 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/robot/tools/graspnet.py
|
import os
import numpy as np
import os
GN_DIRECTORY = "E:/dataset/grasp_net/models"
def get_random_name(sample_range=[0, 13], sample_num=10):
valid_list = np.load("./data/valid_grasp.npy")
invalid_list = []
for i, v in enumerate(valid_list):
    if v == 0:
        # store the index of each invalid model, not the flag value itself
        invalid_list.append(i)
invalid_list.append(5) # banana
ids = [i for i in range(sample_range[0], sample_range[1]) if i not in invalid_list]
sample_ids = np.random.choice(ids, sample_num)
ret = []
for sid in sample_ids:
ret.append("%03d" % sid)
return ret
def convert():
import pybullet as p
import os
for index in range(0, 88):
# for index in [82]:
obj_name = "%03d" % index
name_in = os.path.join(GN_DIRECTORY, obj_name, "textured.obj")
name_out = os.path.join(GN_DIRECTORY, obj_name, "vhacd.obj")
name_log = os.path.join("./log")
p.vhacd( name_in, name_out, name_log, resolution=500000, depth=5)
def volume():
all = []
import pyvista as pv
for index in range(0, 88):
# for index in [82]:
obj_name = "%03d" % index
name_in = os.path.join(GN_DIRECTORY, obj_name, "textured.obj")
mesh = pv.read(name_in)
data = pv.MultiBlock([mesh])
volume = data.volume
all.append(volume)
print(index, volume)
np.save("./data/volume", all)
if __name__ == '__main__':
volume()
| 1,430 | Python | 24.553571 | 87 | 0.572727 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/robot/tools/ycb.py
|
import os
import numpy as np
YCB_DIRECTORY = "E:/dataset/ycb"
ycb_list = [
"002_master_chef_can",
"003_cracker_box",
"004_sugar_box",
"005_tomato_soup_can",
"006_mustard_bottle",
"007_tuna_fish_can",
"008_pudding_box",
"009_gelatin_box",
"010_potted_meat_can",
"011_banana",
"013_apple",
"014_lemon",
"015_peach",
"016_pear",
"017_orange",
"018_plum",
"019_pitcher_base",
"021_bleach_cleanser",
"024_bowl",
"025_mug",
"036_wood_block",
"046_plastic_bolt",
"052_extra_large_clamp",
"053_mini_soccer_ball",
"054_softball",
"055_baseball",
"056_tennis_ball",
"057_racquetball",
"058_golf_ball",
"065-a_cups",
"065-b_cups",
"065-c_cups",
"065-d_cups",
"077_rubiks_cube"
]
def get_random_name(sample_range=[0,6], sample_num=10):
ids = [i for i in range(sample_range[0], sample_range[1])]
sample_ids = np.random.choice(ids, sample_num)
ret = []
for sid in sample_ids:
ret.append(ycb_list[sid])
return ret
def convert():
import pybullet as p
for obj_name in ycb_list:
name_in = os.path.join(YCB_DIRECTORY, obj_name, "google_16k", "textured.obj")
name_out = os.path.join(YCB_DIRECTORY, obj_name, "google_16k", "vhacd.obj")
name_log = os.path.join("./log")
p.vhacd( name_in, name_out, name_log, resolution=500000, depth=1)
if __name__ == "__main__":
convert()
| 1,481 |
Python
| 21.8 | 85 | 0.577988 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/robot/tools/omni_tools.py
|
import omni
from pxr import UsdPhysics, UsdGeom, Gf, Sdf, Usd
from omni.isaac.core.utils.string import find_unique_string_name
from omni.isaac.core.utils.prims import is_prim_path_valid
from omni.isaac.core.objects.cuboid import VisualCuboid
from omni.physx.scripts import utils
from scipy.spatial.transform import Rotation
import carb
import carb.events
# from omni.debugdraw import _debugDraw
import math
import numpy as np
def set_drive_parameters(drive, target_type, target_value, stiffness=None, damping=None, max_force=None):
"""Enable velocity drive for a given joint"""
if target_type == "position":
if not drive.GetTargetPositionAttr():
drive.CreateTargetPositionAttr(target_value)
else:
drive.GetTargetPositionAttr().Set(target_value)
elif target_type == "velocity":
if not drive.GetTargetVelocityAttr():
drive.CreateTargetVelocityAttr(target_value)
else:
drive.GetTargetVelocityAttr().Set(target_value)
if stiffness is not None:
if not drive.GetStiffnessAttr():
drive.CreateStiffnessAttr(stiffness)
else:
drive.GetStiffnessAttr().Set(stiffness)
if damping is not None:
if not drive.GetDampingAttr():
drive.CreateDampingAttr(damping)
else:
drive.GetDampingAttr().Set(damping)
if max_force is not None:
if not drive.GetMaxForceAttr():
drive.CreateMaxForceAttr(max_force)
else:
drive.GetMaxForceAttr().Set(max_force)
def clean_transform(prim):
# prim.RemoveProperty("xformOp:translate")
# prim.RemoveProperty("xformOp:orient")
# prim.RemoveProperty("xformOp:scale")
prim.GetAttribute("xformOpOrder").Set(['xformOp:translate', 'xformOp:rotateXYZ', 'xformOp:orient', 'xformOp:scale'])
set_translate(prim, [0,0,0])
set_orientation(prim, [1,0,0,0])
set_scale(prim, [1,1,1])
def get_prim_path(prim):
return prim.GetPrimPath()
def get_attr(prim, op_name, data_type):
xform_ops = prim.GetAttribute("xformOpOrder").Get()
if xform_ops is None:
xform = UsdGeom.Xformable(prim)
prim.GetAttribute("xformOpOrder").Set([op_name])
attr = prim.GetAttribute(op_name).Get()
if attr is None:
prim.CreateAttribute(op_name, data_type, False)
attr = prim.GetAttribute(op_name).Get()
return attr
def set_orientation(prim, orientation, use_quatd=True):
orientation = np.array(orientation).astype("float")
op_name = "xformOp:rotateXYZ"
attr = get_attr(prim, op_name, Sdf.ValueTypeNames.Float3)
op_name = "xformOp:orient"
attr = get_attr(prim, op_name, Sdf.ValueTypeNames.Quatd)
if attr is not None:
if type(attr) == Gf.Quatd:
orient = Gf.Quatd(orientation[0], orientation[1], orientation[2], orientation[3])
else:
orient = Gf.Quatf(orientation[0], orientation[1], orientation[2], orientation[3])
prim.GetAttribute(op_name).Set( orient )
def set_rotation(prim, orientation):
orientation = np.array(orientation).astype("float")
op_name = "xformOp:rotateXYZ"
attr = get_attr(prim, op_name, Sdf.ValueTypeNames.Float3)
if attr is not None:
if type(attr) == Gf.Vec3f:
orient = Gf.Vec3f(orientation[0], orientation[1], orientation[2])
else:
orient = Gf.Vec3d(orientation[0], orientation[1], orientation[2])
prim.GetAttribute(op_name).Set( orient )
def set_transform(prim, mat):
# TODO
translate = mat[:3,3]
rot = mat[:3,:3]
x,y,z,w=Rotation.from_matrix(rot).as_quat()
set_translate(prim, translate)
set_orientation(prim, [w,x,y,z])
# prim.CreateAttribute("xformOp:transform", Sdf.ValueTypeNames.Matrix4d, False).Set(Gf.Matrix4d(mat))
def set_translate(prim, translate):
translate = np.array(translate).astype("float")
op_name = "xformOp:translate"
attr = get_attr(prim, op_name, Sdf.ValueTypeNames.Float3)
if type(attr) == Gf.Vec3f:
trans = Gf.Vec3f(translate[0], translate[1], translate[2])
else:
trans = Gf.Vec3d(translate[0], translate[1], translate[2])
prim.GetAttribute(op_name).Set( trans )
def set_scale(prim, scale):
scale = np.array(scale).astype("float")
op_name = "xformOp:scale"
attr = get_attr(prim, op_name, Sdf.ValueTypeNames.Float3)
if type(attr) == Gf.Vec3f:
s = Gf.Vec3f(scale[0], scale[1], scale[2])
else:
s = Gf.Vec3d(scale[0], scale[1], scale[2])
prim.GetAttribute(op_name).Set(s)
def get_orientation(prim):
orient = prim.GetAttribute("xformOp:orient").Get()
real = [orient.GetReal()]
img = list(orient.GetImaginary())
return np.array(real + img)
def get_transform(prim):
translate = get_translate(prim)
w,x,y,z = get_orientation(prim)
ret = np.eye(4)
mat = Rotation.from_quat([x,y,z,w]).as_matrix()
ret[:3,3] = translate
ret[:3,:3] = mat
return ret
def get_translate(prim):
translate = prim.GetAttribute("xformOp:translate").Get()
return np.array(translate)
def get_scale(prim):
return prim.GetAttribute("xformOp:scale").Get()
def get_prim(prim_path):
stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath(prim_path)
return prim
def get_unique_path(prim_path):
prim_path = find_unique_string_name(
initial_name=prim_path, is_unique_fn=lambda x: not is_prim_path_valid(x)
)
return prim_path
def get_unique_name(scene, name):
ret = find_unique_string_name(
initial_name=name, is_unique_fn=lambda x: not scene.object_exists(x)
)
return ret
def load_obj_usd(usd_path, prim_path, translate=[0,0,0], orientation=[1,0,0,0], scale=[1,1,1], set_rigid=False, kinematic=False, set_collision='convexDecomposition'):
# set_collision / approximationShape:
# "none", "convexHull", "convexDecomposition", "boundingCube", "boundingSphere", "meshSimplification"
stage = omni.usd.get_context().get_stage()
prim_path = get_unique_path(prim_path)
prim = stage.GetPrimAtPath(usd_path)
if not prim.IsValid():
prim = stage.DefinePrim( prim_path, "Xform")
prim.GetReferences().AddReference(usd_path)
if set_rigid:
utils.setRigidBody(prim, set_collision, kinematic)
elif set_collision is not None:
utils.setStaticCollider(prim, set_collision)
clean_transform(prim)
set_translate(prim, translate)
set_orientation(prim, orientation)
set_scale(prim, scale)
return prim
def add_box(stage, primPath, size, position, orientation, color, collision=True, rigid=False):
# defaultPrimPath = str(stage.GetDefaultPrim().GetPath())
cubePrimPath = primPath
position = Gf.Vec3f( position[0], position[1], position[2])
orientation = Gf.Quatf(orientation[0], orientation[1], orientation[2], orientation[3])
color = Gf.Vec3f(color[0], color[1], color[2])
cubeGeom = UsdGeom.Cube.Define(stage, cubePrimPath)
cubePrim = stage.GetPrimAtPath(cubePrimPath)
cubeGeom.CreateSizeAttr(1)
scale = Gf.Vec3f( list(size) )
cubeGeom.AddTranslateOp().Set(position)
cubeGeom.AddOrientOp().Set(orientation)
cubeGeom.AddScaleOp().Set(scale)
cubeGeom.CreateDisplayColorAttr().Set([color])
if collision:
UsdPhysics.CollisionAPI.Apply(cubePrim)
UsdPhysics.MassAPI.Apply(cubePrim)
if rigid:
utils.setRigidBody(cubePrim, 'convexHull', False)
return cubePrim
def set_visible(prim, visible=True):
if visible:
prim.GetAttribute("visibility").Set("inherited")
    else:
        prim.GetAttribute("visibility").Set("invisible")
def remove(primPath):
omni.kit.commands.execute("DeletePrims", paths=primPath)
def set_view_reso(w=640, h=480):
viewport = omni.kit.viewport_legacy.get_viewport_interface()
# acquire the viewport window
viewport_handle = viewport.get_instance("Viewport")
viewport_window = viewport.get_viewport_window(viewport_handle)
# Set viewport resolution, changes will occur on next frame
viewport_window.set_texture_resolution(w, h)
def vec_to_mat(fromVector, toVector):
# https://blog.csdn.net/doubtfire/article/details/122100943
fromVector = np.array(fromVector)
fromVector_e = fromVector / np.linalg.norm(fromVector)
toVector = np.array(toVector)
toVector_e = toVector / np.linalg.norm(toVector)
cross = np.cross(fromVector_e, toVector_e)
cross_e = cross / np.linalg.norm(cross)
dot = np.dot(fromVector_e, toVector_e)
angle = math.acos(dot)
if angle == 0 or angle == math.pi:
print("两个向量处于一条直线")
return [1, 0,0,0]
else:
quat = [math.cos(angle/2), cross_e[0]*math.sin(angle/2), cross_e[1]*math.sin(angle/2), cross_e[2]*math.sin(angle/2)]
# return Rotation.from_quat(quat).as_matrix()
return quat
def vec_to_matrix( from_v, to_v ):
from_v = np.array(from_v)
fromVector_e = from_v / np.linalg.norm(from_v)
to_v = np.array(to_v)
toVector_e = to_v / np.linalg.norm(to_v)
cross = np.cross(fromVector_e, toVector_e)
vec = cross / np.linalg.norm(cross)
dot = np.dot(fromVector_e, toVector_e)
    theta = math.acos(dot)  # use the stdlib math module (np.math is deprecated)
rot = np.zeros((3,3))
x, y, z = vec
xx = x**2
yy = y**2
zz = z**2
xy = x*y
xz = x*z
yz = z*y
    cost = math.cos(theta)
    sint = math.sin(theta)
rot[0,0] = xx*(1-cost) + cost
rot[0,1] = xy*(1-cost) + z*sint
rot[0,2] = xz*(1-cost) - y*sint
rot[1,0] = xy*(1-cost) - z*sint
rot[1,1] = yy*(1-cost) + cost
rot[1,2] = yz*(1-cost) + x*sint
rot[2,0] = xz*(1-cost) + y*sint
rot[2,1] = yz*(1-cost) - x*sint
rot[2,2] = zz*(1-cost) + cost
return rot
def add_arrow(stage, primPath, start_pos=None, end_pos=None, mat=None, arrow_len=None, radius=0.01, color=[1,0,0]):
line_path = primPath + '_line'
    arrow_path = primPath + '_arrow'
if mat is None:
start_pos = np.array(start_pos)
end_pos = np.array(end_pos)
direct = end_pos - start_pos
arrow_len = np.linalg.norm(direct)
else:
start_pos = mat[:3,3]
direct = mat[:3,:3] @ np.array([0,0,1])
end_pos = start_pos + direct * arrow_len
orientation = vec_to_mat([0,0,1], direct / arrow_len)
position = start_pos + direct / 2
end_pos += direct / arrow_len * radius
end_position = Gf.Vec3f( end_pos[0], end_pos[1], end_pos[2] )
position = Gf.Vec3f( position[0], position[1], position[2])
orientation = Gf.Quatf(orientation[0], orientation[1], orientation[2], orientation[3])
color = Gf.Vec3f(color[0], color[1], color[2])
line_geom = UsdGeom.Cylinder.Define(stage, line_path)
cone_geom = UsdGeom.Cone.Define(stage, arrow_path)
# line_geom.GetExtentAttr('radius').Set( radius * 1.5 )
# line_geom.GetExtentAttr('height').Set( radius * 3 )
omni.kit.commands.execute('ChangeProperty',
prop_path=Sdf.Path( line_path + '.radius'),
value=radius, prev=1)
omni.kit.commands.execute('ChangeProperty',
prop_path=Sdf.Path( line_path + '.height'),
value=arrow_len, prev=1)
line_geom.AddTranslateOp().Set(position)
line_geom.AddOrientOp().Set(orientation)
line_geom.CreateDisplayColorAttr().Set([color])
line_geom.AddScaleOp().Set(Gf.Vec3f(1.,1.,1.))
omni.kit.commands.execute('ChangeProperty',
prop_path=Sdf.Path( arrow_path + '.radius'),
value=radius * 1.5, prev=2)
omni.kit.commands.execute('ChangeProperty',
prop_path=Sdf.Path( arrow_path + '.height'),
value=radius * 3, prev=2)
cone_geom.AddTranslateOp().Set(end_position)
cone_geom.AddOrientOp().Set(orientation)
cone_geom.AddScaleOp().Set(Gf.Vec3f(1.,1.,1.))
cone_geom.CreateDisplayColorAttr().Set([color])
line_prim = stage.GetPrimAtPath(line_path)
return line_prim
def drawArrow(p1, p2, color=0xffffc000):
    # Adapted from:
    # https://github.com/ft-lab/omniverse_sample_scripts/blob/7f4406520da9abcb93c5ffa73bdcff8a2dfad7e5/UI/DebugDraw/UseDebugDraw.py
    # Import locally so this module still loads when omni.debugdraw is unavailable.
    from omni.debugdraw import _debugDraw
    debug_draw = _debugDraw.acquire_debug_draw_interface()
    debug_draw.draw_line(carb.Float3(p1[0], p1[1], p1[2]), color, carb.Float3(p2[0], p2[1], p2[2]), color)
    P1 = Gf.Vec3f(p1[0], p1[1], p1[2])
    P2 = Gf.Vec3f(p2[0], p2[1], p2[2])
    vDir = P2 - P1
    lenV = vDir.GetLength()
    vDir /= lenV
    v1_2 = Gf.Vec4f(vDir[0], vDir[1], vDir[2], 1.0)
    v2_2 = Gf.Vec4f(0, 1, 0, 1.0)
    v3_2 = Gf.HomogeneousCross(v1_2, v2_2)
    vDirX = Gf.Vec3f(v3_2[0], v3_2[1], v3_2[2]).GetNormalized()
    vD1 = (vDir + vDirX).GetNormalized() * (lenV * 0.1)
    vD2 = (vDir - vDirX).GetNormalized() * (lenV * 0.1)
    # Draw the arrow-head strokes at both ends of the line.
    pp = P1 + vD1
    debug_draw.draw_line(carb.Float3(pp[0], pp[1], pp[2]), color, carb.Float3(P1[0], P1[1], P1[2]), color)
    pp = P1 + vD2
    debug_draw.draw_line(carb.Float3(pp[0], pp[1], pp[2]), color, carb.Float3(P1[0], P1[1], P1[2]), color)
    pp = P2 - vD1
    debug_draw.draw_line(carb.Float3(pp[0], pp[1], pp[2]), color, carb.Float3(P2[0], P2[1], P2[2]), color)
    pp = P2 - vD2
    debug_draw.draw_line(carb.Float3(pp[0], pp[1], pp[2]), color, carb.Float3(P2[0], P2[1], P2[2]), color)
| 13,288 |
Python
| 31.333333 | 166 | 0.642083 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/robot/tools/omni_env.py
|
import os
"""
Change omniverse_path to your own installation path, then run from a terminal:
mkdir .vscode & python omni_env.py > .vscode/settings.json
"""
omniverse_path = "C:/omniverse/pkg/isaac_sim-2022.1.1"
python_path = os.path.join( omniverse_path, "kit/python/python.exe" ).replace("\\", "/" )
def log_import_path(path, ret=""):
folders = os.listdir(path)
for f in folders:
ret += " \"" + os.path.join(path, f).replace("\\", "/" ) + "\",\n"
return ret
# add omni path
path = os.path.join( omniverse_path, "kit/extscore")
ret = log_import_path(path)
path = os.path.join( omniverse_path, "exts")
ret = log_import_path(path, ret)
path = os.path.join( omniverse_path, "kit/extsphysics")
ret = log_import_path(path, ret)
ret += " \"" + os.path.join( omniverse_path, "kit/extsphysics/omni.usd.schema.physx/pxr").replace("\\", "/" ) + "\",\n"
ret += " \"" + os.path.join( omniverse_path, "kit/plugins/bindings-python").replace("\\", "/" ) + "\",\n"
# add pip-module path, like numpy
ret += " \"" + os.path.join( omniverse_path, "kit/extscore/omni.kit.pip_archive/pip_prebundle").replace("\\", "/" ) + "\""
# set json str
final_ret = '''
{
"python.defaultInterpreterPath": "%s",
"python.autoComplete.extraPaths": [
%s
],
"python.analysis.extraPaths": [
%s
]
}
''' % (
python_path,
ret,
ret,
)
print(final_ret)
| 1,387 |
Python
| 24.236363 | 129 | 0.599135 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/robot/scene/camera.py
|
import omni
import math
import omni.kit.commands
from pxr import Sdf, Gf
import omni.replicator.core as rep
import numpy as np
from robot.tools.omni_tools import *
class Camera(object):
def __init__(self, stage, prim_path, translate, orientation, focal_length=18.14, focus_distance=400, resolution=(640, 480)) -> None:
self.prim_path = prim_path
self.stage = stage
self.resolution = resolution
self.camera = self.add_camera(stage, prim_path, translate, orientation, focal_length, focus_distance)
self.render_product = rep.create.render_product(prim_path, resolution=resolution)
self.rgb_anno = rep.AnnotatorRegistry.get_annotator("rgb")
self.dep_anno = rep.AnnotatorRegistry.get_annotator("distance_to_image_plane")
self.rgb_anno.attach([self.render_product])
self.dep_anno.attach([self.render_product])
def add_camera(self, stage, prim_path, translate, orientation, focal_length, focus_distance):
cameraGeom = UsdGeom.Camera.Define(stage, prim_path)
cam = get_prim(prim_path)
cam.GetAttribute('focalLength').Set(focal_length)
cam.GetAttribute('focusDistance').Set(focus_distance)
cam.GetAttribute('fStop').Set(0.0)
cam.GetAttribute('projection').Set('perspective')
cam.GetAttribute('clippingRange').Set(Gf.Vec2f(0.01, 10000))
if len(orientation) == 4:
w,x,y,z = orientation
orientation = list(Rotation.from_quat([x,y,z,w]).as_euler('XYZ'))
orientation = [ np.rad2deg(ang) for ang in orientation ]
rotation = Rotation.from_euler('XYZ', [ np.deg2rad(ang) for ang in orientation ]).as_matrix()
# Set position.
UsdGeom.XformCommonAPI(cameraGeom).SetTranslate(list(translate))
# Set rotation.
UsdGeom.XformCommonAPI(cameraGeom).SetRotate(list(orientation), UsdGeom.XformCommonAPI.RotationOrderXYZ)
# Set scale.
UsdGeom.XformCommonAPI(cameraGeom).SetScale((1, 1, 1))
# omni.kit.commands.execute('ChangeProperty',
# prop_path=Sdf.Path(f'{prim_path}.xformOp:rotateXYZ'),
# value=Gf.Vec3d(orientation),
# prev=Gf.Vec3d(0,0,0))
width, height = self.resolution
horiz_aperture = cam.GetAttribute("horizontalAperture").Get()
# https://forums.developer.nvidia.com/t/creating-a-custom-camera-on-isaac-sim-app/187375/2
# https://forums.developer.nvidia.com/t/camera-intrinsic-matrix/213799
horizontal_fov = 2 * math.atan(horiz_aperture / (2 * focal_length))
vertical_fov = (height / width * horizontal_fov)
focal_x = (width / 2.0) / np.tan(horizontal_fov / 2.0)
focal_y = (height / 2.0) / np.tan(vertical_fov / 2.0)
center_x = width * 0.5
center_y = height * 0.5
self.intrinsic = np.array([
[focal_x, 0, center_x],
[0, focal_y, center_y],
[0, 0, 1]])
self.extrinsic = np.eye(4)
self.extrinsic[:3,:3] = rotation
self.extrinsic[:3,3] = translate
return cam
def take_rgb(self):
rgb_data = self.rgb_anno.get_data()
rgb_image_data = np.frombuffer(rgb_data, dtype=np.uint8).reshape(*rgb_data.shape)
return rgb_image_data
def take_dep(self):
data = self.dep_anno.get_data()
# Get size.
hei, wid = data.shape[:2]
# Store data (buff[hei][wid]).
buff = np.frombuffer(data, np.float32).reshape(hei, wid)
buff[buff == buff.max()] = 0
return buff
def cam_to_world(self, points):
rot = self.extrinsic[:3,:3]
pos = self.extrinsic[:3, 3]
points = (rot @ points.transpose()).transpose() + pos
return points
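    # --- Illustrative helper (not part of the original class) ---
    # Inverse of the depth backprojection used in UOC/app.py: project a world-space
    # point to pixel coordinates. It assumes the same convention, i.e. the USD camera
    # looks down its local -Z axis, so a 180-degree flip about X maps the USD camera
    # frame to the usual pinhole frame (x right, y down, z forward).
    def world_to_pixel(self, point_world):
        rot = self.extrinsic[:3, :3]   # camera-to-world rotation
        pos = self.extrinsic[:3, 3]    # camera position in world coordinates
        p_cam_usd = rot.T @ (np.asarray(point_world, dtype=float) - pos)
        p_cv = np.diag([1.0, -1.0, -1.0]) @ p_cam_usd  # flip into the pinhole frame
        u = self.intrinsic[0, 0] * p_cv[0] / p_cv[2] + self.intrinsic[0, 2]
        v = self.intrinsic[1, 1] * p_cv[1] / p_cv[2] + self.intrinsic[1, 2]
        return u, v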
def get_camera(self):
# viewport = omni.kit.viewport_legacy.get_viewport_interface()
# viewportWindow = viewport.get_viewport_window()
# cameraPath = viewportWindow.get_active_camera()
# Get stage.
# stage = omni.usd.get_context().get_stage()
#time_code = omni.timeline.get_timeline_interface().get_current_time() * stage.GetTimeCodesPerSecond()
time_code = Usd.TimeCode.Default()
# Get active camera.
cameraPrim = self.stage.GetPrimAtPath(self.prim_path)
if cameraPrim.IsValid():
camera = UsdGeom.Camera(cameraPrim) # UsdGeom.Camera
cameraV = camera.GetCamera(time_code) # Gf.Camera
print("Aspect : " + str(cameraV.aspectRatio))
print("fov(H) : " + str(cameraV.GetFieldOfView(Gf.Camera.FOVHorizontal)))
print("fov(V) : " + str(cameraV.GetFieldOfView(Gf.Camera.FOVVertical)))
print("FocalLength : " + str(cameraV.focalLength))
print("World to camera matrix : " + str(cameraV.transform))
viewMatrix = cameraV.frustum.ComputeViewMatrix()
print("View matrix : " + str(viewMatrix))
viewInv = viewMatrix.GetInverse()
# Camera position(World).
cameraPos = viewInv.Transform(Gf.Vec3f(0, 0, 0))
print("Camera position(World) : " + str(cameraPos))
# Camera vector(World).
cameraVector = viewInv.TransformDir(Gf.Vec3f(0, 0, -1))
print("Camera vector(World) : " + str(cameraVector))
projectionMatrix = cameraV.frustum.ComputeProjectionMatrix()
print("Projection matrix : " + str(projectionMatrix))
#cv = CameraUtil.ScreenWindowParameters(cameraV)
#print(cv.screenWindow)
print(self.intrinsic)
print(self.extrinsic)
| 5,774 |
Python
| 38.827586 | 136 | 0.602875 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/robot/scene/scene.py
|
import omni
from robot.scene.camera import Camera
from robot.scene.robot import UR5
import numpy as np
import os
from robot.tools.omni_tools import *
YCB_DIRECTORY = "E:/dataset/ycb"
# GN_DIRECTORY = "E:/dataset/grasp_net/models"
ASSETS_DIR = "E:/workspace/visual_match/robot/assets"
class Scene(object):
def __init__(self, world, robot_height=0.2, offset=[0,0,0]):
self.task_prim_path = '/World/Scene'
self.world = world
self.stage = omni.usd.get_context().get_stage()
self.offset = np.array(offset).astype("float")
self.robot_height = robot_height
self.all_prims = []
self.all_paths = []
self.names = []
# support size
self.support_translate = np.array([0.6, 0, -self.robot_height/2])
self.support_size = np.array([0.7, 0.7, 1])
def init_scene(self):
self.scene_prim = self.stage.DefinePrim( self.task_prim_path, "Xform")
clean_transform(self.scene_prim)
self.add_robot()
self.add_support()
self.add_camera()
self.obj_prim_path = f"{self.robot.prim_path}/OBJ"
self.obs_prim_path = f"{self.robot.prim_path}/OBS"
def add_robot(self):
self.robot = UR5( ASSETS_DIR, self.world, self.task_prim_path, position=[0, 0, self.robot_height], offset=self.offset)
self.base_position = self.robot.position
base_pos = self.base_position - [0,0, self.robot_height/2.0]
add_box(self.stage, self.task_prim_path + "/ur5_base", [0.2, 0.2, self.robot_height], base_pos, [1,0,0,0], [0.8,0.8,0.8])
self.obs_prim = self.stage.DefinePrim( self.robot.prim_path + "/OBS", "Xform")
self.robot.to_init_state()
def set_obs_visible(self, visible=True):
set_visible(self.obs_prim, visible)
def add_model(self, obj_name, position=[0,0,0], orientation=[1, 0, 0, 0], scale=[1,1,1], use_convert=False):
if use_convert:
usd_path = f'{YCB_DIRECTORY}/{obj_name}/google_16k/_converted/text.usd'
else:
usd_path = f'{YCB_DIRECTORY}/{obj_name}/google_16k/text.usd'
prim_path = f"{self.robot.prim_path}/OBJ/obj_{obj_name}"
prim = load_obj_usd(usd_path, prim_path, position, orientation, scale, set_rigid=True, kinematic=False)
prim_path = get_prim_path(prim)
self.all_prims.append(prim)
self.all_paths.append(prim_path)
self.names.append(obj_name[4:].replace('_', ' '))
return prim
def load_objects(self):
center = self.support_translate + [0, 0, 0.2]
# self.add_model('026_sponge', position= center+[-0.1, -0.15, 0.05], orientation=[1,0,0,0] )
self.add_model('008_pudding_box', position= center+[0.1, -0.2, 0.1] )
self.add_model('011_banana', position= center+[0, -0.1, 0.1] )
self.add_model('013_apple', position= center+[-0.1, 0, 0.1] )
self.add_model('014_lemon', position= center+[-0.22, 0.0, 0.1] )
def load_objects_2(self):
center = self.support_translate + [0, 0, 0.2]
# self.add_model('026_sponge', position= center+[0.07, -0.15, 0.05], orientation=[1,0,0,0] )
self.add_model('008_pudding_box', position= center+[0.15, 0.16, 0.05], orientation=[0.62,0,0,0.78], use_convert=True )
self.add_model('011_banana', position= center+[-0.16, 0.2, 0.05], orientation=[0.89,0,0,-0.438], use_convert=True )
self.add_model('013_apple', position= center+[-0.15, 0.1, 0.05], use_convert=True )
self.add_model('014_lemon', position= center+[-0.05, 0.13, 0.05], orientation=[-0.597, 0, 0, 0.797], use_convert=True )
def add_support(self):
# object support
init_support_size = np.array([1, 1, self.robot_height])
load_obj_usd( os.path.join( ASSETS_DIR, "support", "support_flat.usd"), \
self.robot.prim_path + "/BASE/support", self.support_translate, scale=self.support_size, set_rigid=False, kinematic=False, set_collision="none")
collision_cube = get_prim(self.robot.prim_path + "/BASE/support/geo/geo")
collision_cube.GetAttribute("physics:approximation").Set("none")
self.add_obstacle("SUP", self.support_translate, scale=init_support_size * self.support_size )
def add_obstacle(self, name, translate, orientation=[1,0,0,0], scale=[0.1,0.1,0.1]):
obs_prim_path = self.robot.prim_path + "/OBS/" + name
obs = omni.isaac.core.objects.cuboid.VisualCuboid( obs_prim_path, translation=translate, orientation=orientation, color=np.array([0, 1.,0]), size=1)
set_scale( get_prim(obs_prim_path), scale)
self.robot.add_obstacle(obs)
def add_camera(self):
container_translate = self.support_translate + [0, 0, 0.6]
self.cam_observe = Camera(self.stage, self.robot.prim_path + '/CAM/camera_observe', [1.14, 0.95, 1.69], [ 37, 0, 140], resolution=(1500, 900) )
self.cam = Camera(self.stage, self.robot.prim_path + '/CAM/camera', container_translate, [0,0, 90], focal_length=12 )
def take_images(self, types=['rgb', 'dep']):
ret = []
if 'rgb' in types:
ret.append( self.cam.take_rgb() )
if 'dep' in types:
ret.append( self.cam.take_dep() )
return ret
def update_state(self):
self.robot.update_state()
def reset(self):
remove(self.all_paths)
self.all_prims = []
self.all_paths = []
self.names = []
# self.load_objects()
# scene_id = np.random.choice(self.scene_list)
# self.load_scene_objs(scene_id)
| 5,664 |
Python
| 39.177305 | 156 | 0.597811 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/robot/scene/robot.py
|
from omni.isaac.motion_generation.lula import RmpFlow
from omni.isaac.motion_generation import ArticulationMotionPolicy
from omni.isaac.core.utils.types import ArticulationAction
from omni.isaac.manipulators.grippers import ParallelGripper
from omni.isaac.surface_gripper._surface_gripper import Surface_Gripper_Properties, Surface_Gripper
from omni.isaac.dynamic_control import _dynamic_control
from omni.isaac.core.robots import Robot
from omni.isaac.manipulators import SingleManipulator
from scipy.spatial.transform import Rotation as R
import os
import numpy as np
import omni
from robot.tools.omni_tools import *
class UR5(object):
def __init__(self, assets, world, task_prim_path, position=[0,0,0], orientation=[1,0,0,0], scale=[1,1,1], offset=[0,0,0]) -> None:
self.MOVING_STATE = {
"to_offset": 0,
"to_target": 1,
"stop": 2
}
# TODO add attach method
self.use_parallel = False
self.name = get_unique_name( world.scene, "ur5")
rmp_config_dir = os.path.join(assets, "ur5")
if self.use_parallel:
self.urdf_path = os.path.join(rmp_config_dir, "ur5_gripper.urdf")
self.usd_path = os.path.join(rmp_config_dir, "usd", "ur5_gripper.usd")
else:
self.urdf_path = os.path.join(rmp_config_dir, "ur5_suction.urdf")
self.usd_path = os.path.join(rmp_config_dir, "usd", "ur5_suction.usd")
self.robot_description_path = os.path.join(rmp_config_dir, "config", 'robot_descriptor.yaml')
self.rmpflow_config_path = os.path.join(rmp_config_dir, "config", 'rmpflow_config.yaml')
self.end_effector_frame_name = "gripper_center"
self.world = world
self.task_prim_path = task_prim_path
self.offset = offset
self.position = np.array(position).astype('float') + offset
self.orientation = np.array(orientation).astype('float')
self.scale = np.array(scale).astype('float')
self.gripper_state = 0
self.target_state = None
self.moving_state = self.MOVING_STATE['to_offset']
self.obstacles = []
self.init_state = np.array([0, -np.deg2rad(30), -np.deg2rad(100), -np.deg2rad(120), np.deg2rad(90), 0])
self.load_robot()
self.set_controller()
def load_robot(self):
self.prim_path = self.task_prim_path + "/ur5"
self.target_prim_path = self.task_prim_path + "/ur5_target"
stage = omni.usd.get_context().get_stage()
self.prim = load_obj_usd( usd_path=self.usd_path, prim_path=self.prim_path, \
translate=self.position, orientation=self.orientation, scale=self.scale )
self.gripper_center_prim = get_prim( self.prim_path + "/" + self.end_effector_frame_name )
set_translate(self.gripper_center_prim, [0,0,0.02])
# add target
self.target_prim = stage.DefinePrim( self.target_prim_path, "Xform")
set_translate(self.target_prim, [ 0, 0, 0 ])
def set_controller(self):
if self.use_parallel:
gripper = ParallelGripper(
#We chose the following values while inspecting the articulation
end_effector_prim_path= self.prim_path + "/gripper_base" ,
joint_prim_names=["gb_gl", "gb_gr"],
joint_opened_positions=np.array([0, 0]),
joint_closed_positions=np.array([0.0275, 0.0275]),
action_deltas=np.array([-0.0275, -0.0275]),
)
#define the manipulator
self.robot = self.world.scene.add(
SingleManipulator(prim_path=self.prim_path, name=self.name,
end_effector_prim_name="gripper_base", gripper=gripper))
else:
# Gripper properties
sgp = Surface_Gripper_Properties()
sgp.d6JointPath = self.prim_path + "/gripper_vacuum/SurfaceGripper"
sgp.parentPath = self.prim_path + "/gripper_vacuum"
sgp.offset = _dynamic_control.Transform()
sgp.offset.p.x = 0
sgp.offset.p.y = 0
sgp.offset.p.z = 0.005 + 0.02
sgp.offset.r = [0.7071, 0, 0.7071, 0] # Rotate to point gripper in Z direction
sgp.gripThreshold = 0.02
sgp.forceLimit = 1.0e3
sgp.torqueLimit = 1.0e4
# sgp.forceLimit = 1.0e2
# sgp.torqueLimit = 1.0e3
sgp.bendAngle = np.pi / 2
sgp.stiffness = 1.0e4
sgp.damping = 1.0e3
dc = _dynamic_control.acquire_dynamic_control_interface()
gripper = Surface_Gripper(dc)
gripper.initialize(sgp)
self.robot = self.world.scene.add(Robot(prim_path=self.prim_path, name=self.name))
self.robot.gripper = gripper
self.rmpflow = RmpFlow(
robot_description_path = self.robot_description_path,
urdf_path = self.urdf_path,
rmpflow_config_path = self.rmpflow_config_path,
end_effector_frame_name = self.end_effector_frame_name,
evaluations_per_frame = 5,
ignore_robot_state_updates=True
)
self.rmpflow.set_robot_base_pose( get_translate(self.prim), get_orientation(self.prim) )
# self.rmpflow.visualize_collision_spheres()
# self.rmpflow.visualize_end_effector_position()
physics_dt = 1/60.
self.articulation_rmpflow = ArticulationMotionPolicy(self.robot, self.rmpflow, physics_dt)
self.articulation_controller = self.robot.get_articulation_controller()
def set_gripper_open(self):
self.set_gripper_state(1)
def set_gripper_close(self):
self.set_gripper_state(-1)
def set_gripper_stop(self):
self.set_gripper_state(0)
def set_gripper_state(self, state: int):
self.gripper_state = state
def gripper_close(self):
if self.use_parallel:
gripper_positions = self.robot.gripper.get_joint_positions()
self.robot.gripper.apply_action(
ArticulationAction(joint_positions=[gripper_positions[0] + 0.0001, gripper_positions[1] + 0.0001]))
# ArticulationAction(joint_positions=[0.008, 0.008]))
else:
self.robot.gripper.close()
def gripper_open(self):
# gripper_positions = self.robot.gripper.get_joint_positions()
# self.robot.gripper.apply_action(
# ArticulationAction(joint_positions=[gripper_positions[0] - 0.0001, gripper_positions[1] - 0.0001]))
self.robot.gripper.open()
def add_obstacle(self, prim):
self.obstacles.append(prim)
self.rmpflow.add_obstacle(prim)
def remove_obstacle(self, prim):
self.obstacles.remove(prim)
self.rmpflow.remove_obstacle(prim)
def to_init_state(self):
if self.target_state is None and self.moving_state != self.MOVING_STATE['stop']:
self.set_target(target_joints=self.init_state)
self.moving_state = self.MOVING_STATE['stop']
def set_target(self, target_position=None, target_orientation=None, target_joints=None):
if target_joints is not None:
self.target_state = np.array(target_joints)
else:
end_prim = get_prim(self.prim_path + "/gripper_center")
if target_position is None:
position = get_translate(end_prim)
target_position = position + self.position
else:
target_position = np.array(target_position).astype('float') + self.position
set_translate( self.target_prim, target_position )
if target_orientation is None:
target_orientation = get_orientation(end_prim)
else:
target_orientation = np.array(target_orientation).astype('float')
set_orientation( self.target_prim, target_orientation )
self.target_state = [ target_position, target_orientation ]
def move_to_mat(self, mat, offset=0):
x,y,z,w = Rotation.from_matrix(mat[:3,:3]).as_quat()
target_position = mat[:3,3]
target_orientation = np.array([w,x,y,z])
self.move_to(target_position, target_orientation, offset)
def move_to(self, target_position, target_orientation, offset=0):
if self.target_state is None and self.moving_state != self.MOVING_STATE['stop']:
if self.moving_state == self.MOVING_STATE['to_offset'] and offset != 0:
w, x, y, z = target_orientation
rot = R.from_quat([x,y,z,w]) # this use x,y,z,w
z = np.array([0,0,-1])
direction = rot.apply(z)
offset_pos = target_position + direction * offset
self.set_target(offset_pos, target_orientation)
self.moving_state = self.MOVING_STATE['to_target']
else:
# print("setting")
self.set_target(target_position, target_orientation)
self.moving_state = self.MOVING_STATE['stop']
def move_up(self, z_offset):
if self.target_state is None and self.moving_state != self.MOVING_STATE['stop']:
end_prim = get_prim(self.prim_path + "/gripper_center")
position = get_translate(end_prim)
position[2] += z_offset
self.move_to(position, None)
def moving_on(self):
self.moving_state = self.MOVING_STATE['to_offset']
def stop(self):
self.moving_state = self.MOVING_STATE['stop']
def is_stop(self):
return self.moving_state == self.MOVING_STATE['stop']
def check_valid_target(self, position, quat, joint_name='gripper_center'):
# quat = w,x,y,z
ret = self.rmpflow.get_kinematics_solver().compute_inverse_kinematics(joint_name, np.array(position), np.array(quat))
return ret[1]
def update_state(self):
if self.target_state is not None:
if len(self.target_state) == 2:
self.rmpflow.set_end_effector_target(
target_position=self.target_state[0],
target_orientation=self.target_state[1]
)
else:
self.rmpflow.set_cspace_target( self.target_state )
self.rmpflow.update_world(self.obstacles)
actions = self.articulation_rmpflow.get_next_articulation_action()
count = len(actions.joint_velocities)
for v in actions.joint_velocities:
if v is None or abs(v) < 1e-2:
count -= 1
if count == 0:
# print('stop')
self.target_state = None
else:
self.articulation_controller.apply_action(actions)
if self.gripper_state != 0:
if self.gripper_state == 1:
self.gripper_open()
elif self.gripper_state == -1:
self.gripper_close()
| 11,177 |
Python
| 38.638298 | 134 | 0.595867 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/robot/scene/task.py
|
import os
import numpy as np
from PIL import Image
from scipy.spatial.transform import Rotation
from robot.tools.omni_tools import *
from robot.scene.scene import Scene
from omni.isaac.core import World
class Env(object):
def __init__(self, render=True, save_folder="./images") -> None:
self.save_folder = save_folder
world = World(stage_units_in_meters=1.0, physics_prim_path="/World/physicsScene")
world.scene.add_default_ground_plane()
self.world = world
self.max_loop_num = 1000
self.scene = Scene(world)
self.use_robot = True
self.render = render
self.scene.init_scene()
self.scene.set_obs_visible(False)
self.world.reset()
self.is_start = False
def reset(self):
self.scene.reset()
def world_step(self, step=1, render=True):
if self.world.is_playing():
for _ in range(step):
self.world.step(render=render)
def idle(self, step=1, render=True):
if self.use_robot:
self.scene.update_state()
self.world_step(step, render)
def robot_on(self):
self.scene.robot.moving_on()
def save_images(self, prefix="cam"):
print("Take image")
rgb, dep = self.scene.take_images()
cam = self.scene.cam.intrinsic.copy()
c2w = self.scene.cam.extrinsic.copy()
os.makedirs(self.save_folder, exist_ok=True)
np.save( os.path.join(self.save_folder, f'{prefix}_cam.npy'), cam )
np.save( os.path.join(self.save_folder, f'{prefix}_c2w.npy'), c2w )
np.save( os.path.join(self.save_folder, f'{prefix}_dep.npy'), dep )
Image.fromarray(rgb, mode='RGBA').save( os.path.join(self.save_folder, f'{prefix}_rgb.png') )
camera_params = {}
# camera_params['x_offset'] = cam[0,0]
# camera_params['y_offset'] = cam[1,1]
# camera_params['fx'] = cam[0,2]
# camera_params['fy'] = cam[1,2]
camera_params['c2w'] = c2w
camera_params['cam'] = cam
return rgb[:,:,:3][:,:,::-1].copy(), dep, camera_params
def move_up(self, offset=0.1, render=True):
self.robot_on()
is_stop = False
loop_num = 0
while is_stop == False and loop_num < self.max_loop_num:
self.scene.robot.move_up(offset)
is_stop = self.scene.robot.target_state is None
self.scene.update_state()
self.world_step(render=render)
loop_num += 1
def move_to_init(self, render=True):
self.robot_on()
is_stop = False
loop_num = 0
while is_stop == False and loop_num < self.max_loop_num:
self.scene.robot.to_init_state()
is_stop = self.scene.robot.target_state is None
self.scene.update_state()
self.world_step(render=render)
loop_num += 1
def move_to_mat(self, mat, offset=0, render=True):
self.robot_on()
is_stop = False
loop_num = 0
while is_stop is False and loop_num < self.max_loop_num:
self.scene.robot.move_to_mat(mat, offset)
is_stop = self.scene.robot.target_state is None
self.scene.update_state()
self.world_step(render=render)
loop_num += 1
    def pick_and_place(self, grasp_mat, place_mat, render=True):
self.move_to_mat(grasp_mat, 0.1, render=render)
self.gripper_close(render=render)
self.move_up(0.3, render=render)
self.move_to_mat(place_mat, 0.4, render=render)
self.gripper_open(render=render)
self.move_up(0.3, render=render)
def gripper_close(self, render=True):
self.scene.robot.set_gripper_close()
self.idle(20, render)
def gripper_open(self, render=True):
self.scene.robot.set_gripper_open()
self.idle(1, render)
self.scene.robot.set_gripper_stop()
def move_to_left(self):
mat = np.eye(4)
mat[:3,3] = (0.127126, 0.126619, 0.445994)
mat[:3,:3] = Rotation.from_rotvec(np.pi * np.array([1,0,0])).as_matrix()
self.move_to_mat(mat)
def get_pick_mat(self, points_list):
pick_mats = []
for points in points_list:
p = points[points[:,2] > 0.01]
z = p[:,2].max()
x, y = p[:,:2].mean(axis=0)
pick_pos = np.array([x,y,z])
mat = np.eye(4)
mat[:3,:3] = Rotation.from_rotvec(np.pi * np.array([1,0,0])).as_matrix()
mat[:3,3] = pick_pos
pick_mats.append(mat)
return pick_mats
def test(self):
mat = np.eye(4)
mat[:3,3] = [0.4319, -0.008, 0.0906]
mat[:3,:3] = Rotation.from_rotvec(np.pi * np.array([1,0,0])).as_matrix()
self.move_to_mat(mat)
self.gripper_close()
self.idle(200)
self.move_up()
self.move_to_left()
self.gripper_open()
def run(self):
self.reset()
self.scene.load_objects()
self.idle(200)
self.save_images('tg')
self.move_to_left()
self.reset()
self.scene.load_objects_2()
self.idle(200)
self.save_images('sc')
self.world.pause()
| 5,294 |
Python
| 28.747191 | 101 | 0.555912 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/robot/assets/ur5/config/rmpflow_config.yaml
|
# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
# Artificially limit the robot joints. For example:
# A joint with range +-pi would be limited to +-(pi-.01)
joint_limit_buffers: [.01, .01, .01, .01, .01, .01]
# RMPflow has many modifiable parameters, but these serve as a great start.
# Most parameters will not need to be modified
rmp_params:
cspace_target_rmp:
metric_scalar: 50.
position_gain: 100.
damping_gain: 50.
robust_position_term_thresh: .5
inertia: 1.
cspace_trajectory_rmp:
p_gain: 100.
d_gain: 10.
ff_gain: .25
weight: 50.
cspace_affine_rmp:
final_handover_time_std_dev: .25
weight: 2000.
joint_limit_rmp:
metric_scalar: 1000.
metric_length_scale: .01
metric_exploder_eps: 1e-3
metric_velocity_gate_length_scale: .01
accel_damper_gain: 200.
accel_potential_gain: 1.
accel_potential_exploder_length_scale: .1
accel_potential_exploder_eps: 1e-2
joint_velocity_cap_rmp:
max_velocity: 3.6
velocity_damping_region: 0.8
damping_gain: 1000.0
metric_weight: 100.
target_rmp:
accel_p_gain: 30.
accel_d_gain: 85.
accel_norm_eps: .075
metric_alpha_length_scale: .05
min_metric_alpha: .01
max_metric_scalar: 10000
min_metric_scalar: 2500
proximity_metric_boost_scalar: 20.
proximity_metric_boost_length_scale: .02
xi_estimator_gate_std_dev: 20000.
accept_user_weights: false
axis_target_rmp:
accel_p_gain: 110. # default 210
accel_d_gain: 60.
metric_scalar: 10
proximity_metric_boost_scalar: 3000.
proximity_metric_boost_length_scale: .08
xi_estimator_gate_std_dev: 20000.
accept_user_weights: false
collision_rmp:
damping_gain: 50.
damping_std_dev: .04
damping_robustness_eps: 1e-2
damping_velocity_gate_length_scale: .01
repulsion_gain: 800.
repulsion_std_dev: .01
metric_modulation_radius: .5
metric_scalar: 10000.
metric_exploder_std_dev: .02
metric_exploder_eps: .001
damping_rmp:
accel_d_gain: 30.
metric_scalar: 50.
inertia: 100.
canonical_resolve:
max_acceleration_norm: 50.
projection_tolerance: .01
verbose: false
# body_cylinders are used to promote self-collision avoidance between the robot and its base
# The example below defines the robot base to be a capsule defined by the absolute coordinates pt1 and pt2.
# The semantic name provided for each body_cylinder does not need to be present in the robot URDF.
body_cylinders:
- name: base_link
pt1: [0, 0, 0.22]
pt2: [0, 0, 0]
radius: .1
# - name: base
# pt1: [0,0,.20]
# pt2: [0,0,0.]
# radius: .1
# body_collision_controllers defines spheres located at specified frames in the robot URDF
# These spheres will not be allowed to collide with the capsules enumerated under body_cylinders
# By design, most frames in industrial robots are kinematically unable to collide with the robot base.
# It is often only necessary to define body_collision_controllers near the end effector
body_collision_controllers:
- name: forearm_link
radius: .04
- name: wrist_2_link
radius: .04
- name: wrist_3_link
radius: .04
- name: gripper_base
radius: .02
| 3,871 |
YAML
| 33.265486 | 107 | 0.64867 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/robot/assets/ur5/config/robot_descriptor.yaml
|
# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
# The robot descriptor defines the generalized coordinates and how to map those
# to the underlying URDF dofs.
api_version: 1.0
# Defines the generalized coordinates. Each generalized coordinate is assumed
# to have an entry in the URDF.
# RMPflow will only use these joints to control the robot position.
# # Global frame of the URDF
# root_link: world
# # The default cspace position of this robot
# default_q: [
# 0.00, 0.00, 0.00, 0.00, 0.00, 0.00
# ]
cspace:
- shoulder_pan_joint
- shoulder_lift_joint
- elbow_joint
- wrist_1_joint
- wrist_2_joint
- wrist_3_joint
root_link: world
subtree_root_link: base_link
default_q: [-1.57, -1.57, -1.57, -1.57, 1.57, 0]
# Most dimensions of the cspace have a direct corresponding element
# in the URDF. This list of rules defines how unspecified coordinates
# should be extracted or how values in the URDF should be overwritten.
cspace_to_urdf_rules:
# - {name: fixed_joint, rule: fixed, value: 0.025}
# RMPflow uses collision spheres to define the robot geometry in order to avoid
# collisions with external obstacles. If no spheres are specified, RMPflow will
# not be able to avoid obstacles.
# The first two spheres below, for example, lie along the z axis of the base_link
# frame at 0.06 and 0.14 m respectively.
collision_spheres:
- base_link:
- "center": [0.0, 0.0, 0.06]
"radius": 0.06
- "center": [0.0, 0.0, 0.14]
"radius": 0.06
- shoulder_link:
- "center": [0.0, 0.06, 0.0]
"radius": 0.05
- "center": [0.0, 0.12, 0.0]
"radius": 0.05
- upper_arm_link:
- "center": [0.0, 0.0, 0.07]
"radius": 0.06
- "center": [0.0, 0.0, 0.14]
"radius": 0.06
- "center": [0.0, 0.0, 0.21]
"radius": 0.06
- "center": [0.0, 0.0, 0.28]
"radius": 0.06
- "center": [0.0, 0.0, 0.35]
"radius": 0.06
- "center": [0.0, 0.0, 0.42]
"radius": 0.06
- forearm_link:
- "center": [0.0, 0.0, 0.0]
"radius": 0.045
- "center": [0.0, 0.0, 0.05]
"radius": 0.045
- "center": [0.0, 0.0, 0.1]
"radius": 0.045
- "center": [0.0, 0.0, 0.15]
"radius": 0.045
- "center": [0.0, 0.0, 0.20]
"radius": 0.045
- "center": [0.0, 0.0, 0.25]
"radius": 0.045
- "center": [0.0, 0.0, 0.30]
"radius": 0.045
- "center": [0.0, 0.0, 0.35]
"radius": 0.045
- "center": [0.0, 0.0, 0.40]
"radius": 0.045
- wrist_1_link:
- "center": [0.0, -0.02, 0.0]
"radius": 0.03
- "center": [0.0, 0.05, 0.0]
"radius": 0.045
- "center": [0.0, 0.1, 0.0]
"radius": 0.045
- wrist_2_link:
- "center": [0.0, 0.0, -0.025]
"radius": 0.04
- "center": [0.0, 0.0, 0.05]
"radius": 0.04
- wrist_3_link:
- "center": [0.0, -0.025, 0.0]
"radius": 0.04
- "center": [0.0, 0.05, 0.0]
"radius": 0.04
- gripper_base:
- "center": [0.0, 0, 0.02]
"radius": 0.035
- "center": [0.0, 0, 0.06]
"radius": 0.035
# - "center": [0.015, 0.0, 0.0]
# "radius": 0.03
# - "center": [-0.015, 0.0, 0.0]
# "radius": 0.03
# - gripper_left:
# - "center": [0.0, 0, -0.0075]
# "radius": 0.0075
# - "center": [0.0, 0, 0.0075]
# "radius": 0.0075
# - "center": [0.0, -0.0075, -0.0075]
# "radius": 0.0075
# - "center": [0.0, -0.0075, 0.0075]
# "radius": 0.0075
# - "center": [0.0, 0.0075, -0.0075]
# "radius": 0.0075
# - "center": [0.0, 0.0075, 0.0075]
# "radius": 0.0075
# - gripper_right:
# - "center": [0.0, 0, -0.0075]
# "radius": 0.0075
# - "center": [0.0, 0, 0.0075]
# "radius": 0.0075
# - "center": [0.0, -0.0075, -0.0075]
# "radius": 0.0075
# - "center": [0.0, -0.0075, 0.0075]
# "radius": 0.0075
# - "center": [0.0, 0.0075, -0.0075]
# "radius": 0.0075
# - "center": [0.0, 0.0075, 0.0075]
# "radius": 0.0075
# This argument is no longer supported, but is required for legacy reasons.
# There is no need to change it.
composite_task_spaces: []
| 4,811 |
YAML
| 30.045161 | 85 | 0.523384 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/robot/assets/ur5/gripper/README.md
|
## Robotiq 2F 85 gripper
For this gripper, the following Github repo can be used as a reference: https://github.com/Shreeyak/robotiq.git
### mimic tag in URDF
This gripper was developed for ROS and uses the `mimic` tag within its URDF files to drive the gripper. From our research, the `mimic` tag within URDF is not supported by PyBullet. To overcome this, one can use the `createConstraint` function. Please refer to [this](https://github.com/bulletphysics/bullet3/blob/master/examples/pybullet/examples/mimicJointConstraint.py) example from the bullet3 repo to see how to replicate a `mimic` joint:
```python
#a mimic joint can act as a gear between two joints
#you can control the gear ratio in magnitude and sign (>0 reverses direction)
import pybullet as p
import time
p.connect(p.GUI)
p.loadURDF("plane.urdf",0,0,-2)
wheelA = p.loadURDF("differential/diff_ring.urdf",[0,0,0])
for i in range(p.getNumJoints(wheelA)):
print(p.getJointInfo(wheelA,i))
p.setJointMotorControl2(wheelA,i,p.VELOCITY_CONTROL,targetVelocity=0,force=0)
c = p.createConstraint(wheelA,1,wheelA,3,jointType=p.JOINT_GEAR,jointAxis =[0,1,0],parentFramePosition=[0,0,0],childFramePosition=[0,0,0])
p.changeConstraint(c,gearRatio=1, maxForce=10000)
c = p.createConstraint(wheelA,2,wheelA,4,jointType=p.JOINT_GEAR,jointAxis =[0,1,0],parentFramePosition=[0,0,0],childFramePosition=[0,0,0])
p.changeConstraint(c,gearRatio=-1, maxForce=10000)
c = p.createConstraint(wheelA,1,wheelA,4,jointType=p.JOINT_GEAR,jointAxis =[0,1,0],parentFramePosition=[0,0,0],childFramePosition=[0,0,0])
p.changeConstraint(c,gearRatio=-1, maxForce=10000)
p.setRealTimeSimulation(1)
while(1):
p.setGravity(0,0,-10)
time.sleep(0.01)
#p.removeConstraint(c)
```
Details on `createConstraint` can be found in the pybullet [getting started](https://docs.google.com/document/d/10sXEhzFRSnvFcl3XxNGhnD4N2SedqwdAvK3dsihxVUA/edit#heading=h.fq749wu22x4c) guide.
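As a rough sketch (not from the original repository), the snippet below applies such gear constraints to every joint that carries a `mimic` tag. The joint names and multipliers in `mimic_map` are placeholders; copy the real pairs from `robotiq_2f_85_mimic_joints.urdf`. As the example above notes, a positive `gearRatio` reverses direction, so the sign of the URDF multiplier is typically flipped.
```python
import pybullet as p

p.connect(p.DIRECT)
gripper = p.loadURDF("robotiq_2f_85.urdf", useFixedBase=True)

# map joint names to indices
joint_index = {p.getJointInfo(gripper, i)[1].decode(): i
               for i in range(p.getNumJoints(gripper))}

# master joint -> {mimicking joint: URDF mimic multiplier}; placeholder values,
# read the real pairs from the <mimic> tags in robotiq_2f_85_mimic_joints.urdf
mimic_map = {"finger_joint": {"right_outer_knuckle_joint": -1.0}}

for master, children in mimic_map.items():
    for child, multiplier in children.items():
        c = p.createConstraint(gripper, joint_index[master],
                               gripper, joint_index[child],
                               jointType=p.JOINT_GEAR, jointAxis=[0, 1, 0],
                               parentFramePosition=[0, 0, 0],
                               childFramePosition=[0, 0, 0])
        # positive gearRatio reverses direction, hence the sign flip
        p.changeConstraint(c, gearRatio=-multiplier, maxForce=10000)
```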
### Files in folder
Since parameters like the gear ratio and direction are required, this folder provides `robotiq_2f_85_mimic_joints.urdf`, which keeps the mimic tags from the original URDF and can be used as a reference. It was generated from `robotiq/robotiq_2f_robot/robot/simple_rq2f85_pybullet.urdf.xacro` like so:
```
rosrun xacro xacro --inorder simple_rq2f85_pybullet.urdf.xacro adaptive_transmission:="true" > robotiq_2f_85_mimic_joints.urdf
```
The URDF meant for use in pybullet is `robotiq_2f_85.urdf` and it is generated in a similar manner as above by running:
```
rosrun xacro xacro --inorder simple_rq2f85_pybullet.urdf.xacro > robotiq_2f_85.urdf
```
| 2,574 |
Markdown
| 47.584905 | 440 | 0.773116 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/app.py
|
#!/usr/bin/env python3
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
"""Test a PoseCNN on images"""
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.utils.data
import os, sys
import numpy as np
from PIL import Image
import json
from scipy.spatial.transform import Rotation
import UOC.tools._init_paths
from fcn.test_module import img_segment, img_process, compute_xyz, depth_to_pc
import networks as networks
UOC_path = "E:/workspace/visual_match/UOC"
def get_cam():
filename = f'{UOC_path}/data/demo/camera_params.json'
if os.path.exists(filename):
with open(filename) as f:
camera_params = json.load(f)
else:
camera_params = None
return camera_params
def get_network():
gpu_id = 0
pretrained = f'{UOC_path}/data/checkpoints/seg_resnet34_8s_embedding_cosine_rgbd_add_sampling_epoch_16.checkpoint.pth'
pretrained_crop = f'{UOC_path}/data/checkpoints/seg_resnet34_8s_embedding_cosine_rgbd_add_crop_sampling_epoch_16.checkpoint.pth'
# pretrained = f'{UOC_path}/data/checkpoints/seg_resnet34_8s_embedding_cosine_color_sampling_epoch_16.checkpoint.pth'
# pretrained_crop = f'{UOC_path}/data/checkpoints/seg_resnet34_8s_embedding_cosine_color_crop_sampling_epoch_16.checkpoint.pth'
network_name = 'seg_resnet34_8s_embedding'
# device
device = torch.device('cuda:{:d}'.format(gpu_id))
num_classes = 2
train_num_unit = 64
network_data = torch.load(pretrained)
network = networks.__dict__[network_name](num_classes, train_num_unit, network_data).cuda(device=device)
network = torch.nn.DataParallel(network, device_ids=[gpu_id]).cuda(device=device)
cudnn.benchmark = True
network.eval()
network_data_crop = torch.load(pretrained_crop)
network_crop = networks.__dict__[network_name](num_classes, train_num_unit, network_data_crop).cuda(device=device)
network_crop = torch.nn.DataParallel(network_crop, device_ids=[gpu_id]).cuda(device=device)
network_crop.eval()
return network, network_crop, device
class Segmenter(object):
def __init__(self) -> None:
network, network_crop, device = get_network()
self.network = network
self.network_crop = network_crop
self.device = device
def segment(self, rgb: np.array, dep:np.array, camera_params:dict):
# dep is meter
rgb_batch, dep_batch = img_process(rgb, dep, camera_params)
out_label, out_label_refined = img_segment(rgb_batch, dep_batch, self.network, self.network_crop, self.device, False)
return out_label[0], out_label_refined[0]
def crop(self, rgb, label):
all_ids = np.unique(label)
all_imgs = []
bboxes = []
for i in list(all_ids):
if i == 0: continue
if torch.sum(label == i) < 32*32:
continue
x, y = np.where(label == i)
min_x = x.min()
max_x = x.max()
min_y = y.min()
max_y = y.max()
if (max_x - min_x) * (max_x - min_x) > 250**2:
continue
img = rgb[min_x:max_x, min_y:max_y]
all_imgs.append(img)
bboxes.append([min_x,max_x, min_y,max_y])
return all_imgs, bboxes
def segment_and_crop(self, rgb: np.array, dep:np.array, camera_params:dict):
label, label_refined = self.segment(rgb, dep, camera_params)
all_rgb, bbox = self.crop(rgb, label)
return all_rgb, bbox
def crop_point_cloud(self, dep, camera_params, bboxes):
c2w = camera_params['c2w']
cam = camera_params['cam']
        # The camera frame used inside Omniverse differs from the camera frame expressed
        # in world coordinates: the camera looks down its local -Z axis. So after the
        # depth map is backprojected into the camera frame, two transforms are needed:
        # camera frame --> camera frame in world coordinates (i.e. a 180-degree rotation
        # about the x axis) --> world frame.
rot_x = Rotation.from_rotvec(np.pi * np.array([1,0,0])).as_matrix()
c2w[:3,:3] = c2w[:3,:3] @ rot_x
pc = depth_to_pc(dep, cam, c2w)
ret = []
for bbox in bboxes:
min_x, max_x, min_y, max_y = bbox
points = pc[min_x:max_x, min_y:max_y]
points = points.reshape(-1,3)
ret.append(points)
return ret
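# --- Illustrative usage sketch (not part of the original code) ---
# Shows how the Segmenter above can be driven with the RGB-D images saved by
# robot/scene/task.py:Env.save_images(prefix). The camera_params keys follow the
# usual UOC convention (fx/fy/x_offset/y_offset for backprojection) plus the
# cam/c2w matrices used by crop_point_cloud; adjust if img_process expects otherwise.
def example_segment(prefix="../robot/images/sc"):
    rgb = np.array(Image.open(prefix + "_rgb.png"))[:, :, :3][:, :, ::-1].copy()  # RGBA -> BGR
    dep = np.load(prefix + "_dep.npy")   # depth in meters
    cam = np.load(prefix + "_cam.npy")   # 3x3 intrinsics
    c2w = np.load(prefix + "_c2w.npy")   # 4x4 camera-to-world pose
    camera_params = {
        'fx': cam[0, 0], 'fy': cam[1, 1],              # focal lengths in pixels
        'x_offset': cam[0, 2], 'y_offset': cam[1, 2],  # principal point
        'cam': cam, 'c2w': c2w,                        # used by crop_point_cloud
    }
    segmenter = Segmenter()
    crops, bboxes = segmenter.segment_and_crop(rgb, dep, camera_params)
    clouds = segmenter.crop_point_cloud(dep, camera_params, bboxes)
    return crops, bboxes, clouds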
if __name__ == '__main__':
np.random.seed(3)
# list images
images_color = []
images_depth = []
network, network_crop, device = get_network()
# camera_params = get_cam()
# rgb = cv2.imread("./data/demo/000002-color.png", cv2.COLOR_BGR2RGB)
# dep = cv2.imread("./data/demo/000002-depth.png", cv2.IMREAD_ANYDEPTH)
# dep = dep.astype(np.float32) / 1000.0
# rgb = cv2.imread("../robot/images/sc_rgb.png", cv2.COLOR_BGR2RGB)
rgb = np.array(Image.open("../robot/images/sc_rgb.png"))[:,:,:3][:,:,::-1].copy()
dep = np.load("../robot/images/sc_dep.npy")
cam = np.load("../robot/images/sc_cam.npy")
camera_params = {}
    # standard pinhole convention: focal lengths on the diagonal, principal point in the last column
    camera_params['fx'] = cam[0, 0]
    camera_params['fy'] = cam[1, 1]
    camera_params['x_offset'] = cam[0, 2]
    camera_params['y_offset'] = cam[1, 2]
# dep = None
rgb_batch, dep_batch = img_process(rgb, dep, camera_params)
rgb_batch = torch.cat([rgb_batch], dim=0)
dep_batch = torch.cat([dep_batch], dim=0)
# if dep_batch is not None:
out_label, out_label_refined = img_segment(rgb_batch, dep_batch, network, network_crop, device, True)
| 5,556 |
Python
| 31.121387 | 132 | 0.614651 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/README.md
|
# Code modified from: https://github.com/NVlabs/UnseenObjectClustering
# Unseen Object Clustering: Learning RGB-D Feature Embeddings for Unseen Object Instance Segmentation
### Introduction
In this work, we propose a new method for unseen object instance segmentation by learning RGB-D feature embeddings from synthetic data. A metric learning loss function is utilized to learn to produce pixel-wise feature embeddings such that pixels from the same object are close to each other and pixels from different objects are separated in the embedding space. With the learned feature embeddings, a mean shift clustering algorithm can be applied to discover and segment unseen objects. We further improve the segmentation accuracy with a new two-stage clustering algorithm. Our method demonstrates that non-photorealistic synthetic RGB and depth images can be used to learn feature embeddings that transfer well to real-world images for unseen object instance segmentation. [arXiv](https://arxiv.org/pdf/2007.15157.pdf), [Talk video](https://youtu.be/pxma-x0BGpU)
<p align="center"><img src="./data/pics/network.png" width="750" height="200"/></p>
### License
Unseen Object Clustering is released under the NVIDIA Source Code License (refer to the LICENSE file for details).
### Citation
If you find Unseen Object Clustering useful in your research, please consider citing:
@inproceedings{xiang2020learning,
Author = {Yu Xiang and Christopher Xie and Arsalan Mousavian and Dieter Fox},
Title = {Learning RGB-D Feature Embeddings for Unseen Object Instance Segmentation},
booktitle = {Conference on Robot Learning (CoRL)},
Year = {2020}
}
### Required environment
- Ubuntu 16.04 or above
- PyTorch 0.4.1 or above
- CUDA 9.1 or above
### Installation
1. Install [PyTorch](https://pytorch.org/).
2. Install python packages
```Shell
pip install -r requirement.txt
```
### Download
- Download our trained checkpoints from [here](https://drive.google.com/file/d/1O-ymMGD_qDEtYxRU19zSv17Lgg6fSinQ/view?usp=sharing), save to $ROOT/data.
### Running the demo
1. Download our trained checkpoints first.
2. Run the following script for testing on images under $ROOT/data/demo.
```Shell
./experiments/scripts/demo_rgbd_add.sh
```
<p align="center"><img src="./data/pics/demo_rgbd_add.png" width="640" height="360"/></p>
### Training and testing on the Tabletop Object Dataset (TOD)
1. Download the Tabletop Object Dataset (TOD) from [here](https://drive.google.com/uc?export=download&id=1Du309Ye8J7v2c4fFGuyPGjf-C3-623vw) (34G).
2. Create a symlink for the TOD dataset
```Shell
cd $ROOT/data
ln -s $TOD_DATA tabletop
```
3. Training and testing on the TOD dataset
```Shell
cd $ROOT
# multi-gpu training, we used 4 GPUs
./experiments/scripts/seg_resnet34_8s_embedding_cosine_rgbd_add_train_tabletop.sh
# testing, $GPU_ID can be 0, 1, etc.
./experiments/scripts/seg_resnet34_8s_embedding_cosine_rgbd_add_test_tabletop.sh $GPU_ID $EPOCH
```
### Testing on the OCID dataset and the OSD dataset
1. Download the OCID dataset from [here](https://www.acin.tuwien.ac.at/en/vision-for-robotics/software-tools/object-clutter-indoor-dataset/), and create a symbolic link:
```Shell
cd $ROOT/data
ln -s $OCID_dataset OCID
```
2. Download the OSD dataset from [here](https://www.acin.tuwien.ac.at/en/vision-for-robotics/software-tools/osd/), and create a symbolic link:
```Shell
cd $ROOT/data
ln -s $OSD_dataset OSD
```
3. Check the scripts in experiments/scripts named test_ocid or test_osd. Make sure the paths of the trained checkpoints exist.
```Shell
experiments/scripts/seg_resnet34_8s_embedding_cosine_rgbd_add_test_ocid.sh
experiments/scripts/seg_resnet34_8s_embedding_cosine_rgbd_add_test_osd.sh
```
### Running with ROS on a Realsense camera for real-world unseen object instance segmentation
- Python2 is needed for ROS.
- Make sure our pretrained checkpoints are downloaded.
```Shell
# start realsense
roslaunch realsense2_camera rs_aligned_depth.launch tf_prefix:=measured/camera
# start rviz
rosrun rviz rviz -d ./ros/segmentation.rviz
# run segmentation, $GPU_ID can be 0, 1, etc.
./experiments/scripts/ros_seg_rgbd_add_test_segmentation_realsense.sh $GPU_ID
```
Our example:
<p align="center"><img src="./data/pics/unseen_clustering.gif"/></p>
| 4,436 |
Markdown
| 35.07317 | 866 | 0.731064 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/LICENSE.md
|
# NVIDIA Source Code License for Unseen Object Clustering: Learning RGB-D Feature Embeddings for Unseen Object Instance Segmentation
## 1. Definitions
“Licensor” means any person or entity that distributes its Work.
“Software” means the original work of authorship made available under this License.
“Work” means the Software and any additions to or derivative works of the Software that are made available under this License.
“Nvidia Processors” means any central processing unit (CPU), graphics processing unit (GPU), field-programmable gate array (FPGA), application-specific integrated circuit (ASIC) or any combination thereof designed, made, sold, or provided by Nvidia or its affiliates.
The terms “reproduce,” “reproduction,” “derivative works,” and “distribution” have the meaning as provided under U.S. copyright law; provided, however, that for the purposes of this License, derivative works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work.
Works, including the Software, are “made available” under this License by including in or with the Work either (a) a copyright notice referencing the applicability of this License to the Work, or (b) a copy of this License.
## 2. License Grants
### 2.1 Copyright Grant.
Subject to the terms and conditions of this License, each Licensor grants to you a perpetual, worldwide, non-exclusive, royalty-free, copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, sublicense and distribute its Work and any resulting derivative works in any form.
## 3. Limitations
### 3.1 Redistribution.
You may reproduce or distribute the Work only if (a) you do so under this License, (b) you include a complete copy of this License with your distribution, and (c) you retain without modification any copyright, patent, trademark, or attribution notices that are present in the Work.
### 3.2 Derivative Works.
You may specify that additional or different terms apply to the use, reproduction, and distribution of your derivative works of the Work (“Your Terms”) only if (a) Your Terms provide that the use limitation in Section 3.3 applies to your derivative works, and (b) you identify the specific derivative works that are subject to Your Terms. Notwithstanding Your Terms, this License (including the redistribution requirements in Section 3.1) will continue to apply to the Work itself.
### 3.3 Use Limitation.
The Work and any derivative works thereof only may be used or intended for use non-commercially. The Work or derivative works thereof may be used or intended for use by Nvidia or its affiliates commercially or non-commercially. As used herein, “non-commercially” means for research or evaluation purposes only.
### 3.4 Patent Claims.
If you bring or threaten to bring a patent claim against any Licensor (including any claim, cross-claim or counterclaim in a lawsuit) to enforce any patents that you allege are infringed by any Work, then your rights under this License from such Licensor (including the grants in Sections 2.1 and 2.2) will terminate immediately.
### 3.5 Trademarks.
This License does not grant any rights to use any Licensor’s or its affiliates’ names, logos, or trademarks, except as necessary to reproduce the notices described in this License.
### 3.6 Termination.
If you violate any term of this License, then your rights under this License (including the grants in Sections 2.1 and 2.2) will terminate immediately.
## 4. Disclaimer of Warranty.
THE WORK IS PROVIDED “AS IS” WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WARRANTIES OR CONDITIONS OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE OR NON-INFRINGEMENT. YOU BEAR THE RISK OF UNDERTAKING ANY ACTIVITIES UNDER THIS LICENSE.
## 5. Limitation of Liability.
EXCEPT AS PROHIBITED BY APPLICABLE LAW, IN NO EVENT AND UNDER NO LEGAL THEORY, WHETHER IN TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE SHALL ANY LICENSOR BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR RELATED TO THIS LICENSE, THE USE OR INABILITY TO USE THE WORK (INCLUDING BUT NOT LIMITED TO LOSS OF GOODWILL, BUSINESS INTERRUPTION, LOST PROFITS OR DATA, COMPUTER FAILURE OR MALFUNCTION, OR ANY OTHER COMMERCIAL DAMAGES OR LOSSES), EVEN IF THE LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
| 4,469 |
Markdown
| 90.224488 | 593 | 0.791005 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/tools/test_net.py
|
#!/usr/bin/env python3
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
"""Test a DeepIM network on an image database."""
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.utils.data
import argparse
import pprint
import time, os, sys
import os.path as osp
import numpy as np
import random
import scipy.io
import _init_paths
from fcn.test_dataset import test_segnet
from fcn.config import cfg, cfg_from_file, get_output_dir
from datasets.factory import get_dataset
import networks
def parse_args():
"""
Parse input arguments
"""
    parser = argparse.ArgumentParser(description='Test an Unseen Clustering Network')
parser.add_argument('--gpu', dest='gpu_id', help='GPU id to use',
default=0, type=int)
parser.add_argument('--pretrained', dest='pretrained',
help='initialize with pretrained checkpoint',
default=None, type=str)
parser.add_argument('--pretrained_crop', dest='pretrained_crop',
help='initialize with pretrained checkpoint for crops',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--dataset', dest='dataset_name',
help='dataset to train on',
default='shapenet_scene_train', type=str)
parser.add_argument('--rand', dest='randomize',
help='randomize (do not use a fixed seed)',
action='store_true')
parser.add_argument('--network', dest='network_name',
help='name of the network',
default=None, type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if len(cfg.TEST.CLASSES) == 0:
cfg.TEST.CLASSES = cfg.TRAIN.CLASSES
print('Using config:')
pprint.pprint(cfg)
if not args.randomize:
# fix the random seeds (numpy and caffe) for reproducibility
np.random.seed(cfg.RNG_SEED)
# device
cfg.gpu_id = 0
cfg.device = torch.device('cuda:{:d}'.format(cfg.gpu_id))
print('GPU device {:d}'.format(args.gpu_id))
# prepare dataset
if cfg.TEST.VISUALIZE:
shuffle = True
np.random.seed()
else:
shuffle = False
cfg.MODE = 'TEST'
dataset = get_dataset(args.dataset_name)
worker_init_fn = dataset.worker_init_fn if hasattr(dataset, 'worker_init_fn') else None
num_workers = 1
dataloader = torch.utils.data.DataLoader(dataset, batch_size=cfg.TEST.IMS_PER_BATCH, shuffle=shuffle,
num_workers=num_workers, worker_init_fn=worker_init_fn)
    print('Use dataset `{:s}` for testing'.format(dataset.name))
# overwrite intrinsics
if len(cfg.INTRINSICS) > 0:
K = np.array(cfg.INTRINSICS).reshape(3, 3)
dataset._intrinsic_matrix = K
print(dataset._intrinsic_matrix)
output_dir = get_output_dir(dataset, None)
print('Output will be saved to `{:s}`'.format(output_dir))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# prepare network
if args.pretrained:
network_data = torch.load(args.pretrained)
if isinstance(network_data, dict) and 'model' in network_data:
network_data = network_data['model']
print("=> using pre-trained network '{}'".format(args.pretrained))
else:
network_data = None
print("no pretrained network specified")
sys.exit()
network = networks.__dict__[args.network_name](dataset.num_classes, cfg.TRAIN.NUM_UNITS, network_data).cuda(device=cfg.device)
network = torch.nn.DataParallel(network, device_ids=[cfg.gpu_id]).cuda(device=cfg.device)
cudnn.benchmark = True
if args.pretrained_crop:
network_data_crop = torch.load(args.pretrained_crop)
network_crop = networks.__dict__[args.network_name](dataset.num_classes, cfg.TRAIN.NUM_UNITS, network_data_crop).cuda(device=cfg.device)
network_crop = torch.nn.DataParallel(network_crop, device_ids=[cfg.gpu_id]).cuda(device=cfg.device)
else:
network_crop = None
# test network
test_segnet(dataloader, network, output_dir, network_crop)
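# Example invocation (illustrative only; the network name, checkpoint path and
# config file below are placeholders, not values confirmed by this repository):
#   ./tools/test_net.py --gpu 0 --dataset tabletop_object_test \
#       --cfg experiments/cfgs/seg_resnet34_8s_embedding_cosine_rgbd_add_crop_tabletop.yml \
#       --network seg_resnet34_8s_embedding \
#       --pretrained data/checkpoints/your_checkpoint.pth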
| 4,656 |
Python
| 34.280303 | 144 | 0.635309 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/tools/view_tabletop_data.py
|
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
import os
import os.path
import torch
import cv2
import numpy as np
import glob
import random
import math
from transforms3d.quaternions import mat2quat, quat2mat
import _init_paths
from datasets import TableTopObject
import matplotlib.pyplot as plt
from utils import mask as util_
if __name__ == '__main__':
tabletop = TableTopObject('train')
num = tabletop._size
index = np.random.permutation(num)
for idx in index:
        # Get scene directory; crop does not use background
scene_idx = idx // tabletop.NUM_VIEWS_PER_SCENE
scene_dir = tabletop.scene_dirs[scene_idx]
# Get view number
view_num = idx % tabletop.NUM_VIEWS_PER_SCENE
# Label
foreground_labels_filename = os.path.join(scene_dir, 'segmentation_%05d.png' % view_num)
# label = util_.imread_indexed(foreground_labels_filename)
label = cv2.imread(foreground_labels_filename)
# BGR image
filename = os.path.join(scene_dir, 'rgb_%05d.jpeg' % view_num)
im = cv2.imread(filename)
# Depth image
depth_img_filename = os.path.join(scene_dir, 'depth_%05d.png' % view_num)
im_depth = cv2.imread(depth_img_filename, cv2.IMREAD_ANYDEPTH)
# visualization
fig = plt.figure()
ax = fig.add_subplot(1, 3, 1)
plt.imshow(im[:, :, (2, 1, 0)])
plt.axis('off')
ax = fig.add_subplot(1, 3, 2)
plt.imshow(im_depth)
plt.axis('off')
ax = fig.add_subplot(1, 3, 3)
plt.imshow(label[:, :, (2, 1, 0)])
plt.axis('off')
plt.show()
| 1,774 |
Python
| 28.583333 | 96 | 0.632469 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/tools/test_images.py
|
#!/usr/bin/env python3
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
"""Test a PoseCNN on images"""
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.utils.data
import argparse
import pprint
import time, os, sys
import os.path as osp
import numpy as np
import cv2
import scipy.io
import glob
import json
import _init_paths
from fcn.test_dataset import test_sample
from fcn.config import cfg, cfg_from_file, get_output_dir
import networks
from utils.blob import pad_im
from utils import mask as util_
def parse_args():
"""
Parse input arguments
"""
    parser = argparse.ArgumentParser(description='Test a UCN network')
parser.add_argument('--gpu', dest='gpu_id', help='GPU id to use',
default=0, type=int)
parser.add_argument('--pretrained', dest='pretrained',
help='initialize with pretrained checkpoint',
default=None, type=str)
parser.add_argument('--pretrained_crop', dest='pretrained_crop',
help='initialize with pretrained checkpoint for crops',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--dataset', dest='dataset_name',
help='dataset to train on',
default='shapenet_scene_train', type=str)
parser.add_argument('--depth', dest='depth_name',
help='depth image pattern',
default='*depth.png', type=str)
parser.add_argument('--color', dest='color_name',
help='color image pattern',
default='*color.png', type=str)
parser.add_argument('--imgdir', dest='imgdir',
help='path of the directory with the test images',
default=None, type=str)
parser.add_argument('--rand', dest='randomize',
help='randomize (do not use a fixed seed)',
action='store_true')
parser.add_argument('--network', dest='network_name',
help='name of the network',
default=None, type=str)
parser.add_argument('--image_path', dest='image_path',
help='path to images', default=None, type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
# save data
def save_data(file_rgb, out_label_refined, roi, features_crop):
# meta data
'''
meta = {'roi': roi, 'features': features_crop.cpu().detach().numpy(), 'labels': out_label_refined.cpu().detach().numpy()}
filename = file_rgb[:-9] + 'meta.mat'
scipy.io.savemat(filename, meta, do_compression=True)
print('save data to {}'.format(filename))
'''
# segmentation labels
label_save = out_label_refined.cpu().detach().numpy()[0]
label_save = np.clip(label_save, 0, 1) * 255
label_save = label_save.astype(np.uint8)
filename = file_rgb[:-4] + '-label.png'
cv2.imwrite(filename, label_save)
print('save data to {}'.format(filename))
def compute_xyz(depth_img, fx, fy, px, py, height, width):
indices = util_.build_matrix_of_indices(height, width)
z_e = depth_img
x_e = (indices[..., 1] - px) * z_e / fx
y_e = (indices[..., 0] - py) * z_e / fy
xyz_img = np.stack([x_e, y_e, z_e], axis=-1) # Shape: [H x W x 3]
return xyz_img
def read_sample(filename_color, filename_depth, camera_params):
# bgr image
im = cv2.imread(filename_color)
if cfg.INPUT == 'DEPTH' or cfg.INPUT == 'RGBD':
# depth image
depth_img = cv2.imread(filename_depth, cv2.IMREAD_ANYDEPTH)
depth = depth_img.astype(np.float32) / 1000.0
height = depth.shape[0]
width = depth.shape[1]
fx = camera_params['fx']
fy = camera_params['fy']
px = camera_params['x_offset']
py = camera_params['y_offset']
xyz_img = compute_xyz(depth, fx, fy, px, py, height, width)
else:
xyz_img = None
im_tensor = torch.from_numpy(im) / 255.0
pixel_mean = torch.tensor(cfg.PIXEL_MEANS / 255.0).float()
im_tensor -= pixel_mean
image_blob = im_tensor.permute(2, 0, 1)
sample = {'image_color': image_blob.unsqueeze(0)}
if cfg.INPUT == 'DEPTH' or cfg.INPUT == 'RGBD':
depth_blob = torch.from_numpy(xyz_img).permute(2, 0, 1)
sample['depth'] = depth_blob.unsqueeze(0)
return sample
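# Note: camera_params is loaded from camera_params.json (see below) and is expected to
# carry the pinhole intrinsics used in read_sample/compute_xyz. An illustrative file
# (the numeric values are placeholders, not calibration data):
#   {"fx": 615.0, "fy": 615.0, "x_offset": 320.0, "y_offset": 240.0}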
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if len(cfg.TEST.CLASSES) == 0:
cfg.TEST.CLASSES = cfg.TRAIN.CLASSES
print('Using config:')
pprint.pprint(cfg)
if not args.randomize:
# fix the random seeds (numpy and caffe) for reproducibility
np.random.seed(cfg.RNG_SEED)
# device
cfg.gpu_id = 0
cfg.device = torch.device('cuda:{:d}'.format(cfg.gpu_id))
cfg.instance_id = 0
num_classes = 2
cfg.MODE = 'TEST'
print('GPU device {:d}'.format(args.gpu_id))
# list images
images_color = []
filename = os.path.join(args.imgdir, args.color_name)
files = glob.glob(filename)
for i in range(len(files)):
filename = files[i]
images_color.append(filename)
images_color.sort()
images_depth = []
filename = os.path.join(args.imgdir, args.depth_name)
files = glob.glob(filename)
for i in range(len(files)):
filename = files[i]
images_depth.append(filename)
images_depth.sort()
# check if intrinsics available
filename = os.path.join(args.imgdir, 'camera_params.json')
if os.path.exists(filename):
with open(filename) as f:
camera_params = json.load(f)
else:
camera_params = None
# prepare network
if args.pretrained:
network_data = torch.load(args.pretrained)
print("=> using pre-trained network '{}'".format(args.pretrained))
else:
network_data = None
print("no pretrained network specified")
sys.exit()
network = networks.__dict__[args.network_name](num_classes, cfg.TRAIN.NUM_UNITS, network_data).cuda(device=cfg.device)
network = torch.nn.DataParallel(network, device_ids=[cfg.gpu_id]).cuda(device=cfg.device)
cudnn.benchmark = True
network.eval()
if args.pretrained_crop:
network_data_crop = torch.load(args.pretrained_crop)
network_crop = networks.__dict__[args.network_name](num_classes, cfg.TRAIN.NUM_UNITS, network_data_crop).cuda(device=cfg.device)
network_crop = torch.nn.DataParallel(network_crop, device_ids=[cfg.gpu_id]).cuda(device=cfg.device)
network_crop.eval()
else:
network_crop = None
if cfg.TEST.VISUALIZE:
index_images = np.random.permutation(len(images_color))
else:
index_images = range(len(images_color))
for i in index_images:
if os.path.exists(images_color[i]):
print(images_color[i])
# read sample
sample = read_sample(images_color[i], images_depth[i], camera_params)
# run network
out_label, out_label_refined = test_sample(sample, network, network_crop)
else:
            print('file does not exist %s' % (images_color[i]))
| 7,617 |
Python
| 32.707964 | 136 | 0.604306 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/tools/train_net.py
|
#!/usr/bin/env python3
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
"""Train a UCN on image segmentation database."""
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import argparse
import pprint
import numpy as np
import sys
import os
import os.path as osp
import cv2
import _init_paths
import datasets
import networks
from fcn.config import cfg, cfg_from_file, get_output_dir
from fcn.train import train_segnet
from datasets.factory import get_dataset
def parse_args():
"""
Parse input arguments
"""
    parser = argparse.ArgumentParser(description='Train a UCN network')
parser.add_argument('--epochs', dest='epochs',
help='number of epochs to train',
default=40000, type=int)
parser.add_argument('--startepoch', dest='startepoch',
help='the starting epoch',
default=0, type=int)
parser.add_argument('--pretrained', dest='pretrained',
help='initialize with pretrained checkpoint',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default=None, type=str)
parser.add_argument('--solver', dest='solver',
help='solver type',
default='sgd', type=str)
parser.add_argument('--dataset', dest='dataset_name',
help='dataset to train on',
default='shapenet_scene_train', type=str)
parser.add_argument('--dataset_background', dest='dataset_background_name',
help='background dataset to train on',
default='background_nvidia', type=str)
parser.add_argument('--rand', dest='randomize',
help='randomize (do not use a fixed seed)',
action='store_true')
parser.add_argument('--network', dest='network_name',
help='name of the network',
default=None, type=str)
parser.add_argument('--cad', dest='cad_name',
help='name of the CAD files',
default=None, type=str)
parser.add_argument('--pose', dest='pose_name',
help='name of the pose files',
default=None, type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
print('Using config:')
pprint.pprint(cfg)
if not args.randomize:
# fix the random seeds (numpy and caffe) for reproducibility
np.random.seed(cfg.RNG_SEED)
# prepare dataset
cfg.MODE = 'TRAIN'
dataset = get_dataset(args.dataset_name)
worker_init_fn = dataset.worker_init_fn if hasattr(dataset, 'worker_init_fn') else None
num_workers = 4
dataloader = torch.utils.data.DataLoader(dataset, batch_size=cfg.TRAIN.IMS_PER_BATCH, shuffle=True,
num_workers=num_workers, worker_init_fn=worker_init_fn)
print('Use dataset `{:s}` for training'.format(dataset.name))
# overwrite intrinsics
if len(cfg.INTRINSICS) > 0:
K = np.array(cfg.INTRINSICS).reshape(3, 3)
dataset._intrinsic_matrix = K
print(dataset._intrinsic_matrix)
output_dir = get_output_dir(dataset, None)
print('Output will be saved to `{:s}`'.format(output_dir))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# prepare network
if args.pretrained:
network_data = torch.load(args.pretrained)
if isinstance(network_data, dict) and 'model' in network_data:
network_data = network_data['model']
print("=> using pre-trained network '{}'".format(args.network_name))
else:
network_data = None
print("=> creating network '{}'".format(args.network_name))
network = networks.__dict__[args.network_name](dataset.num_classes, cfg.TRAIN.NUM_UNITS, network_data).cuda()
if torch.cuda.device_count() > 1:
cfg.TRAIN.GPUNUM = torch.cuda.device_count()
print("Let's use", torch.cuda.device_count(), "GPUs!")
network = torch.nn.DataParallel(network).cuda()
cudnn.benchmark = True
# prepare optimizer
assert(args.solver in ['adam', 'sgd'])
print('=> setting {} solver'.format(args.solver))
param_groups = [{'params': network.module.bias_parameters(), 'weight_decay': cfg.TRAIN.WEIGHT_DECAY},
{'params': network.module.weight_parameters(), 'weight_decay': cfg.TRAIN.WEIGHT_DECAY}]
if args.solver == 'adam':
optimizer = torch.optim.Adam(param_groups, cfg.TRAIN.LEARNING_RATE,
betas=(cfg.TRAIN.MOMENTUM, cfg.TRAIN.BETA))
elif args.solver == 'sgd':
optimizer = torch.optim.SGD(param_groups, cfg.TRAIN.LEARNING_RATE,
momentum=cfg.TRAIN.MOMENTUM)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[m - args.startepoch for m in cfg.TRAIN.MILESTONES], gamma=cfg.TRAIN.GAMMA)
cfg.epochs = args.epochs
# main loop
for epoch in range(args.startepoch, args.epochs):
if args.solver == 'sgd':
scheduler.step()
# train for one epoch
train_segnet(dataloader, network, optimizer, epoch)
# save checkpoint
if (epoch+1) % cfg.TRAIN.SNAPSHOT_EPOCHS == 0 or epoch == args.epochs - 1:
state = network.module.state_dict()
infix = ('_' + cfg.TRAIN.SNAPSHOT_INFIX
if cfg.TRAIN.SNAPSHOT_INFIX != '' else '')
filename = (cfg.TRAIN.SNAPSHOT_PREFIX + infix + '_epoch_{:d}'.format(epoch+1) + '.checkpoint.pth')
torch.save(state, os.path.join(output_dir, filename))
print(filename)
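# Example invocation (illustrative only; the network name and config file are
# placeholders; adjust them to the experiment you are running):
#   ./tools/train_net.py --dataset tabletop_object_train --solver sgd --epochs 16 \
#       --cfg experiments/cfgs/seg_resnet34_8s_embedding_cosine_rgbd_add_crop_tabletop.yml \
#       --network seg_resnet34_8s_embedding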
| 6,221 |
Python
| 37.645962 | 154 | 0.602636 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/tools/plot_epochs.py
|
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
import numpy as np
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import rc
t = np.arange(start=1, stop=17, step=1)
rc('font', weight='bold')
# UCN RGB
F1_overlap_rgb = (0.206197, 0.357676, 0.345578, 0.455547, 0.457957, 0.502031, 0.457381, 0.518946, 0.552038, 0.520665, \
0.50691, 0.519003, 0.550123, 0.514964, 0.514002, 0.593742)
F1_boundary_rgb = (0.094635, 0.167361, 0.172439, 0.237247, 0.244162, 0.270599, 0.258174, 0.309146, 0.327651, 0.316562, \
0.322138, 0.311927, 0.344229, 0.308593, 0.329168, 0.364516)
percentage_rgb = (0.263843, 0.355614, 0.356287, 0.414091, 0.405764, 0.431488, 0.435723, 0.464883, 0.471861, 0.475419, \
0.503157, 0.497285, 0.506068, 0.473288, 0.497579, 0.480164)
# UCN Depth
F1_overlap_depth = (0.632557, 0.745917, 0.775232, 0.802915, 0.82635, 0.834976, 0.843941, 0.836614, 0.857734, 0.858773, \
0.846244, 0.853272, 0.843275, 0.8384, 0.846614, 0.864338)
F1_boundary_depth = (0.219215, 0.327336, 0.414885, 0.471119, 0.590424, 0.615502, 0.668548, 0.656816, 0.714789, 0.726485, \
0.721683, 0.717682, 0.723354, 0.724921, 0.738028, 0.756031)
percentage_depth = (0.463543, 0.572034, 0.607148, 0.654096, 0.700107, 0.700688, 0.72621, 0.719467, 0.76059, 0.751082, \
0.733714, 0.735936, 0.712744, 0.723239, 0.726254, 0.753693)
# UCN RGBD early
F1_overlap_rgbd_early = (0.357674, 0.553803, 0.607327, 0.661596, 0.707028, 0.721938, 0.741733, 0.77255, 0.795557, 0.735402, \
0.806955, 0.758339, 0.800102, 0.815694, 0.799456, 0.828135)
F1_boundary_rgbd_early = (0.128438, 0.281023, 0.362007, 0.432142, 0.481427, 0.476286, 0.510337, 0.559285, 0.595986, 0.535778, \
0.621609, 0.593379, 0.59994, 0.646276, 0.637706, 0.672144)
percentage_rgbd_early = (0.290032, 0.420344, 0.497644, 0.555368, 0.597204, 0.576219, 0.598361, 0.665128, 0.687534, 0.635226, \
0.683646, 0.670646, 0.677623, 0.698645, 0.716388, 0.735246)
# UCN RGBD add
F1_overlap_rgbd_add = (0.514279, 0.662002, 0.795837, 0.788407, 0.795113, 0.842289, 0.824394, 0.854453, 0.847598, 0.865754, \
0.855248, 0.85502, 0.857568, 0.856234, 0.840809, 0.884881)
F1_boundary_rgbd_add = (0.245276, 0.324417, 0.549822, 0.534663, 0.576119, 0.679746, 0.639074, 0.705335, 0.722362, 0.742819, \
0.749845, 0.73857, 0.758677, 0.755076, 0.739145, 0.787763)
percentage_rgbd_add = (0.491431, 0.538068, 0.661125, 0.675489, 0.695592, 0.742781, 0.731744, 0.744917, 0.736696, 0.766834, \
0.747862, 0.741274, 0.76629, 0.747441, 0.723242, 0.821638)
# UCN RGBD cat
F1_overlap_rgbd_cat = (0.441337, 0.591691, 0.747262, 0.727342, 0.807502, 0.817291, 0.816996, 0.827194, 0.831351, 0.841048, \
0.808059, 0.834401, 0.835638, 0.835728, 0.806224, 0.828991)
F1_boundary_rgbd_cat = (0.190999, 0.286006, 0.397822, 0.452141, 0.567425, 0.576083, 0.598294, 0.645848, 0.670346, 0.682605, \
0.587685, 0.674055, 0.713088, 0.700418, 0.607698, 0.685053)
percentage_rgbd_cat = (0.475042, 0.531699, 0.617873, 0.639375, 0.673361, 0.678608, 0.677335, 0.701095, 0.705839, 0.709701, \
0.662733, 0.7124, 0.724381, 0.71867, 0.676644, 0.682604)
# create plot
size = 12
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
plt.plot(t, F1_overlap_rgb, marker='o', color='r')
plt.plot(t, F1_overlap_depth, marker='o', color='g')
plt.plot(t, F1_overlap_rgbd_early, marker='o', color='b')
plt.plot(t, F1_overlap_rgbd_add, marker='o', color='c')
plt.plot(t, F1_overlap_rgbd_cat, marker='o', color='y')
ax.set_title('F Overlap', fontsize=size, fontweight='bold')
plt.xticks(t, fontsize=size)
plt.yticks(fontsize=size)
plt.xlabel('epoch', fontsize=size, fontweight='bold')
ax.legend(['UCN RGB', 'UCN Depth', 'UCN RGBD early', 'UCN RGBD add', 'UCN RGBD concat'], fontsize=size)
plt.show()
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
plt.plot(t, F1_boundary_rgb, marker='o', color='r')
plt.plot(t, F1_boundary_depth, marker='o', color='g')
plt.plot(t, F1_boundary_rgbd_early, marker='o', color='b')
plt.plot(t, F1_boundary_rgbd_add, marker='o', color='c')
plt.plot(t, F1_boundary_rgbd_cat, marker='o', color='y')
ax.set_title('F Boundary', fontsize=size, fontweight='bold')
plt.xticks(t, fontsize=size)
plt.yticks(fontsize=size)
plt.xlabel('epoch', fontsize=size, fontweight='bold')
ax.legend(['UCN RGB', 'UCN Depth', 'UCN RGBD early', 'UCN RGBD add', 'UCN RGBD concat'], fontsize=size)
plt.show()
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
plt.plot(t, percentage_rgb, marker='o', color='r')
plt.plot(t, percentage_depth, marker='o', color='g')
plt.plot(t, percentage_rgbd_early, marker='o', color='b')
plt.plot(t, percentage_rgbd_add, marker='o', color='c')
plt.plot(t, percentage_rgbd_cat, marker='o', color='y')
ax.set_title('%75', fontsize=size, fontweight='bold')
plt.xticks(t, fontsize=size)
plt.yticks(fontsize=size)
plt.xlabel('epoch', fontsize=size, fontweight='bold')
ax.legend(['UCN RGB', 'UCN Depth', 'UCN RGBD early', 'UCN RGBD add', 'UCN RGBD concat'], fontsize=size)
plt.show()
| 5,347 |
Python
| 48.06422 | 127 | 0.649523 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/tools/_init_paths.py
|
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
"""Set up paths for UCN"""
import os.path as osp
import sys
def add_path(path):
if path not in sys.path:
sys.path.insert(0, path)
this_dir = osp.dirname(__file__)
# Add lib to PYTHONPATH
lib_path = osp.join(this_dir, '..', 'lib')
add_path(lib_path)
| 446 |
Python
| 22.526315 | 83 | 0.690583 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/tools/plot_bar_charts.py
|
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
import numpy as np
import matplotlib.pyplot as plt
# data to plot
n_groups = 4
# F1_maskrcnn = (62.7, 84.7, 78.1, 76.6, 76.0)
# F1_ours = (59.4, 86.4, 82.8, 88.5, 82.9)
# F1_maskrcnn = (54.6, 78.8, 70.8, 64.3, 64.7)
# F1_ours = (36.5, 75.6, 67.2, 78.8, 68.5)
# F1_maskrcnn = (59.4, 86.4, 82.8, 88.5, 82.9)
# F1_ours = (58.1, 86.4, 84.0, 87.8, 85.1)
# F1_maskrcnn = (36.5, 75.6, 67.2, 78.8, 68.5)
# F1_ours = (40.8, 79.6, 72.5, 82.3, 78.3)
# F1_overlap = (84.7, 81.7, 86.4, 87.8)
# F1_boundary = (78.8, 71.4, 76.2, 82.3)
# percentage = (72.7, 69.1, 77.2, 85.6)
F1_overlap = (80.6, 79.9, 83.3, 87.4)
F1_boundary = (54.6, 65.6, 71.2, 69.4)
percentage = (77.6, 71.9, 73.8, 83.2)
# create plot
fig, ax = plt.subplots()
index = np.arange(n_groups)
bar_width = 0.2
opacity = 0.8
rects1 = plt.bar(index, F1_overlap, bar_width,
alpha=opacity,
color='b',
label='F1_overlap')
rects2 = plt.bar(index + bar_width, F1_boundary, bar_width,
alpha=opacity,
color='g',
label='F1_boundary')
rects3 = plt.bar(index + 2 * bar_width, percentage, bar_width,
alpha=opacity,
color='r',
label='%75')
plt.xlabel('Methods')
# plt.ylabel('F1 boundary')
plt.title('OSD (111 images)')
plt.xticks(index + bar_width, ('MRCNN Depth', 'UOIS-2D', 'UOIS-3D', 'Ours'))
plt.legend(loc='lower left')
labels = F1_overlap
for i, v in enumerate(labels):
ax.text(i-.2, v+1,
labels[i],
fontsize=12,
color='k')
labels = F1_boundary
for i, v in enumerate(labels):
ax.text(i+.1, v+1,
labels[i],
fontsize=12,
color='k')
labels = percentage
for i, v in enumerate(labels):
ax.text(i+.35, v+1,
labels[i],
fontsize=12,
color='k')
plt.tight_layout()
plt.show()
| 1,952 |
Python
| 22.817073 | 83 | 0.587602 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/tools/dataset_statistics.py
|
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
import os
import os.path
import torch
import cv2
import numpy as np
import glob
import random
import math
from transforms3d.quaternions import mat2quat, quat2mat
import _init_paths
from datasets import OCIDObject, OSDObject
import matplotlib.pyplot as plt
from utils import mask as util_
if __name__ == '__main__':
dataset = OSDObject('test')
num = dataset._size
num_objects = []
for i in range(num):
filename = str(dataset.image_files[i])
# labels_filename = filename.replace('rgb', 'label')
labels_filename = filename.replace('image_color', 'annotation')
foreground_labels = util_.imread_indexed(labels_filename)
# mask table as background
foreground_labels[foreground_labels == 1] = 0
if 'table' in labels_filename:
foreground_labels[foreground_labels == 2] = 0
foreground_labels = dataset.process_label(foreground_labels)
n = len(np.unique(foreground_labels)) - 1
num_objects.append(n)
print(labels_filename, n)
nums = np.array(num_objects)
print('min: %d' % (np.min(nums)))
print('max: %d' % (np.max(nums)))
print('mean: %f' % (np.mean(nums)))
| 1,366 |
Python
| 30.790697 | 83 | 0.669839 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/experiments/cfgs/seg_resnet34_8s_embedding_cosine_rgbd_add_crop_tabletop.yml
|
EXP_DIR: tabletop_object
INPUT: RGBD
TRAIN:
TRAINABLE: True
WEIGHT_DECAY: 0.0005
LEARNING_RATE: 0.00001
MILESTONES: !!python/tuple [3]
MOMENTUM: 0.9
BETA: 0.999
GAMMA: 0.1
SCALES_BASE: !!python/tuple [1.0]
IMS_PER_BATCH: 16
NUM_UNITS: 64
HARD_LABEL_THRESHOLD: 0.9
HARD_LABEL_SAMPLING: 0.0
HARD_ANGLE: 5.0
HOUGH_LABEL_THRESHOLD: 100
HOUGH_VOTING_THRESHOLD: 10
HOUGH_SKIP_PIXELS: 10
FG_THRESH: 0.5
FG_THRESH_POSE: 0.5
SNAPSHOT_INFIX: resnet34_8s_embedding_cosine_rgbd_add_crop_sampling
SNAPSHOT_EPOCHS: 1
SNAPSHOT_PREFIX: seg
USE_FLIPPED: False
CHROMATIC: True
ADD_NOISE: True
VISUALIZE: False
VERTEX_REG: True
POSE_REG: False # no rotation regression
SLIM: True
CHANGE_BACKGROUND: False
FUSION_TYPE: add
# synthetic data
SYN_CROP: True
SYN_CROP_SIZE: 224
min_padding_percentage: 0.1
max_padding_percentage: 0.5
SYNTHESIZE: True
SYNNUM: 80000
SYN_RATIO: 5
SYN_BACKGROUND_SPECIFIC: False
SYN_BACKGROUND_SUBTRACT_MEAN: True
SYN_BACKGROUND_CONSTANT_PROB: 0.2
SYN_TABLE_PROB: 0.9
SYN_SAMPLE_OBJECT: True
SYN_SAMPLE_POSE: False
SYN_MIN_OBJECT: 3
SYN_MAX_OBJECT: 5
SYN_TNEAR: 0.2
SYN_TFAR: 0.6
SYN_BOUND: 0.05
SYN_STD_ROTATION: 15
SYN_STD_TRANSLATION: 0.05
# embedding training parameters
EMBEDDING_PRETRAIN: True
EMBEDDING_SAMPLING: True
EMBEDDING_SAMPLING_NUM: 1000
EMBEDDING_NORMALIZATION: True
EMBEDDING_METRIC: cosine
EMBEDDING_ALPHA: 0.02
EMBEDDING_DELTA: 0.5
EMBEDDING_LAMBDA_INTRA: 10.0
EMBEDDING_LAMBDA_INTER: 10.0
TEST:
SINGLE_FRAME: True
HOUGH_LABEL_THRESHOLD: 400
HOUGH_VOTING_THRESHOLD: 10
IMS_PER_BATCH: 1
HOUGH_SKIP_PIXELS: 10
DET_THRESHOLD: 0.1
SCALES_BASE: !!python/tuple [1.0]
VISUALIZE: False
SYNTHESIZE: True
POSE_REFINE: False
ROS_CAMERA: D415
| 1,807 |
YAML
| 22.480519 | 69 | 0.71057 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/lib/datasets/__init__.py
|
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
from .imdb import imdb
from .tabletop_object import TableTopObject
from .osd_object import OSDObject
from .ocid_object import OCIDObject
import os.path as osp
ROOT_DIR = osp.join(osp.dirname(__file__), '..', '..')
| 396 |
Python
| 32.083331 | 83 | 0.752525 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/lib/datasets/factory.py
|
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
"""Factory method for easily getting imdbs by name."""
__sets = {}
import datasets.tabletop_object
import datasets.osd_object
import datasets.ocid_object
import numpy as np
# tabletop object dataset
for split in ['train', 'test', 'all']:
name = 'tabletop_object_{}'.format(split)
print(name)
__sets[name] = (lambda split=split:
datasets.TableTopObject(split))
# OSD object dataset
for split in ['test']:
name = 'osd_object_{}'.format(split)
print(name)
__sets[name] = (lambda split=split:
datasets.OSDObject(split))
# OCID object dataset
for split in ['test']:
name = 'ocid_object_{}'.format(split)
print(name)
__sets[name] = (lambda split=split:
datasets.OCIDObject(split))
def get_dataset(name):
"""Get an imdb (image database) by name."""
if name not in __sets:
raise KeyError('Unknown dataset: {}'.format(name))
return __sets[name]()
def list_datasets():
"""List all registered imdbs."""
return __sets.keys()
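# Example usage (dataset names follow the '<dataset>_<split>' pattern registered above):
#   dataset = get_dataset('tabletop_object_train')
#   dataset = get_dataset('osd_object_test')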
| 1,201 |
Python
| 26.318181 | 83 | 0.65612 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/lib/datasets/tabletop_object.py
|
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
import torch
import torch.utils.data as data
import os, math
import sys
import time
import random
import numpy as np
import numpy.random as npr
import cv2
import glob
import matplotlib.pyplot as plt
import datasets
from fcn.config import cfg
from utils.blob import chromatic_transform, add_noise
from utils import augmentation
from utils import mask as util_
data_loading_params = {
# Camera/Frustum parameters
'img_width' : 640,
'img_height' : 480,
'near' : 0.01,
'far' : 100,
'fov' : 45, # vertical field of view in degrees
'use_data_augmentation' : True,
# Multiplicative noise
'gamma_shape' : 1000.,
'gamma_scale' : 0.001,
# Additive noise
'gaussian_scale' : 0.005, # 5mm standard dev
'gp_rescale_factor' : 4,
# Random ellipse dropout
'ellipse_dropout_mean' : 10,
'ellipse_gamma_shape' : 5.0,
'ellipse_gamma_scale' : 1.0,
# Random high gradient dropout
'gradient_dropout_left_mean' : 15,
'gradient_dropout_alpha' : 2.,
'gradient_dropout_beta' : 5.,
# Random pixel dropout
'pixel_dropout_alpha' : 1.,
'pixel_dropout_beta' : 10.,
}
def compute_xyz(depth_img, camera_params):
""" Compute ordered point cloud from depth image and camera parameters.
If focal lengths fx,fy are stored in the camera_params dictionary, use that.
Else, assume camera_params contains parameters used to generate synthetic data (e.g. fov, near, far, etc)
@param depth_img: a [H x W] numpy array of depth values in meters
@param camera_params: a dictionary with parameters of the camera used
"""
# Compute focal length from camera parameters
if 'fx' in camera_params and 'fy' in camera_params:
fx = camera_params['fx']
fy = camera_params['fy']
else: # simulated data
aspect_ratio = camera_params['img_width'] / camera_params['img_height']
e = 1 / (np.tan(np.radians(camera_params['fov']/2.)))
t = camera_params['near'] / e; b = -t
r = t * aspect_ratio; l = -r
alpha = camera_params['img_width'] / (r-l) # pixels per meter
focal_length = camera_params['near'] * alpha # focal length of virtual camera (frustum camera)
fx = focal_length; fy = focal_length
if 'x_offset' in camera_params and 'y_offset' in camera_params:
x_offset = camera_params['x_offset']
y_offset = camera_params['y_offset']
else: # simulated data
x_offset = camera_params['img_width']/2
y_offset = camera_params['img_height']/2
indices = util_.build_matrix_of_indices(camera_params['img_height'], camera_params['img_width'])
z_e = depth_img
x_e = (indices[..., 1] - x_offset) * z_e / fx
y_e = (indices[..., 0] - y_offset) * z_e / fy
xyz_img = np.stack([x_e, y_e, z_e], axis=-1) # Shape: [H x W x 3]
return xyz_img
class TableTopObject(data.Dataset, datasets.imdb):
def __init__(self, image_set, tabletop_object_path = None):
self._name = 'tabletop_object_' + image_set
self._image_set = image_set
self._tabletop_object_path = self._get_default_path() if tabletop_object_path is None \
else tabletop_object_path
self._classes_all = ('__background__', 'foreground')
self._classes = self._classes_all
self._pixel_mean = torch.tensor(cfg.PIXEL_MEANS / 255.0).float()
self.params = data_loading_params
        # crop does not use background
if cfg.TRAIN.SYN_CROP:
self.NUM_VIEWS_PER_SCENE = 5
else:
self.NUM_VIEWS_PER_SCENE = 7
# get a list of all scenes
if image_set == 'train':
data_path = os.path.join(self._tabletop_object_path, 'training_set')
self.scene_dirs = sorted(glob.glob(data_path + '/*'))
elif image_set == 'test':
data_path = os.path.join(self._tabletop_object_path, 'test_set')
print(data_path)
self.scene_dirs = sorted(glob.glob(data_path + '/*'))
elif image_set == 'all':
data_path = os.path.join(self._tabletop_object_path, 'training_set')
scene_dirs_train = sorted(glob.glob(data_path + '/*'))
data_path = os.path.join(self._tabletop_object_path, 'test_set')
scene_dirs_test = sorted(glob.glob(data_path + '/*'))
self.scene_dirs = scene_dirs_train + scene_dirs_test
print('%d scenes for dataset %s' % (len(self.scene_dirs), self._name))
self._size = len(self.scene_dirs) * self.NUM_VIEWS_PER_SCENE
assert os.path.exists(self._tabletop_object_path), \
'tabletop_object path does not exist: {}'.format(self._tabletop_object_path)
def process_depth(self, depth_img):
""" Process depth channel
- change from millimeters to meters
- cast to float32 data type
- add random noise
- compute xyz ordered point cloud
"""
# millimeters -> meters
depth_img = (depth_img / 1000.).astype(np.float32)
# add random noise to depth
if self.params['use_data_augmentation']:
depth_img = augmentation.add_noise_to_depth(depth_img, self.params)
depth_img = augmentation.dropout_random_ellipses(depth_img, self.params)
# Compute xyz ordered point cloud and add noise
xyz_img = compute_xyz(depth_img, self.params)
if self.params['use_data_augmentation']:
xyz_img = augmentation.add_noise_to_xyz(xyz_img, depth_img, self.params)
return xyz_img
def process_label(self, foreground_labels):
""" Process foreground_labels
- Map the foreground_labels to {0, 1, ..., K-1}
@param foreground_labels: a [H x W] numpy array of labels
@return: foreground_labels
"""
# Find the unique (nonnegative) foreground_labels, map them to {0, ..., K-1}
unique_nonnegative_indices = np.unique(foreground_labels)
mapped_labels = foreground_labels.copy()
for k in range(unique_nonnegative_indices.shape[0]):
mapped_labels[foreground_labels == unique_nonnegative_indices[k]] = k
foreground_labels = mapped_labels
return foreground_labels
def pad_crop_resize(self, img, label, depth):
""" Crop the image around the label mask, then resize to 224x224
"""
H, W, _ = img.shape
# sample an object to crop
K = np.max(label)
while True:
if K > 0:
idx = np.random.randint(1, K+1)
else:
idx = 0
foreground = (label == idx).astype(np.float32)
# get tight box around label/morphed label
x_min, y_min, x_max, y_max = util_.mask_to_tight_box(foreground)
cx = (x_min + x_max) / 2
cy = (y_min + y_max) / 2
# make bbox square
x_delta = x_max - x_min
y_delta = y_max - y_min
if x_delta > y_delta:
y_min = cy - x_delta / 2
y_max = cy + x_delta / 2
else:
x_min = cx - y_delta / 2
x_max = cx + y_delta / 2
sidelength = x_max - x_min
padding_percentage = np.random.uniform(cfg.TRAIN.min_padding_percentage, cfg.TRAIN.max_padding_percentage)
padding = int(round(sidelength * padding_percentage))
if padding == 0:
padding = 25
# Pad and be careful of boundaries
x_min = max(int(x_min - padding), 0)
x_max = min(int(x_max + padding), W-1)
y_min = max(int(y_min - padding), 0)
y_max = min(int(y_max + padding), H-1)
# crop
if (y_min == y_max) or (x_min == x_max):
continue
img_crop = img[y_min:y_max+1, x_min:x_max+1]
label_crop = label[y_min:y_max+1, x_min:x_max+1]
roi = [x_min, y_min, x_max, y_max]
if depth is not None:
depth_crop = depth[y_min:y_max+1, x_min:x_max+1]
break
# resize
s = cfg.TRAIN.SYN_CROP_SIZE
img_crop = cv2.resize(img_crop, (s, s))
label_crop = cv2.resize(label_crop, (s, s), interpolation=cv2.INTER_NEAREST)
if depth is not None:
depth_crop = cv2.resize(depth_crop, (s, s), interpolation=cv2.INTER_NEAREST)
else:
depth_crop = None
return img_crop, label_crop, depth_crop
# sample num of pixel for clustering instead of using all
def sample_pixels(self, labels, num=1000):
# -1 ignore
labels_new = -1 * np.ones_like(labels)
K = np.max(labels)
for i in range(K+1):
index = np.where(labels == i)
n = len(index[0])
if n <= num:
labels_new[index[0], index[1]] = i
else:
perm = np.random.permutation(n)
selected = perm[:num]
labels_new[index[0][selected], index[1][selected]] = i
return labels_new
def __getitem__(self, idx):
        # Get scene directory; crop does not use background
scene_idx = idx // self.NUM_VIEWS_PER_SCENE
scene_dir = self.scene_dirs[scene_idx]
# Get view number
view_num = idx % self.NUM_VIEWS_PER_SCENE
if cfg.TRAIN.SYN_CROP:
view_num += 2
# Label
foreground_labels_filename = os.path.join(scene_dir, 'segmentation_%05d.png' % view_num)
foreground_labels = util_.imread_indexed(foreground_labels_filename)
# mask table as background
foreground_labels[foreground_labels == 1] = 0
foreground_labels = self.process_label(foreground_labels)
# BGR image
filename = os.path.join(scene_dir, 'rgb_%05d.jpeg' % view_num)
im = cv2.imread(filename)
if cfg.INPUT == 'DEPTH' or cfg.INPUT == 'RGBD':
# Depth image
depth_img_filename = os.path.join(scene_dir, 'depth_%05d.png' % view_num)
depth_img = cv2.imread(depth_img_filename, cv2.IMREAD_ANYDEPTH) # This reads a 16-bit single-channel image. Shape: [H x W]
xyz_img = self.process_depth(depth_img)
else:
xyz_img = None
# crop
if cfg.TRAIN.SYN_CROP:
im, foreground_labels, xyz_img = self.pad_crop_resize(im, foreground_labels, xyz_img)
foreground_labels = self.process_label(foreground_labels)
# sample labels
if cfg.TRAIN.EMBEDDING_SAMPLING:
foreground_labels = self.sample_pixels(foreground_labels, cfg.TRAIN.EMBEDDING_SAMPLING_NUM)
label_blob = torch.from_numpy(foreground_labels).unsqueeze(0)
sample = {'label': label_blob}
if cfg.TRAIN.CHROMATIC and cfg.MODE == 'TRAIN' and np.random.rand(1) > 0.1:
im = chromatic_transform(im)
if cfg.TRAIN.ADD_NOISE and cfg.MODE == 'TRAIN' and np.random.rand(1) > 0.1:
im = add_noise(im)
im_tensor = torch.from_numpy(im) / 255.0
im_tensor -= self._pixel_mean
image_blob = im_tensor.permute(2, 0, 1)
sample['image_color'] = image_blob
if cfg.INPUT == 'DEPTH' or cfg.INPUT == 'RGBD':
depth_blob = torch.from_numpy(xyz_img).permute(2, 0, 1)
sample['depth'] = depth_blob
return sample
def __len__(self):
return self._size
def _get_default_path(self):
"""
Return the default path where tabletop_object is expected to be installed.
"""
return os.path.join(datasets.ROOT_DIR, 'data', 'tabletop')
| 11,898 |
Python
| 35.725309 | 134 | 0.579089 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/lib/datasets/ocid_object.py
|
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
import torch
import torch.utils.data as data
import os, math
import sys
import time
import random
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
import datasets
import pcl
from pathlib import Path
from fcn.config import cfg
from utils.blob import chromatic_transform, add_noise
from utils import mask as util_
class OCIDObject(data.Dataset, datasets.imdb):
def __init__(self, image_set, ocid_object_path = None):
self._name = 'ocid_object_' + image_set
self._image_set = image_set
self._ocid_object_path = self._get_default_path() if ocid_object_path is None \
else ocid_object_path
self._classes_all = ('__background__', 'foreground')
self._classes = self._classes_all
self._pixel_mean = torch.tensor(cfg.PIXEL_MEANS / 255.0).float()
self._width = 640
self._height = 480
self.image_paths = self.list_dataset()
print('%d images for dataset %s' % (len(self.image_paths), self._name))
self._size = len(self.image_paths)
assert os.path.exists(self._ocid_object_path), \
'ocid_object path does not exist: {}'.format(self._ocid_object_path)
def list_dataset(self):
data_path = Path(self._ocid_object_path)
seqs = list(Path(data_path).glob('**/*seq*'))
image_paths = []
for seq in seqs:
paths = sorted(list((seq / 'rgb').glob('*.png')))
image_paths += paths
return image_paths
def process_label(self, foreground_labels):
""" Process foreground_labels
- Map the foreground_labels to {0, 1, ..., K-1}
@param foreground_labels: a [H x W] numpy array of labels
@return: foreground_labels
"""
# Find the unique (nonnegative) foreground_labels, map them to {0, ..., K-1}
unique_nonnegative_indices = np.unique(foreground_labels)
mapped_labels = foreground_labels.copy()
for k in range(unique_nonnegative_indices.shape[0]):
mapped_labels[foreground_labels == unique_nonnegative_indices[k]] = k
foreground_labels = mapped_labels
return foreground_labels
def __getitem__(self, idx):
# BGR image
filename = str(self.image_paths[idx])
im = cv2.imread(filename)
if cfg.TRAIN.CHROMATIC and cfg.MODE == 'TRAIN' and np.random.rand(1) > 0.1:
im = chromatic_transform(im)
if cfg.TRAIN.ADD_NOISE and cfg.MODE == 'TRAIN' and np.random.rand(1) > 0.1:
im = add_noise(im)
im_tensor = torch.from_numpy(im) / 255.0
im_tensor_bgr = im_tensor.clone()
im_tensor_bgr = im_tensor_bgr.permute(2, 0, 1)
im_tensor -= self._pixel_mean
image_blob = im_tensor.permute(2, 0, 1)
# Label
labels_filename = filename.replace('rgb', 'label')
foreground_labels = util_.imread_indexed(labels_filename)
# mask table as background
foreground_labels[foreground_labels == 1] = 0
if 'table' in labels_filename:
foreground_labels[foreground_labels == 2] = 0
foreground_labels = self.process_label(foreground_labels)
label_blob = torch.from_numpy(foreground_labels).unsqueeze(0)
index = filename.find('OCID')
sample = {'image_color': image_blob,
'image_color_bgr': im_tensor_bgr,
'label': label_blob,
'filename': filename[index+5:]}
# Depth image
if cfg.INPUT == 'DEPTH' or cfg.INPUT == 'RGBD':
pcd_filename = filename.replace('rgb', 'pcd')
pcd_filename = pcd_filename.replace('png', 'pcd')
pcloud = pcl.load(pcd_filename).to_array()
pcloud[np.isnan(pcloud)] = 0
xyz_img = pcloud.reshape((self._height, self._width, 3))
depth_blob = torch.from_numpy(xyz_img).permute(2, 0, 1)
sample['depth'] = depth_blob
return sample
def __len__(self):
return self._size
def _get_default_path(self):
"""
Return the default path where ocid_object is expected to be installed.
"""
return os.path.join(datasets.ROOT_DIR, 'data', 'OCID')
| 4,437 |
Python
| 34.222222 | 87 | 0.601533 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/lib/datasets/osd_object.py
|
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
import torch
import torch.utils.data as data
import os, math
import sys
import time
import random
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
import datasets
import pcl
from fcn.config import cfg
from utils.blob import chromatic_transform, add_noise
from utils import mask as util_
class OSDObject(data.Dataset, datasets.imdb):
def __init__(self, image_set, osd_object_path = None):
self._name = 'osd_object_' + image_set
self._image_set = image_set
self._osd_object_path = self._get_default_path() if osd_object_path is None \
else osd_object_path
self._classes_all = ('__background__', 'foreground')
self._classes = self._classes_all
self._pixel_mean = torch.tensor(cfg.PIXEL_MEANS / 255.0).float()
self._width = 640
self._height = 480
# get all images
data_path = os.path.join(self._osd_object_path, 'image_color')
self.image_files = sorted(glob.glob(data_path + '/*.png'))
print('%d images for dataset %s' % (len(self.image_files), self._name))
self._size = len(self.image_files)
assert os.path.exists(self._osd_object_path), \
'osd_object path does not exist: {}'.format(self._osd_object_path)
def process_label(self, foreground_labels):
""" Process foreground_labels
- Map the foreground_labels to {0, 1, ..., K-1}
@param foreground_labels: a [H x W] numpy array of labels
@return: foreground_labels
"""
# Find the unique (nonnegative) foreground_labels, map them to {0, ..., K-1}
unique_nonnegative_indices = np.unique(foreground_labels)
mapped_labels = foreground_labels.copy()
for k in range(unique_nonnegative_indices.shape[0]):
mapped_labels[foreground_labels == unique_nonnegative_indices[k]] = k
foreground_labels = mapped_labels
return foreground_labels
def __getitem__(self, idx):
# BGR image
filename = self.image_files[idx]
im = cv2.imread(filename)
if cfg.TRAIN.CHROMATIC and cfg.MODE == 'TRAIN' and np.random.rand(1) > 0.1:
im = chromatic_transform(im)
if cfg.TRAIN.ADD_NOISE and cfg.MODE == 'TRAIN' and np.random.rand(1) > 0.1:
im = add_noise(im)
im_tensor = torch.from_numpy(im) / 255.0
im_tensor_bgr = im_tensor.clone()
im_tensor_bgr = im_tensor_bgr.permute(2, 0, 1)
im_tensor -= self._pixel_mean
image_blob = im_tensor.permute(2, 0, 1)
# Label
labels_filename = filename.replace('image_color', 'annotation')
foreground_labels = util_.imread_indexed(labels_filename)
foreground_labels = self.process_label(foreground_labels)
label_blob = torch.from_numpy(foreground_labels).unsqueeze(0)
index = filename.find('OSD')
sample = {'image_color': image_blob,
'image_color_bgr': im_tensor_bgr,
'label': label_blob,
'filename': filename[index+4:]}
# Depth image
if cfg.INPUT == 'DEPTH' or cfg.INPUT == 'RGBD':
pcd_filename = filename.replace('image_color', 'pcd')
pcd_filename = pcd_filename.replace('png', 'pcd')
pcloud = pcl.load(pcd_filename).to_array()
pcloud[np.isnan(pcloud)] = 0
xyz_img = pcloud.reshape((self._height, self._width, 3))
depth_blob = torch.from_numpy(xyz_img).permute(2, 0, 1)
sample['depth'] = depth_blob
return sample
def __len__(self):
return self._size
def _get_default_path(self):
"""
Return the default path where osd_object is expected to be installed.
"""
return os.path.join(datasets.ROOT_DIR, 'data', 'OSD')
| 4,042 |
Python
| 34.464912 | 85 | 0.606383 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/lib/datasets/imdb.py
|
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
import os
import os.path as osp
import numpy as np
import datasets
import math
import glob
from fcn.config import cfg
class imdb(object):
"""Image database."""
def __init__(self):
self._name = ''
self._num_classes = 0
self._classes = []
self._class_colors = []
@property
def name(self):
return self._name
@property
def num_classes(self):
return len(self._classes)
@property
def classes(self):
return self._classes
@property
def class_colors(self):
return self._class_colors
@property
def cache_path(self):
cache_path = osp.abspath(osp.join(datasets.ROOT_DIR, 'data', 'cache'))
if not os.path.exists(cache_path):
os.makedirs(cache_path)
return cache_path
# backproject pixels into 3D points in camera's coordinate system
def backproject(self, depth_cv, intrinsic_matrix, factor):
depth = depth_cv.astype(np.float32, copy=True) / factor
index = np.where(~np.isfinite(depth))
depth[index[0], index[1]] = 0
# get intrinsic matrix
K = intrinsic_matrix
Kinv = np.linalg.inv(K)
# compute the 3D points
width = depth.shape[1]
height = depth.shape[0]
# construct the 2D points matrix
x, y = np.meshgrid(np.arange(width), np.arange(height))
ones = np.ones((height, width), dtype=np.float32)
x2d = np.stack((x, y, ones), axis=2).reshape(width*height, 3)
# backprojection
R = np.dot(Kinv, x2d.transpose())
# compute the 3D points
X = np.multiply(np.tile(depth.reshape(1, width*height), (3, 1)), R)
return np.array(X).transpose().reshape((height, width, 3))
def _build_uniform_poses(self):
self.eulers = []
interval = cfg.TRAIN.UNIFORM_POSE_INTERVAL
for yaw in range(-180, 180, interval):
for pitch in range(-90, 90, interval):
for roll in range(-180, 180, interval):
self.eulers.append([yaw, pitch, roll])
# sample indexes
num_poses = len(self.eulers)
num_classes = len(self._classes_all) - 1 # no background
self.pose_indexes = np.zeros((num_classes, ), dtype=np.int32)
self.pose_lists = []
for i in range(num_classes):
self.pose_lists.append(np.random.permutation(np.arange(num_poses)))
def _build_background_images(self):
backgrounds_color = []
backgrounds_depth = []
if cfg.TRAIN.SYN_BACKGROUND_SPECIFIC:
# NVIDIA
'''
allencenter = os.path.join(self.cache_path, '../AllenCenter/data')
subdirs = os.listdir(allencenter)
for i in xrange(len(subdirs)):
subdir = subdirs[i]
files = os.listdir(os.path.join(allencenter, subdir))
for j in range(len(files)):
filename = os.path.join(allencenter, subdir, files[j])
backgrounds_color.append(filename)
'''
comotion = os.path.join(self.cache_path, '../D435-data-with-depth/data')
subdirs = os.listdir(comotion)
            for i in range(len(subdirs)):
subdir = subdirs[i]
files = os.listdir(os.path.join(comotion, subdir))
for j in range(len(files)):
filename = os.path.join(comotion, subdir, files[j])
if 'depth.png' in filename:
backgrounds_depth.append(filename)
else:
backgrounds_color.append(filename)
backgrounds_color.sort()
backgrounds_depth.sort()
else:
'''
# SUN 2012
root = os.path.join(self.cache_path, '../SUN2012/data/Images')
subdirs = os.listdir(root)
for i in xrange(len(subdirs)):
subdir = subdirs[i]
names = os.listdir(os.path.join(root, subdir))
for j in xrange(len(names)):
name = names[j]
if os.path.isdir(os.path.join(root, subdir, name)):
files = os.listdir(os.path.join(root, subdir, name))
for k in range(len(files)):
if os.path.isdir(os.path.join(root, subdir, name, files[k])):
filenames = os.listdir(os.path.join(root, subdir, name, files[k]))
for l in range(len(filenames)):
filename = os.path.join(root, subdir, name, files[k], filenames[l])
backgrounds.append(filename)
else:
filename = os.path.join(root, subdir, name, files[k])
backgrounds.append(filename)
else:
filename = os.path.join(root, subdir, name)
backgrounds.append(filename)
# ObjectNet3D
objectnet3d = os.path.join(self.cache_path, '../ObjectNet3D/data')
files = os.listdir(objectnet3d)
for i in range(len(files)):
filename = os.path.join(objectnet3d, files[i])
backgrounds.append(filename)
'''
# PASCAL 2012
pascal = os.path.join(self.cache_path, '../PASCAL2012/data')
files = os.listdir(pascal)
for i in range(len(files)):
filename = os.path.join(pascal, files[i])
backgrounds_color.append(filename)
'''
# YCB Background
ycb = os.path.join(self.cache_path, '../YCB_Background')
files = os.listdir(ycb)
for i in range(len(files)):
filename = os.path.join(ycb, files[i])
backgrounds.append(filename)
'''
# depth background
kinect = os.path.join(self.cache_path, '../Kinect')
subdirs = os.listdir(kinect)
        for i in range(len(subdirs)):
subdir = subdirs[i]
files = glob.glob(os.path.join(self.cache_path, '../Kinect', subdir, '*depth*'))
for j in range(len(files)):
filename = os.path.join(self.cache_path, '../Kinect', subdir, files[j])
backgrounds_depth.append(filename)
for i in range(len(backgrounds_color)):
if not os.path.isfile(backgrounds_color[i]):
print('file not exist {}'.format(backgrounds_color[i]))
for i in range(len(backgrounds_depth)):
if not os.path.isfile(backgrounds_depth[i]):
print('file not exist {}'.format(backgrounds_depth[i]))
self._backgrounds_color = backgrounds_color
self._backgrounds_depth = backgrounds_depth
print('build color background images finished, {:d} images'.format(len(backgrounds_color)))
print('build depth background images finished, {:d} images'.format(len(backgrounds_depth)))
| 7,307 |
Python
| 36.096447 | 103 | 0.542904 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/lib/fcn/test_common.py
|
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
import torch
import time
import sys, os
import numpy as np
import matplotlib.pyplot as plt
from fcn.config import cfg
from utils.mask import visualize_segmentation
def normalize_descriptor(res, stats=None):
"""
Normalizes the descriptor into RGB color space
:param res: numpy.array [H,W,D]
Output of the network, per-pixel dense descriptor
:param stats: dict, with fields ['min', 'max', 'mean'], which are used to normalize descriptor
:return: numpy.array
normalized descriptor
"""
if stats is None:
res_min = res.min()
res_max = res.max()
else:
res_min = np.array(stats['min'])
res_max = np.array(stats['max'])
normed_res = np.clip(res, res_min, res_max)
eps = 1e-10
scale = (res_max - res_min) + eps
normed_res = (normed_res - res_min) / scale
return normed_res
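# --- Illustrative example (not part of the original file) ---
# Minimal usage sketch for normalize_descriptor(): rescale a dense feature map to
# [0, 1] so it can be displayed as an RGB image. The descriptor shape and the
# stats dictionary below are arbitrary values chosen only for illustration.
def _demo_normalize_descriptor():
    rng = np.random.RandomState(0)
    desc = rng.randn(480, 640, 3).astype(np.float32)     # fake [H, W, D] descriptor
    vis = normalize_descriptor(desc)                      # min/max taken from desc itself
    print(vis.min(), vis.max())                           # ~0.0 and ~1.0
    # with precomputed statistics (e.g. gathered over a whole dataset)
    vis_fixed = normalize_descriptor(desc, stats={'min': -3.0, 'max': 3.0, 'mean': 0.0})
    img = (vis_fixed * 255).astype(np.uint8)              # ready for plt.imshow(img)
    return img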
def _vis_features(features, labels, rgb, intial_labels, selected_pixels=None):
num = features.shape[0]
height = features.shape[2]
width = features.shape[3]
fig = plt.figure()
start = 1
m = int(np.ceil((num * 4) / 8.0))
n = 8
im_blob = rgb.cpu().numpy()
for i in range(num):
if i < m * n / 4:
# show image
im = im_blob[i, :3, :, :].copy()
im = im.transpose((1, 2, 0)) * 255.0
im += cfg.PIXEL_MEANS
im = im[:, :, (2, 1, 0)]
im = np.clip(im, 0, 255)
im = im.astype(np.uint8)
ax = fig.add_subplot(m, n, start)
start += 1
plt.imshow(im)
ax.set_title('image')
plt.axis('off')
'''
if selected_pixels is not None:
selected_indices = selected_pixels[i]
for j in range(len(selected_indices)):
index = selected_indices[j]
y = index / width
x = index % width
plt.plot(x, y, 'ro', markersize=1.0)
'''
im = torch.cuda.FloatTensor(height, width, 3)
for j in range(3):
im[:, :, j] = torch.sum(features[i, j::3, :, :], dim=0)
im = normalize_descriptor(im.detach().cpu().numpy())
im *= 255
im = im.astype(np.uint8)
ax = fig.add_subplot(m, n, start)
start += 1
plt.imshow(im)
ax.set_title('features')
plt.axis('off')
ax = fig.add_subplot(m, n, start)
start += 1
label = labels[i].detach().cpu().numpy()
plt.imshow(label)
ax.set_title('labels')
plt.axis('off')
ax = fig.add_subplot(m, n, start)
start += 1
label = intial_labels[i].detach().cpu().numpy()
plt.imshow(label)
ax.set_title('initial labels')
plt.axis('off')
plt.show()
def _vis_minibatch_segmentation_final(image, depth, label, out_label=None, out_label_refined=None,
features=None, ind=None, selected_pixels=None, bbox=None):
if depth is None:
im_blob = image.cpu().numpy()
else:
im_blob = image.cpu().numpy()
depth_blob = depth.cpu().numpy()
num = im_blob.shape[0]
height = im_blob.shape[2]
width = im_blob.shape[3]
if label is not None:
label_blob = label.cpu().numpy()
if out_label is not None:
out_label_blob = out_label.cpu().numpy()
if out_label_refined is not None:
out_label_refined_blob = out_label_refined.cpu().numpy()
m = 2
n = 3
for i in range(num):
# image
im = im_blob[i, :3, :, :].copy()
im = im.transpose((1, 2, 0)) * 255.0
im += cfg.PIXEL_MEANS
im = im[:, :, (2, 1, 0)]
im = np.clip(im, 0, 255)
im = im.astype(np.uint8)
fig = plt.figure()
start = 1
ax = fig.add_subplot(m, n, start)
start += 1
plt.imshow(im)
ax.set_title('image')
plt.axis('off')
# depth
if depth is not None:
depth = depth_blob[i][2]
ax = fig.add_subplot(m, n, start)
start += 1
plt.imshow(depth)
ax.set_title('depth')
plt.axis('off')
# feature
if features is not None:
im_feature = torch.cuda.FloatTensor(height, width, 3)
for j in range(3):
im_feature[:, :, j] = torch.sum(features[i, j::3, :, :], dim=0)
im_feature = normalize_descriptor(im_feature.detach().cpu().numpy())
im_feature *= 255
im_feature = im_feature.astype(np.uint8)
ax = fig.add_subplot(m, n, start)
start += 1
plt.imshow(im_feature)
ax.set_title('feature map')
plt.axis('off')
# initial seeds
if selected_pixels is not None:
ax = fig.add_subplot(m, n, start)
start += 1
plt.imshow(im)
ax.set_title('initial seeds')
plt.axis('off')
selected_indices = selected_pixels[i]
for j in range(len(selected_indices)):
index = selected_indices[j]
y = index // width
x = index % width
plt.plot(x, y, 'ro', markersize=2.0)
# intial mask
mask = out_label_blob[i, :, :]
im_label = visualize_segmentation(im, mask, return_rgb=True)
ax = fig.add_subplot(m, n, start)
start += 1
plt.imshow(im_label)
ax.set_title('initial label')
plt.axis('off')
# refined mask
if out_label_refined is not None:
mask = out_label_refined_blob[i, :, :]
im_label = visualize_segmentation(im, mask, return_rgb=True)
ax = fig.add_subplot(m, n, start)
start += 1
plt.imshow(im_label)
ax.set_title('refined label')
plt.axis('off')
elif label is not None:
# show gt label
mask = label_blob[i, 0, :, :]
im_label = visualize_segmentation(im, mask, return_rgb=True)
ax = fig.add_subplot(m, n, start)
start += 1
plt.imshow(im_label)
ax.set_title('gt label')
plt.axis('off')
if ind is not None:
mng = plt.get_current_fig_manager()
mng.resize(*mng.window.maxsize())
plt.pause(0.001)
# plt.show(block=False)
filename = 'output/images/%06d.png' % ind
fig.savefig(filename)
plt.close()
else:
plt.show()
def _vis_minibatch_segmentation(image, depth, label, out_label=None, out_label_refined=None,
features=None, ind=None, selected_pixels=None, bbox=None):
if depth is None:
im_blob = image.cpu().numpy()
m = 2
n = 3
else:
im_blob = image.cpu().numpy()
depth_blob = depth.cpu().numpy()
m = 3
n = 3
num = im_blob.shape[0]
height = im_blob.shape[2]
width = im_blob.shape[3]
if label is not None:
label_blob = label.cpu().numpy()
if out_label is not None:
out_label_blob = out_label.cpu().numpy()
if out_label_refined is not None:
out_label_refined_blob = out_label_refined.cpu().numpy()
for i in range(num):
# image
im = im_blob[i, :3, :, :].copy()
im = im.transpose((1, 2, 0)) * 255.0
im += cfg.PIXEL_MEANS
im = im[:, :, (2, 1, 0)]
im = np.clip(im, 0, 255)
im = im.astype(np.uint8)
'''
if out_label_refined is not None:
mask = out_label_refined_blob[i, :, :]
visualize_segmentation(im, mask)
#'''
# show image
fig = plt.figure()
start = 1
ax = fig.add_subplot(m, n, start)
start += 1
plt.imshow(im)
ax.set_title('image')
plt.axis('off')
ax = fig.add_subplot(m, n, start)
start += 1
plt.imshow(im)
plt.axis('off')
if bbox is not None:
boxes = bbox[i].numpy()
for j in range(boxes.shape[0]):
x1 = boxes[j, 0]
y1 = boxes[j, 1]
x2 = boxes[j, 2]
y2 = boxes[j, 3]
plt.gca().add_patch(
plt.Rectangle((x1, y1), x2-x1, y2-y1, fill=False, edgecolor='g', linewidth=3))
if selected_pixels is not None:
selected_indices = selected_pixels[i]
for j in range(len(selected_indices)):
index = selected_indices[j]
y = index // width
x = index % width
plt.plot(x, y, 'ro', markersize=1.0)
if im_blob.shape[1] == 4:
label = im_blob[i, 3, :, :]
ax = fig.add_subplot(m, n, start)
start += 1
plt.imshow(label)
ax.set_title('initial label')
if depth is not None:
depth = depth_blob[i]
ax = fig.add_subplot(m, n, start)
start += 1
plt.imshow(depth[0])
ax.set_title('depth X')
plt.axis('off')
ax = fig.add_subplot(m, n, start)
start += 1
plt.imshow(depth[1])
ax.set_title('depth Y')
plt.axis('off')
ax = fig.add_subplot(m, n, start)
start += 1
plt.imshow(depth[2])
ax.set_title('depth Z')
plt.axis('off')
# show label
if label is not None:
label = label_blob[i, 0, :, :]
ax = fig.add_subplot(m, n, start)
start += 1
plt.imshow(label)
ax.set_title('gt label')
plt.axis('off')
# show out label
if out_label is not None:
label = out_label_blob[i, :, :]
ax = fig.add_subplot(m, n, start)
start += 1
plt.imshow(label)
ax.set_title('out label')
plt.axis('off')
# show out label refined
if out_label_refined is not None:
label = out_label_refined_blob[i, :, :]
ax = fig.add_subplot(m, n, start)
start += 1
plt.imshow(label)
ax.set_title('out label refined')
plt.axis('off')
if features is not None:
im = torch.cuda.FloatTensor(height, width, 3)
for j in range(3):
im[:, :, j] = torch.sum(features[i, j::3, :, :], dim=0)
im = normalize_descriptor(im.detach().cpu().numpy())
im *= 255
im = im.astype(np.uint8)
ax = fig.add_subplot(m, n, start)
start += 1
plt.imshow(im)
ax.set_title('features')
plt.axis('off')
if ind is not None:
mng = plt.get_current_fig_manager()
plt.show()
filename = 'output/images/%06d.png' % ind
fig.savefig(filename)
plt.show()
| 11,242 |
Python
| 30.144044 | 98 | 0.491727 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/lib/fcn/test_imageset.py
|
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
import torch
import torch.nn.functional as F
import time
import sys, os
import cv2
import numpy as np
import matplotlib.pyplot as plt
from fcn.config import cfg
from fcn.test_common import normalize_descriptor
from transforms3d.quaternions import mat2quat, quat2mat, qmult
from utils.se3 import *
from utils.mean_shift import mean_shift_smart_init
def test_image_segmentation(ind, network, dataset, img, segmentor):
"""test on a single image"""
height = img.shape[0]
width = img.shape[1]
# compute image blob
inputs = img.astype(np.float32, copy=True)
inputs -= cfg.PIXEL_MEANS
inputs = np.transpose(inputs / 255.0, (2, 0, 1))
inputs = inputs[np.newaxis, :, :, :]
inputs = torch.from_numpy(inputs).cuda()
# use fake label blob
label = torch.cuda.FloatTensor(1, 2, height, width)
# run network
if network.module.embedding:
features = network(inputs, label)
out_label = torch.zeros((features.shape[0], height, width))
# mean shift clustering
num_seeds = 20
kappa = 20
for i in range(features.shape[0]):
X = features[i].view(features.shape[1], -1)
X = torch.transpose(X, 0, 1)
cluster_labels, selected_indices = mean_shift_smart_init(X, kappa=kappa, num_seeds=num_seeds, max_iters=10, metric='cosine')
out_label[i] = cluster_labels.view(height, width)
else:
out_label = network(inputs, label)
# mask refinement
if segmentor is not None:
out_label_refined, out_label_crop, rgb_crop, roi = segmentor.refine(inputs, out_label.clone())
else:
out_label_refined = None
roi = None
if cfg.TEST.VISUALIZE:
fig = plt.figure()
m = 2
n = 3
start = 1
if network.module.embedding:
ax = fig.add_subplot(m, n, start)
start += 1
plt.imshow(img[:, :, (2, 1, 0)])
for i in range(num_seeds):
index = selected_indices[i]
y = index // width
x = index % width
plt.plot(x, y, 'ro')
ax.set_title('input')
im = torch.cuda.FloatTensor(height, width, 3)
for i in range(3):
im[:, :, i] = torch.sum(features[0, i::3, :, :], dim=0)
im = normalize_descriptor(im.detach().cpu().numpy())
im *= 255
im = im.astype(np.uint8)
ax = fig.add_subplot(m, n, start)
start += 1
plt.imshow(im)
ax.set_title('features')
ax = fig.add_subplot(m, n, start)
start += 1
out_label_blob = out_label.cpu().numpy()
label = out_label_blob[0, :, :]
plt.imshow(label)
ax.set_title('cluster labels')
if roi is not None:
ax = fig.add_subplot(m, n, start)
start += 1
plt.imshow(img[:, :, (2, 1, 0)])
for i in range(roi.shape[0]):
x1 = roi[i, 0]
y1 = roi[i, 1]
x2 = roi[i, 2]
y2 = roi[i, 3]
plt.gca().add_patch(
plt.Rectangle((x1, y1), x2-x1, y2-y1, fill=False, edgecolor='g', linewidth=3))
if segmentor is not None:
ax = fig.add_subplot(m, n, start)
start += 1
out_label_blob = out_label_refined.cpu().numpy()
label = out_label_blob[0, :, :]
plt.imshow(label)
ax.set_title('cluster labels refined')
# mng = plt.get_current_fig_manager()
# filename = 'output/images/%06d.png' % ind
# fig.savefig(filename)
plt.show()
else:
ax = fig.add_subplot(1, 2, 1)
plt.imshow(img[:, :, (2, 1, 0)])
# show out label
out_label_blob = out_label.cpu().numpy()
label = out_label_blob[0, :, :]
ax = fig.add_subplot(1, 2, 2)
plt.imshow(label)
ax.set_title('out label')
plt.show()
return out_label, out_label_refined, roi
| 4,367 |
Python
| 32.343511 | 136 | 0.529196 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/lib/fcn/config.py
|
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
"""UCN config system.
This file specifies default config options for Fast R-CNN. You should not
change values in this file. Instead, you should write a config file (in yaml)
and use cfg_from_file(yaml_file) to load it and override the default options.
Most tools in $ROOT/tools take a --cfg option to specify an override file.
- See tools/{train,test}_net.py for example code that uses cfg_from_file()
- See experiments/cfgs/*.yml for example YAML config override files
"""
import os
import os.path as osp
import numpy as np
import math
# `pip install easydict` if you don't have it
from easydict import EasyDict as edict
__C = edict()
# Consumers can get config by:
# from fast_rcnn_config import cfg
cfg = __C
__C.FLIP_X = False
__C.INPUT = 'RGBD'
__C.NETWORK = 'VGG16'
__C.RIG = ''
__C.CAD = ''
__C.POSE = ''
__C.BACKGROUND = ''
__C.USE_GPU_NMS = True
__C.MODE = 'TRAIN'
__C.INTRINSICS = ()
__C.DATA_PATH = ''
__C.FLOW_HEIGHT = 512
__C.FLOW_WIDTH = 640
# Anchor scales for RPN
__C.ANCHOR_SCALES = (8,16,32)
# Anchor ratios for RPN
__C.ANCHOR_RATIOS = (0.5,1,2)
__C.FEATURE_STRIDE = 16
__C.gpu_id = 0
__C.instance_id = 0
#
# Training options
#
__C.TRAIN = edict()
__C.TRAIN.WEIGHT_DECAY = 0.0001
__C.TRAIN.SEGMENTATION = True
__C.TRAIN.ITERNUM = 4
__C.TRAIN.HEATUP = 4
__C.TRAIN.GPUNUM = 1
__C.TRAIN.CLASSES = (0,1,2,3)
__C.TRAIN.SYMMETRY = (0,0,0,0)
__C.TRAIN.SLIM = False
__C.TRAIN.SINGLE_FRAME = False
__C.TRAIN.TRAINABLE = True
__C.TRAIN.VERTEX_REG = True
__C.TRAIN.VERTEX_REG_DELTA = False
__C.TRAIN.POSE_REG = True
__C.TRAIN.LABEL_W = 1.0
__C.TRAIN.VERTEX_W = 1.0
__C.TRAIN.VERTEX_W_INSIDE = 10.0
__C.TRAIN.POSE_W = 1.0
__C.TRAIN.BOX_W = 1.0
__C.TRAIN.HARD_LABEL_THRESHOLD = 1.0
__C.TRAIN.HARD_LABEL_SAMPLING = 1.0
__C.TRAIN.HARD_ANGLE = 15.0
__C.TRAIN.VISUALIZE = False
__C.TRAIN.GAN = False
__C.TRAIN.MATCHING = False
__C.TRAIN.NOISE_LEVEL = 0.05
__C.TRAIN.FREEZE_LAYERS = True
__C.TRAIN.MAX_ITERS_PER_EPOCH = 1000000
__C.TRAIN.UNIFORM_POSE_INTERVAL = 15
__C.TRAIN.AFFINE = False
__C.TRAIN.CHANGE_BACKGROUND = False
__C.TRAIN.FUSION_TYPE = 'add'
# Hough voting
__C.TRAIN.HOUGH_LABEL_THRESHOLD = 100
__C.TRAIN.HOUGH_VOTING_THRESHOLD = -1
__C.TRAIN.HOUGH_SKIP_PIXELS = -1
__C.TRAIN.HOUGH_INLIER_THRESHOLD = 0.9
# synthetic training
__C.TRAIN.SYNTHESIZE = False
__C.TRAIN.SYN_ONLINE = False
__C.TRAIN.SYN_WIDTH = 640
__C.TRAIN.SYN_HEIGHT = 480
__C.TRAIN.SYNROOT = '/var/Projects/Deep_Pose/data/LOV/data_syn/'
if not os.path.exists(__C.TRAIN.SYNROOT):
__C.TRAIN.SYNROOT = '/home/yuxiang/Projects/Deep_Pose/data/LOV/data_syn/'
__C.TRAIN.SYNITER = 0
__C.TRAIN.SYNNUM = 80000
__C.TRAIN.SYN_RATIO = 1
__C.TRAIN.SYN_CLASS_INDEX = 1
__C.TRAIN.SYN_TNEAR = 0.5
__C.TRAIN.SYN_TFAR = 2.0
__C.TRAIN.SYN_BACKGROUND_SPECIFIC = False
__C.TRAIN.SYN_BACKGROUND_SUBTRACT_MEAN = False
__C.TRAIN.SYN_BACKGROUND_CONSTANT_PROB = 0.1
__C.TRAIN.SYN_BACKGROUND_AFFINE = False
__C.TRAIN.SYN_SAMPLE_OBJECT = True
__C.TRAIN.SYN_SAMPLE_POSE = True
__C.TRAIN.SYN_STD_ROTATION = 15
__C.TRAIN.SYN_STD_TRANSLATION = 0.05
__C.TRAIN.SYN_MIN_OBJECT = 5
__C.TRAIN.SYN_MAX_OBJECT = 8
__C.TRAIN.SYN_TNEAR = 0.5
__C.TRAIN.SYN_TFAR = 2.0
__C.TRAIN.SYN_BOUND = 0.4
__C.TRAIN.SYN_SAMPLE_DISTRACTOR = True
__C.TRAIN.SYN_CROP = False
__C.TRAIN.SYN_CROP_SIZE = 224
__C.TRAIN.SYN_TABLE_PROB = 0.8
# autoencoder
__C.TRAIN.BOOSTRAP_PIXELS = 20
# domain adaptation
__C.TRAIN.ADAPT = False
__C.TRAIN.ADAPT_ROOT = ''
__C.TRAIN.ADAPT_NUM = 400
__C.TRAIN.ADAPT_RATIO = 1
__C.TRAIN.ADAPT_WEIGHT = 0.1
# learning rate
__C.TRAIN.OPTIMIZER = 'MOMENTUM'
__C.TRAIN.LEARNING_RATE = 0.0001
__C.TRAIN.MILESTONES = (100, 150, 200)
__C.TRAIN.MOMENTUM = 0.9
__C.TRAIN.BETA = 0.999
__C.TRAIN.GAMMA = 0.1
__C.TRAIN.SYMSIZE = 0
# voxel grid size
__C.TRAIN.GRID_SIZE = 256
# Scales to compute real features
__C.TRAIN.SCALES_BASE = (0.25, 0.5, 1.0, 2.0, 3.0)
# parameters for data augmentation
__C.TRAIN.CHROMATIC = True
__C.TRAIN.ADD_NOISE = False
# Images to use per minibatch
__C.TRAIN.IMS_PER_BATCH = 2
__C.TRAIN.NUM_STEPS = 5
__C.TRAIN.NUM_UNITS = 64
# Use horizontally-flipped images during training?
__C.TRAIN.USE_FLIPPED = True
# Iterations between snapshots
__C.TRAIN.SNAPSHOT_EPOCHS = 1
# solver.prototxt specifies the snapshot path prefix, this adds an optional
# infix to yield the path: <prefix>[_<infix>]_iters_XYZ.caffemodel
__C.TRAIN.SNAPSHOT_PREFIX = 'caffenet_fast_rcnn'
__C.TRAIN.SNAPSHOT_INFIX = ''
__C.TRAIN.DISPLAY = 20
__C.TRAIN.ITERS = 0
# Whether to add ground truth boxes to the pool when sampling regions
__C.TRAIN.USE_GT = False
# Minibatch size (number of regions of interest [ROIs])
__C.TRAIN.BATCH_SIZE = 128
# Fraction of minibatch that is labeled foreground (i.e. class > 0)
__C.TRAIN.FG_FRACTION = 0.25
# Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)
__C.TRAIN.FG_THRESH = 0.5
__C.TRAIN.FG_THRESH_POSE = 0.2
# Overlap threshold for a ROI to be considered background (class = 0 if
# overlap in [LO, HI))
__C.TRAIN.BG_THRESH_HI = 0.5
__C.TRAIN.BG_THRESH_LO = 0.1
# Use RPN to detect objects
__C.TRAIN.HAS_RPN = True
# IOU >= thresh: positive example
__C.TRAIN.RPN_POSITIVE_OVERLAP = 0.7
# IOU < thresh: negative example
__C.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3
# If an anchor satisfied by positive and negative conditions set to negative
__C.TRAIN.RPN_CLOBBER_POSITIVES = False
# Max number of foreground examples
__C.TRAIN.RPN_FG_FRACTION = 0.5
# Total number of examples
__C.TRAIN.RPN_BATCHSIZE = 256
# NMS threshold used on RPN proposals
__C.TRAIN.RPN_NMS_THRESH = 0.7
# Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TRAIN.RPN_PRE_NMS_TOP_N = 12000
# Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TRAIN.RPN_POST_NMS_TOP_N = 2000
# Deprecated (outside weights)
__C.TRAIN.RPN_BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Give the positive RPN examples weight of p * 1 / {num positives}
# and give negatives a weight of (1 - p)
# Set to -1.0 to use uniform example weighting
__C.TRAIN.RPN_POSITIVE_WEIGHT = -1.0
# Normalize the targets (subtract empirical mean, divide by empirical stddev)
__C.TRAIN.BBOX_NORMALIZE_TARGETS = True
# Deprecated (inside weights)
__C.TRAIN.BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Normalize the targets using "precomputed" (or made up) means and stdevs
# (BBOX_NORMALIZE_TARGETS must also be True)
__C.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED = True
__C.TRAIN.BBOX_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0)
__C.TRAIN.BBOX_NORMALIZE_STDS = (0.1, 0.1, 0.2, 0.2)
# embedding learning
__C.TRAIN.EMBEDDING_PRETRAIN = True
__C.TRAIN.EMBEDDING_SAMPLING = False
__C.TRAIN.EMBEDDING_SAMPLING_NUM = 1000
__C.TRAIN.EMBEDDING_ALPHA = 0.02
__C.TRAIN.EMBEDDING_DELTA = 0.5
__C.TRAIN.EMBEDDING_LAMBDA_INTRA = 1.0
__C.TRAIN.EMBEDDING_LAMBDA_INTER = 1.0
__C.TRAIN.EMBEDDING_CONTRASTIVE = False
__C.TRAIN.EMBEDDING_PIXELWISE = False
__C.TRAIN.EMBEDDING_PROTOTYPE = False
__C.TRAIN.EMBEDDING_METRIC = 'euclidean'
__C.TRAIN.EMBEDDING_NORMALIZATION = True
__C.TRAIN.EMBEDDING_LOSS_WEIGHT_MATCH = 1.0
__C.TRAIN.EMBEDDING_LOSS_WEIGHT_NONMATCH = 1.0
__C.TRAIN.EMBEDDING_LOSS_WEIGHT_BACKGROUND = 1.0
# region refinement network data processing
__C.TRAIN.max_augmentation_tries = 10
# Padding
__C.TRAIN.padding_alpha = 1.0
__C.TRAIN.padding_beta = 4.0
__C.TRAIN.min_padding_percentage = 0.05
__C.TRAIN.max_padding_percentage = 0.5
# Erosion/Dilation
__C.TRAIN.rate_of_morphological_transform = 0.9
__C.TRAIN.label_dilation_alpha = 1.0
__C.TRAIN.label_dilation_beta = 19.0
__C.TRAIN.morphology_max_iters = 3
# Translation
__C.TRAIN.rate_of_translation = 0.7
__C.TRAIN.translation_alpha = 1.0
__C.TRAIN.translation_beta = 19.0
__C.TRAIN.translation_percentage_min = 0.05
# Rotation
__C.TRAIN.rate_of_rotation = 0.7
__C.TRAIN.rotation_angle_max = 10
# ADD
__C.TRAIN.rate_of_label_adding = 0.5
__C.TRAIN.add_percentage_min = 0.1
__C.TRAIN.add_percentage_max = 0.4
# CUTTING
__C.TRAIN.rate_of_label_cutting = 0.3
__C.TRAIN.cut_percentage_min = 0.25
__C.TRAIN.cut_percentage_max = 0.5
# Ellipses
__C.TRAIN.rate_of_ellipses = 0.8
__C.TRAIN.num_ellipses_mean = 3
__C.TRAIN.ellipse_gamma_base_shape = 1.0
__C.TRAIN.ellipse_gamma_base_scale = 1.0
__C.TRAIN.ellipse_size_percentage = 0.1
#
# Testing options
#
__C.TEST = edict()
__C.TEST.GLOBAL_SEARCH = False
__C.TEST.SEGMENTATION = True
__C.TEST.SINGLE_FRAME = False
__C.TEST.VERTEX_REG_2D = False
__C.TEST.VERTEX_REG_3D = False
__C.TEST.VISUALIZE = False
__C.TEST.RANSAC = False
__C.TEST.GAN = False
__C.TEST.POSE_REG = False
__C.TEST.POSE_REFINE = False
__C.TEST.POSE_SDF = True
__C.TEST.POSE_CODEBOOK = False
__C.TEST.SYNTHESIZE = False
__C.TEST.ROS_CAMERA = 'camera'
__C.TEST.DET_THRESHOLD = 0.5
__C.TEST.BUILD_CODEBOOK = False
__C.TEST.IMS_PER_BATCH = 1
__C.TEST.MEAN_SHIFT = False
__C.TEST.CHECK_SIZE = False
__C.TEST.NUM_SDF_ITERATIONS_INIT = 100
__C.TEST.NUM_SDF_ITERATIONS_TRACKING = 50
__C.TEST.SDF_TRANSLATION_REG = 10.0
__C.TEST.SDF_ROTATION_REG = 0.1
__C.TEST.NUM_LOST = 3
__C.TEST.ALIGN_Z_AXIS = False
__C.TEST.GEN_DATA = False
# Hough voting
__C.TEST.HOUGH_LABEL_THRESHOLD = 100
__C.TEST.HOUGH_VOTING_THRESHOLD = -1
__C.TEST.HOUGH_SKIP_PIXELS = -1
__C.TEST.HOUGH_INLIER_THRESHOLD = 0.9
__C.TEST.CLASSES = (0,1,2,3)
__C.TEST.SYMMETRY = (0,0,0,0)
__C.TEST.ITERNUM = 4
# Scales to compute real features
__C.TEST.SCALES_BASE = (0.25, 0.5, 1.0, 2.0, 3.0)
# voxel grid size
__C.TEST.GRID_SIZE = 256
## NMS threshold used on RPN proposals
__C.TEST.RPN_NMS_THRESH = 0.7
# Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TEST.RPN_PRE_NMS_TOP_N = 6000
# Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TEST.RPN_POST_NMS_TOP_N = 300
# Test using bounding-box regressors
__C.TEST.BBOX_REG = True
# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
__C.TEST.NMS = 0.3
# Pixel mean values (BGR order) as a (1, 1, 3) array
# These are the values originally used for training VGG16
__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])
# For reproducibility
__C.RNG_SEED = 3
# A small number that's used many times
__C.EPS = 1e-14
# Root directory of project
__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))
# Place outputs under an experiments directory
__C.EXP_DIR = 'default'
# Default GPU device id
__C.GPU_ID = 0
def get_output_dir(imdb, net):
"""Return the directory where experimental artifacts are placed.
A canonical path is built using the name from an imdb and a network
(if not None).
"""
path = osp.abspath(osp.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, imdb.name))
if net is None:
return path
else:
return osp.join(path, net)
def _merge_a_into_b(a, b):
"""Merge config dictionary a into config dictionary b, clobbering the
options in b whenever they are also specified in a.
"""
if type(a) is not edict:
return
for k, v in a.items():
# a must specify keys that are in b
if k not in b:
raise KeyError('{} is not a valid config key'.format(k))
# the types must match, too
if type(b[k]) is not type(v):
raise ValueError(('Type mismatch ({} vs. {}) '
'for config key: {}').format(type(b[k]),
type(v), k))
# recursively merge dicts
if type(v) is edict:
try:
_merge_a_into_b(a[k], b[k])
except:
print('Error under config key: {}'.format(k))
raise
else:
b[k] = v
def cfg_from_file(filename):
"""Load a config file and merge it into the default options."""
import yaml
with open(filename, 'r') as f:
yaml_cfg = edict(yaml.load(f))
_merge_a_into_b(yaml_cfg, __C)
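# --- Illustrative example (not part of the original file) ---
# Sketch of how an override file interacts with the defaults above: every key in
# the YAML must already exist in __C and its type must match, otherwise
# _merge_a_into_b raises. The file path and the chosen keys are hypothetical, and
# the sketch assumes a PyYAML version where yaml.load(f) still accepts a single
# argument (as the loader above does).
def _demo_cfg_from_file():
    override = (
        "EXP_DIR: my_experiment\n"
        "TRAIN:\n"
        "  LEARNING_RATE: 0.001\n"
        "  IMS_PER_BATCH: 4\n"
    )
    filename = '/tmp/demo_override.yml'   # hypothetical path
    with open(filename, 'w') as f:
        f.write(override)
    cfg_from_file(filename)
    print(cfg.EXP_DIR, cfg.TRAIN.LEARNING_RATE, cfg.TRAIN.IMS_PER_BATCH)
    # -> my_experiment 0.001 4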
def yaml_from_file(filename):
"""Load a config file and merge it into the default options."""
import yaml
with open(filename, 'r') as f:
yaml_cfg = edict(yaml.load(f))
return yaml_cfg
| 12,238 |
Python
| 26.197778 | 83 | 0.672741 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/lib/fcn/test_module.py
|
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
import torch
import torch.nn.functional as F
import numpy as np
from fcn.test_common import _vis_minibatch_segmentation_final
from utils.mean_shift import mean_shift_smart_init
import utils.mask as util_
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __repr__(self):
return '{:.3f} ({:.3f})'.format(self.val, self.avg)
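# --- Illustrative example (not part of the original file) ---
# Typical AverageMeter usage: feed it per-batch values weighted by the batch size
# and read back the latest value plus the running average. The numbers below are
# arbitrary.
def _demo_average_meter():
    meter = AverageMeter()
    for batch_loss, batch_size in [(0.8, 16), (0.6, 16), (0.4, 32)]:
        meter.update(batch_loss, n=batch_size)
    print(meter)    # 0.400 (0.550) -> latest value (running average)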
def clustering_features(features, num_seeds=100):
metric = 'euclidean' # NOTE
height = features.shape[2]
width = features.shape[3]
out_label = torch.zeros((features.shape[0], height, width))
# mean shift clustering
kappa = 20
selected_pixels = []
for j in range(features.shape[0]):
X = features[j].view(features.shape[1], -1)
X = torch.transpose(X, 0, 1)
cluster_labels, selected_indices = mean_shift_smart_init(X, kappa=kappa, num_seeds=num_seeds, max_iters=10, metric=metric)
out_label[j] = cluster_labels.view(height, width)
selected_pixels.append(selected_indices)
return out_label, selected_pixels
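# --- Illustrative example (not part of the original file) ---
# Rough sketch of calling clustering_features() on a random embedding map. In the
# real pipeline the features come from the network on the GPU; the shapes, the
# seed count, and the assumption that mean_shift_smart_init accepts CPU tensors
# are all illustrative and may require a CUDA tensor depending on that utility.
def _demo_clustering_features():
    feats = torch.randn(1, 64, 24, 32)        # [N, C, H, W] per-pixel embeddings
    feats = F.normalize(feats, p=2, dim=1)    # unit-length embedding per pixel
    out_label, selected_pixels = clustering_features(feats, num_seeds=20)
    print(out_label.shape)                    # torch.Size([1, 24, 32])
    print(len(selected_pixels[0]))            # seed pixel indices for image 0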
def crop_rois(rgb, initial_masks, depth):
device = torch.device('cuda:0')
N, H, W = initial_masks.shape
crop_size = 224 # NOTE
padding_percentage = 0.25
mask_ids = torch.unique(initial_masks[0])
if mask_ids[0] == 0:
mask_ids = mask_ids[1:]
num = mask_ids.shape[0]
rgb_crops = torch.zeros((num, 3, crop_size, crop_size), device=device)
rois = torch.zeros((num, 4), device=device)
mask_crops = torch.zeros((num, crop_size, crop_size), device=device)
if depth is not None:
depth_crops = torch.zeros((num, 3, crop_size, crop_size), device=device)
else:
depth_crops = None
for index, mask_id in enumerate(mask_ids):
mask = (initial_masks[0] == mask_id).float() # Shape: [H x W]
x_min, y_min, x_max, y_max = util_.mask_to_tight_box(mask)
x_padding = int(torch.round((x_max - x_min).float() * padding_percentage).item())
y_padding = int(torch.round((y_max - y_min).float() * padding_percentage).item())
# pad and be careful of boundaries
x_min = max(x_min - x_padding, 0)
x_max = min(x_max + x_padding, W-1)
y_min = max(y_min - y_padding, 0)
y_max = min(y_max + y_padding, H-1)
rois[index, 0] = x_min
rois[index, 1] = y_min
rois[index, 2] = x_max
rois[index, 3] = y_max
# crop
rgb_crop = rgb[0, :, y_min:y_max+1, x_min:x_max+1] # [3 x crop_H x crop_W]
mask_crop = mask[y_min:y_max+1, x_min:x_max+1] # [crop_H x crop_W]
if depth is not None:
depth_crop = depth[0, :, y_min:y_max+1, x_min:x_max+1] # [3 x crop_H x crop_W]
# resize
new_size = (crop_size, crop_size)
rgb_crop = F.upsample_bilinear(rgb_crop.unsqueeze(0), new_size)[0] # Shape: [3 x new_H x new_W]
rgb_crops[index] = rgb_crop
mask_crop = F.upsample_nearest(mask_crop.unsqueeze(0).unsqueeze(0), new_size)[0,0] # Shape: [new_H, new_W]
mask_crops[index] = mask_crop
if depth is not None:
depth_crop = F.upsample_bilinear(depth_crop.unsqueeze(0), new_size)[0] # Shape: [3 x new_H x new_W]
depth_crops[index] = depth_crop
return rgb_crops, mask_crops, rois, depth_crops
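# --- Illustrative example (not part of the original file) ---
# Sketch of crop_rois() on a synthetic mask: a single rectangular object is padded
# by 25%, cropped and resized to 224x224. crop_rois() allocates its buffers on
# cuda:0, so this sketch only runs when a GPU is available; the image size and the
# mask rectangle are arbitrary.
def _demo_crop_rois():
    if not torch.cuda.is_available():
        return
    rgb = torch.rand(1, 3, 480, 640, device='cuda:0')
    masks = torch.zeros(1, 480, 640, device='cuda:0')
    masks[0, 100:200, 150:300] = 1                      # one object with label 1
    rgb_crops, mask_crops, rois, depth_crops = crop_rois(rgb, masks, depth=None)
    print(rgb_crops.shape, mask_crops.shape, rois)      # [1, 3, 224, 224], [1, 224, 224], padded box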
# labels_crop is the clustering labels from the local patch
def match_label_crop(initial_masks, labels_crop, out_label_crop, rois, depth_crop):
num = labels_crop.shape[0]
for i in range(num):
mask_ids = torch.unique(labels_crop[i])
for index, mask_id in enumerate(mask_ids):
mask = (labels_crop[i] == mask_id).float()
overlap = mask * out_label_crop[i]
percentage = torch.sum(overlap) / torch.sum(mask)
if percentage < 0.5:
labels_crop[i][labels_crop[i] == mask_id] = -1
# sort the local labels
sorted_ids = []
for i in range(num):
if depth_crop is not None:
if torch.sum(labels_crop[i] > -1) > 0:
roi_depth = depth_crop[i, 2][labels_crop[i] > -1]
else:
roi_depth = depth_crop[i, 2]
avg_depth = torch.mean(roi_depth[roi_depth > 0])
sorted_ids.append((i, avg_depth))
else:
x_min = rois[i, 0]
y_min = rois[i, 1]
x_max = rois[i, 2]
y_max = rois[i, 3]
orig_H = y_max - y_min + 1
orig_W = x_max - x_min + 1
roi_size = orig_H * orig_W
sorted_ids.append((i, roi_size))
sorted_ids = sorted(sorted_ids, key=lambda x : x[1], reverse=True)
sorted_ids = [x[0] for x in sorted_ids]
# combine the local labels
refined_masks = torch.zeros_like(initial_masks).float()
count = 0
for index in sorted_ids:
mask_ids = torch.unique(labels_crop[index])
if mask_ids[0] == -1:
mask_ids = mask_ids[1:]
# mapping
label_crop = torch.zeros_like(labels_crop[index])
for mask_id in mask_ids:
count += 1
label_crop[labels_crop[index] == mask_id] = count
# resize back to original size
x_min = int(rois[index, 0].item())
y_min = int(rois[index, 1].item())
x_max = int(rois[index, 2].item())
y_max = int(rois[index, 3].item())
orig_H = int(y_max - y_min + 1)
orig_W = int(x_max - x_min + 1)
mask = label_crop.unsqueeze(0).unsqueeze(0).float()
resized_mask = F.upsample_nearest(mask, (orig_H, orig_W))[0, 0]
# Set refined mask
h_idx, w_idx = torch.nonzero(resized_mask).t()
refined_masks[0, y_min:y_max+1, x_min:x_max+1][h_idx, w_idx] = resized_mask[h_idx, w_idx].cpu()
return refined_masks, labels_crop
# filter labels on zero depths
def filter_labels_depth(labels, depth, threshold):
labels_new = labels.clone()
for i in range(labels.shape[0]):
label = labels[i]
mask_ids = torch.unique(label)
if mask_ids[0] == 0:
mask_ids = mask_ids[1:]
for index, mask_id in enumerate(mask_ids):
mask = (label == mask_id).float()
roi_depth = depth[i, 2][label == mask_id]
depth_percentage = torch.sum(roi_depth > 0).float() / torch.sum(mask)
if depth_percentage < threshold:
labels_new[i][label == mask_id] = 0
return labels_new
# filter labels inside boxes
def filter_labels(labels, bboxes):
labels_new = labels.clone()
height = labels.shape[1]
width = labels.shape[2]
for i in range(labels.shape[0]):
label = labels[i]
bbox = bboxes[i].numpy()
bbox_mask = torch.zeros_like(label)
for j in range(bbox.shape[0]):
x1 = max(int(bbox[j, 0]), 0)
y1 = max(int(bbox[j, 1]), 0)
x2 = min(int(bbox[j, 2]), width-1)
y2 = min(int(bbox[j, 3]), height-1)
bbox_mask[y1:y2, x1:x2] = 1
mask_ids = torch.unique(label)
if mask_ids[0] == 0:
mask_ids = mask_ids[1:]
for index, mask_id in enumerate(mask_ids):
mask = (label == mask_id).float()
percentage = torch.sum(mask * bbox_mask) / torch.sum(mask)
if percentage > 0.8:
labels_new[i][label == mask_id] = 0
return labels_new
def compute_xyz(depth_img, fx, fy, px, py, height, width):
indices = util_.build_matrix_of_indices(height, width)
z_e = depth_img
x_e = (indices[..., 1] - px) * z_e / fx
y_e = (indices[..., 0] - py) * z_e / fy
xyz_img = np.stack([x_e, y_e, z_e], axis=-1) # Shape: [H x W x 3]
return xyz_img
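# --- Illustrative example (not part of the original file) ---
# compute_xyz() backprojects a metric depth map into an organized point cloud with
# the pinhole model x = (u - px) * z / fx, y = (v - py) * z / fy. The intrinsics
# below are made up for illustration.
def _demo_compute_xyz():
    depth_m = np.full((480, 640), 1.0, dtype=np.float32)          # flat scene 1 m away
    xyz = compute_xyz(depth_m, fx=600.0, fy=600.0, px=320.0, py=240.0,
                      height=480, width=640)
    print(xyz.shape)        # (480, 640, 3)
    print(xyz[240, 320])    # principal point -> [0. 0. 1.]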
def transform_by_matrix( points, mat, is_vec=False, is_point_image=False ):
"""
Args:
points: np.array [N, 3]
mat: np.array [4, 4]
is_vec: bool
Returns:
trans_points: np.array [N, 3]
"""
rot = mat[:3, :3]
w, h = mat.shape
if w == 3 and h == 3:
m = np.identity(4)
m[:3,:3] = rot
mat = m
if is_point_image:
trans_points = np.einsum('ij,abj->abi', rot, points )
else:
trans_points = np.einsum('ij,aj->ai', rot, points )
if not is_vec:
trans = mat[:3, 3]
trans_points += trans
return trans_points
def depth_to_pc(depth, cam_intrinsic, cam_to_world=None):
"""
Args:
depth: np.array [w, h, 3]
cam_intrinsic: np.array [3, 3]
cam_to_world: np.array [3, 3]
with_noise: bool
Returns:
pointcloud: np.array [w, h, 3]
"""
depth = depth.transpose(1, 0)
w, h = depth.shape
u0 = cam_intrinsic[0,2]
v0 = cam_intrinsic[1,2]
fx = cam_intrinsic[0, 0]
fy = cam_intrinsic[1, 1]
v, u = np.meshgrid( range(h), range(w) )
z = depth
x = (u - u0) * z / fx
y = (v - v0) * z / fy
z = z.reshape(w, h, 1)
x = x.reshape(w, h, 1)
y = y.reshape(w, h, 1)
depth = depth.transpose(1, 0)
# 640 * 480 * 3
ret = np.concatenate([x,y,z], axis=-1).astype('float32')
# translate to world coordinate
if cam_to_world is not None:
ret = transform_by_matrix(ret, cam_to_world, is_point_image=True)
ret = ret.transpose(1, 0, 2)
return ret
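# --- Illustrative example (not part of the original file) ---
# depth_to_pc() does the same backprojection but reads fx, fy and the principal
# point out of a 3x3 intrinsic matrix and can optionally map the result into the
# world frame. The intrinsics and depth below are made up for illustration.
def _demo_depth_to_pc():
    cam_K = np.array([[600.0, 0.0, 320.0],
                      [0.0, 600.0, 240.0],
                      [0.0, 0.0, 1.0]])
    depth_m = np.full((480, 640), 2.0, dtype=np.float32)
    pc = depth_to_pc(depth_m, cam_K)
    print(pc.shape)        # (480, 640, 3)
    print(pc[240, 320])    # principal point -> [0. 0. 2.]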
def img_process(rgb, depth, camera_params):
PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])
im_tensor = torch.from_numpy(rgb) / 255.0
pixel_mean = torch.tensor(PIXEL_MEANS / 255.0).float()
im_tensor -= pixel_mean
image_blob = im_tensor.permute(2, 0, 1)
image_blob = image_blob.unsqueeze(0)
# rgb is expected in BGR order (it is offset by the BGR PIXEL_MEANS); depth is in meters
if depth is not None:
# height = depth.shape[0]
# width = depth.shape[1]
# fx = camera_params['fx']
# fy = camera_params['fy']
# px = camera_params['x_offset']
# py = camera_params['y_offset']
# xyz_img = compute_xyz(depth, fx, fy, px, py, height, width)
xyz_img = depth_to_pc(depth, camera_params['cam'])
depth_blob = torch.from_numpy(xyz_img).permute(2, 0, 1)
depth_blob = depth_blob.unsqueeze(0)
else:
depth_blob = None
return image_blob, depth_blob
def img_segment(rgb, dep, network, network_crop, device, visual=False):
# construct input
image = rgb.cuda(device=device)
if dep is not None:
depth = dep.cuda(device=device)
else:
depth = None
label = None
# run network
features = network(image, label, depth).detach()
out_label, selected_pixels = clustering_features(features, num_seeds=100)
if depth is not None:
# filter labels on zero depth
out_label = filter_labels_depth(out_label, depth, 0.8)
# zoom in refinement
out_label_refined = None
if network_crop is not None:
rgb_crop, out_label_crop, rois, depth_crop = crop_rois(image, out_label.clone(), depth)
if rgb_crop.shape[0] > 0:
features_crop = network_crop(rgb_crop, out_label_crop, depth_crop)
labels_crop, selected_pixels_crop = clustering_features(features_crop)
out_label_refined, labels_crop = match_label_crop(out_label, labels_crop.cuda(), out_label_crop, rois, depth_crop)
if visual:
bbox = None
_vis_minibatch_segmentation_final(image, depth, label, out_label, out_label_refined, features,
selected_pixels=selected_pixels, bbox=bbox)
return out_label, out_label_refined
| 11,962 |
Python
| 31.508152 | 130 | 0.568467 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/lib/fcn/__init__.py
|
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
| 180 |
Python
| 44.249989 | 83 | 0.772222 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/lib/fcn/test_dataset.py
|
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
import torch
import torch.nn as nn
import torch.nn.functional as F
import time
import sys, os
import numpy as np
import cv2
import scipy.io
import matplotlib.pyplot as plt
from fcn.config import cfg
from fcn.test_common import _vis_minibatch_segmentation, _vis_features, _vis_minibatch_segmentation_final
from transforms3d.quaternions import mat2quat, quat2mat, qmult
from utils.mean_shift import mean_shift_smart_init
from utils.evaluation import multilabel_metrics
import utils.mask as util_
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __repr__(self):
return '{:.3f} ({:.3f})'.format(self.val, self.avg)
def clustering_features(features, num_seeds=100):
metric = cfg.TRAIN.EMBEDDING_METRIC
height = features.shape[2]
width = features.shape[3]
out_label = torch.zeros((features.shape[0], height, width))
# mean shift clustering
kappa = 20
selected_pixels = []
for j in range(features.shape[0]):
X = features[j].view(features.shape[1], -1)
X = torch.transpose(X, 0, 1)
cluster_labels, selected_indices = mean_shift_smart_init(X, kappa=kappa, num_seeds=num_seeds, max_iters=10, metric=metric)
out_label[j] = cluster_labels.view(height, width)
selected_pixels.append(selected_indices)
return out_label, selected_pixels
def crop_rois(rgb, initial_masks, depth):
N, H, W = initial_masks.shape
crop_size = cfg.TRAIN.SYN_CROP_SIZE
padding_percentage = 0.25
mask_ids = torch.unique(initial_masks[0])
if mask_ids[0] == 0:
mask_ids = mask_ids[1:]
num = mask_ids.shape[0]
rgb_crops = torch.zeros((num, 3, crop_size, crop_size), device=cfg.device)
rois = torch.zeros((num, 4), device=cfg.device)
mask_crops = torch.zeros((num, crop_size, crop_size), device=cfg.device)
if depth is not None:
depth_crops = torch.zeros((num, 3, crop_size, crop_size), device=cfg.device)
else:
depth_crops = None
for index, mask_id in enumerate(mask_ids):
mask = (initial_masks[0] == mask_id).float() # Shape: [H x W]
x_min, y_min, x_max, y_max = util_.mask_to_tight_box(mask)
x_padding = int(torch.round((x_max - x_min).float() * padding_percentage).item())
y_padding = int(torch.round((y_max - y_min).float() * padding_percentage).item())
# pad and be careful of boundaries
x_min = max(x_min - x_padding, 0)
x_max = min(x_max + x_padding, W-1)
y_min = max(y_min - y_padding, 0)
y_max = min(y_max + y_padding, H-1)
rois[index, 0] = x_min
rois[index, 1] = y_min
rois[index, 2] = x_max
rois[index, 3] = y_max
# crop
rgb_crop = rgb[0, :, y_min:y_max+1, x_min:x_max+1] # [3 x crop_H x crop_W]
mask_crop = mask[y_min:y_max+1, x_min:x_max+1] # [crop_H x crop_W]
if depth is not None:
depth_crop = depth[0, :, y_min:y_max+1, x_min:x_max+1] # [3 x crop_H x crop_W]
# resize
new_size = (crop_size, crop_size)
rgb_crop = F.upsample_bilinear(rgb_crop.unsqueeze(0), new_size)[0] # Shape: [3 x new_H x new_W]
rgb_crops[index] = rgb_crop
mask_crop = F.upsample_nearest(mask_crop.unsqueeze(0).unsqueeze(0), new_size)[0,0] # Shape: [new_H, new_W]
mask_crops[index] = mask_crop
if depth is not None:
depth_crop = F.upsample_bilinear(depth_crop.unsqueeze(0), new_size)[0] # Shape: [3 x new_H x new_W]
depth_crops[index] = depth_crop
return rgb_crops, mask_crops, rois, depth_crops
# labels_crop is the clustering labels from the local patch
def match_label_crop(initial_masks, labels_crop, out_label_crop, rois, depth_crop):
num = labels_crop.shape[0]
for i in range(num):
mask_ids = torch.unique(labels_crop[i])
for index, mask_id in enumerate(mask_ids):
mask = (labels_crop[i] == mask_id).float()
overlap = mask * out_label_crop[i]
percentage = torch.sum(overlap) / torch.sum(mask)
if percentage < 0.5:
labels_crop[i][labels_crop[i] == mask_id] = -1
# sort the local labels
sorted_ids = []
for i in range(num):
if depth_crop is not None:
if torch.sum(labels_crop[i] > -1) > 0:
roi_depth = depth_crop[i, 2][labels_crop[i] > -1]
else:
roi_depth = depth_crop[i, 2]
avg_depth = torch.mean(roi_depth[roi_depth > 0])
sorted_ids.append((i, avg_depth))
else:
x_min = rois[i, 0]
y_min = rois[i, 1]
x_max = rois[i, 2]
y_max = rois[i, 3]
orig_H = y_max - y_min + 1
orig_W = x_max - x_min + 1
roi_size = orig_H * orig_W
sorted_ids.append((i, roi_size))
sorted_ids = sorted(sorted_ids, key=lambda x : x[1], reverse=True)
sorted_ids = [x[0] for x in sorted_ids]
# combine the local labels
refined_masks = torch.zeros_like(initial_masks).float()
count = 0
for index in sorted_ids:
mask_ids = torch.unique(labels_crop[index])
if mask_ids[0] == -1:
mask_ids = mask_ids[1:]
# mapping
label_crop = torch.zeros_like(labels_crop[index])
for mask_id in mask_ids:
count += 1
label_crop[labels_crop[index] == mask_id] = count
# resize back to original size
x_min = int(rois[index, 0].item())
y_min = int(rois[index, 1].item())
x_max = int(rois[index, 2].item())
y_max = int(rois[index, 3].item())
orig_H = int(y_max - y_min + 1)
orig_W = int(x_max - x_min + 1)
mask = label_crop.unsqueeze(0).unsqueeze(0).float()
resized_mask = F.upsample_nearest(mask, (orig_H, orig_W))[0, 0]
# Set refined mask
h_idx, w_idx = torch.nonzero(resized_mask).t()
refined_masks[0, y_min:y_max+1, x_min:x_max+1][h_idx, w_idx] = resized_mask[h_idx, w_idx].cpu()
return refined_masks, labels_crop
# filter labels on zero depths
def filter_labels_depth(labels, depth, threshold):
labels_new = labels.clone()
for i in range(labels.shape[0]):
label = labels[i]
mask_ids = torch.unique(label)
if mask_ids[0] == 0:
mask_ids = mask_ids[1:]
for index, mask_id in enumerate(mask_ids):
mask = (label == mask_id).float()
roi_depth = depth[i, 2][label == mask_id]
depth_percentage = torch.sum(roi_depth > 0).float() / torch.sum(mask)
if depth_percentage < threshold:
labels_new[i][label == mask_id] = 0
return labels_new
# filter labels inside boxes
def filter_labels(labels, bboxes):
labels_new = labels.clone()
height = labels.shape[1]
width = labels.shape[2]
for i in range(labels.shape[0]):
label = labels[i]
bbox = bboxes[i].numpy()
bbox_mask = torch.zeros_like(label)
for j in range(bbox.shape[0]):
x1 = max(int(bbox[j, 0]), 0)
y1 = max(int(bbox[j, 1]), 0)
x2 = min(int(bbox[j, 2]), width-1)
y2 = min(int(bbox[j, 3]), height-1)
bbox_mask[y1:y2, x1:x2] = 1
mask_ids = torch.unique(label)
if mask_ids[0] == 0:
mask_ids = mask_ids[1:]
for index, mask_id in enumerate(mask_ids):
mask = (label == mask_id).float()
percentage = torch.sum(mask * bbox_mask) / torch.sum(mask)
if percentage > 0.8:
labels_new[i][label == mask_id] = 0
return labels_new
# test a single sample
def test_sample(sample, network, network_crop):
# construct input
image = sample['image_color'].cuda()
if cfg.INPUT == 'DEPTH' or cfg.INPUT == 'RGBD':
depth = sample['depth'].cuda()
else:
depth = None
if 'label' in sample:
label = sample['label'].cuda()
else:
label = None
# run network
features = network(image, label, depth).detach()
out_label, selected_pixels = clustering_features(features, num_seeds=100)
if depth is not None:
# filter labels on zero depth
out_label = filter_labels_depth(out_label, depth, 0.8)
# zoom in refinement
out_label_refined = None
if network_crop is not None:
rgb_crop, out_label_crop, rois, depth_crop = crop_rois(image, out_label.clone(), depth)
if rgb_crop.shape[0] > 0:
features_crop = network_crop(rgb_crop, out_label_crop, depth_crop)
labels_crop, selected_pixels_crop = clustering_features(features_crop)
out_label_refined, labels_crop = match_label_crop(out_label, labels_crop.cuda(), out_label_crop, rois, depth_crop)
if cfg.TEST.VISUALIZE:
bbox = None
_vis_minibatch_segmentation_final(image, depth, label, out_label, out_label_refined, features,
selected_pixels=selected_pixels, bbox=bbox)
return out_label, out_label_refined
# test a dataset
def test_segnet(test_loader, network, output_dir, network_crop):
batch_time = AverageMeter()
epoch_size = len(test_loader)
# switch to test mode
network.eval()
if network_crop is not None:
network_crop.eval()
metrics_all = []
metrics_all_refined = []
for i, sample in enumerate(test_loader):
end = time.time()
# construct input
image = sample['image_color'].cuda()
if cfg.INPUT == 'DEPTH' or cfg.INPUT == 'RGBD':
depth = sample['depth'].cuda()
else:
depth = None
label = sample['label'].cuda()
# run network
features = network(image, label, depth).detach()
out_label, selected_pixels = clustering_features(features, num_seeds=100)
if 'ocid' in test_loader.dataset.name and depth is not None:
# filter labels on zero depth
out_label = filter_labels_depth(out_label, depth, 0.5)
if 'osd' in test_loader.dataset.name and depth is not None:
# filter labels on zero depth
out_label = filter_labels_depth(out_label, depth, 0.8)
# evaluation
gt = sample['label'].squeeze().numpy()
prediction = out_label.squeeze().detach().cpu().numpy()
metrics = multilabel_metrics(prediction, gt)
metrics_all.append(metrics)
print(metrics)
# zoom in refinement
out_label_refined = None
if network_crop is not None:
rgb_crop, out_label_crop, rois, depth_crop = crop_rois(image, out_label.clone(), depth)
if rgb_crop.shape[0] > 0:
features_crop = network_crop(rgb_crop, out_label_crop, depth_crop)
labels_crop, selected_pixels_crop = clustering_features(features_crop)
out_label_refined, labels_crop = match_label_crop(out_label, labels_crop.cuda(), out_label_crop, rois, depth_crop)
# evaluation
if out_label_refined is not None:
prediction_refined = out_label_refined.squeeze().detach().cpu().numpy()
else:
prediction_refined = prediction.copy()
metrics_refined = multilabel_metrics(prediction_refined, gt)
metrics_all_refined.append(metrics_refined)
print(metrics_refined)
if cfg.TEST.VISUALIZE:
_vis_minibatch_segmentation(image, depth, label, out_label, out_label_refined, features,
selected_pixels=selected_pixels, bbox=None)
else:
# save results
result = {'labels': prediction, 'labels_refined': prediction_refined, 'filename': sample['filename']}
filename = os.path.join(output_dir, '%06d.mat' % i)
print(filename)
scipy.io.savemat(filename, result, do_compression=True)
# measure elapsed time
batch_time.update(time.time() - end)
print('[%d/%d], batch time %.2f' % (i, epoch_size, batch_time.val))
# sum the values with same keys
print('========================================================')
result = {}
num = len(metrics_all)
print('%d images' % num)
print('========================================================')
for metrics in metrics_all:
for k in metrics.keys():
result[k] = result.get(k, 0) + metrics[k]
for k in sorted(result.keys()):
result[k] /= num
print('%s: %f' % (k, result[k]))
print('%.6f' % (result['Objects Precision']))
print('%.6f' % (result['Objects Recall']))
print('%.6f' % (result['Objects F-measure']))
print('%.6f' % (result['Boundary Precision']))
print('%.6f' % (result['Boundary Recall']))
print('%.6f' % (result['Boundary F-measure']))
print('%.6f' % (result['obj_detected_075_percentage']))
print('========================================================')
print(result)
print('====================Refined=============================')
result_refined = {}
for metrics in metrics_all_refined:
for k in metrics.keys():
result_refined[k] = result_refined.get(k, 0) + metrics[k]
for k in sorted(result_refined.keys()):
result_refined[k] /= num
print('%s: %f' % (k, result_refined[k]))
print(result_refined)
print('========================================================')
| 13,840 |
Python
| 35.232984 | 130 | 0.575795 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/lib/fcn/train.py
|
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
import torch
import torch.nn as nn
import time
import sys, os
import numpy as np
import matplotlib.pyplot as plt
from fcn.config import cfg
from fcn.test_common import _vis_minibatch_segmentation
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __repr__(self):
return '{:.3f} ({:.3f})'.format(self.val, self.avg)
def train_segnet(train_loader, network, optimizer, epoch):
batch_time = AverageMeter()
epoch_size = len(train_loader)
# switch to train mode
network.train()
for i, sample in enumerate(train_loader):
end = time.time()
# construct input
image = sample['image_color'].cuda()
if cfg.INPUT == 'DEPTH' or cfg.INPUT == 'RGBD':
depth = sample['depth'].cuda()
else:
depth = None
label = sample['label'].cuda()
loss, intra_cluster_loss, inter_cluster_loss, features = network(image, label, depth)
loss = torch.sum(loss)
intra_cluster_loss = torch.sum(intra_cluster_loss)
inter_cluster_loss = torch.sum(inter_cluster_loss)
out_label = None
if cfg.TRAIN.VISUALIZE:
_vis_minibatch_segmentation(image, depth, label, out_label, features=features)
# compute gradient and do optimization step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
print('[%d/%d][%d/%d], loss %.4f, loss intra: %.4f, loss_inter %.4f, lr %.6f, time %.2f' \
% (epoch, cfg.epochs, i, epoch_size, loss, intra_cluster_loss, inter_cluster_loss, optimizer.param_groups[0]['lr'], batch_time.val))
cfg.TRAIN.ITERS += 1
| 2,231 |
Python
| 27.987013 | 144 | 0.609592 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/lib/networks/embedding.py
|
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
import sys, os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
########## Embedding Loss ##########
def zero_diagonal(x):
""" Sets diagonal elements of x to 0
@param x: a [batch_size x S x S] torch.FloatTensor
"""
S = x.shape[1]
return x * (1- torch.eye(S).to(x.device))
def compute_cluster_mean(x, cluster_masks, K, normalize):
""" Computes the spherical mean of a set of unit vectors. This is a PyTorch implementation
The definition of spherical mean is minimizes cosine similarity
to a set of points instead of squared error.
Solves this problem:
argmax_{||w||^2 <= 1} (sum_i x_i)^T w
Turns out the solution is: S_n / ||S_n||, where S_n = sum_i x_i.
If S_n = 0, w can be anything.
@param x: a [batch_size x C x H x W] torch.FloatTensor of N NORMALIZED C-dimensional unit vectors
@param cluster_masks: a [batch_size x K x H x W] torch.FloatTensor of ground truth cluster assignments in {0, ..., K-1}.
Note: cluster -1 (i.e. no cluster assignment) is ignored
@param K: number of clusters
@return: a [batch_size x C x K] torch.FloatTensor of NORMALIZED cluster means
"""
batch_size, C = x.shape[:2]
cluster_means = torch.zeros((batch_size, C, K), device=x.device)
for k in range(K):
mask = (cluster_masks == k).float() # Shape: [batch_size x 1 x H x W]
# adding 1e-10 because if mask has nothing, it'll hit NaNs
# * here is broadcasting
cluster_means[:,:,k] = torch.sum(x * mask, dim=[2, 3]) / (torch.sum(mask, dim=[2, 3]) + 1e-10)
# normalize to compute spherical mean
if normalize:
cluster_means = F.normalize(cluster_means, p=2, dim=1) # Note, if any vector is zeros, F.normalize will return the zero vector
return cluster_means
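# --- Illustrative example (not part of the original file) ---
# Tiny worked example of compute_cluster_mean(): a 2x2 "image" with 2-D unit
# embeddings, top row assigned to cluster 0 and bottom row to cluster 1. The
# spherical mean of each cluster is just its summed vector, renormalized.
def _demo_compute_cluster_mean():
    x = torch.tensor([[[[1.0, 1.0], [0.0, 0.0]],
                       [[0.0, 0.0], [1.0, 1.0]]]])              # [1, C=2, H=2, W=2]
    cluster_masks = torch.tensor([[[[0.0, 0.0], [1.0, 1.0]]]])  # [1, 1, 2, 2]
    means = compute_cluster_mean(x, cluster_masks, K=2, normalize=True)
    print(means[0, :, 0])   # cluster 0 mean -> tensor([1., 0.])
    print(means[0, :, 1])   # cluster 1 mean -> tensor([0., 1.])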
class EmbeddingLoss(nn.Module):
def __init__(self, alpha, delta, lambda_intra, lambda_inter, metric='cosine', normalize=True):
super(EmbeddingLoss, self).__init__()
self.alpha = alpha
self.delta = delta
self.lambda_intra = lambda_intra
self.lambda_inter = lambda_inter
self.metric = metric
self.normalize = normalize
def forward(self, x, cluster_masks):
""" Compute the clustering loss. Assumes the batch is a sequence of consecutive frames
@param x: a [batch_size x C x H x W] torch.FloatTensor of pixel embeddings
@param cluster_masks: a [batch_size x 1 x H x W] torch.FloatTensor of ground truth cluster assignments in {0, ..., K-1}
"""
batch_size = x.shape[0]
K = int(cluster_masks.max().item()) + 1
# Compute cluster means across batch dimension
cluster_means = compute_cluster_mean(x, cluster_masks, K, self.normalize) # Shape: [batch_size x C x K]
### Intra cluster loss ###
# Tile the cluster means appropriately. Also calculate number of pixels per mask for pixel weighting
tiled_cluster_means = torch.zeros_like(x, device=x.device) # Shape: [batch_size x C x H x W]
for k in range(K):
mask = (cluster_masks == k).float() # Shape: [batch_size x 1 x H x W]
tiled_cluster_means += mask * cluster_means[:,:,k].unsqueeze(2).unsqueeze(3)
# ignore label -1
labeled_embeddings = (cluster_masks >= 0).squeeze(1).float() # Shape: [batch_size x H x W]
# Compute distance to cluster center
if self.metric == 'cosine':
intra_cluster_distances = labeled_embeddings * (0.5 * (1 - torch.sum(x * tiled_cluster_means, dim=1))) # Shape: [batch_size x H x W]
elif self.metric == 'euclidean':
intra_cluster_distances = labeled_embeddings * (torch.norm(x - tiled_cluster_means, dim=1))
# Hard Negative Mining
intra_cluster_mask = (intra_cluster_distances - self.alpha) > 0
intra_cluster_mask = intra_cluster_mask.float()
if torch.sum(intra_cluster_mask) > 0:
intra_cluster_loss = torch.pow(intra_cluster_distances, 2)
# calculate datapoint_weights
datapoint_weights = torch.zeros((batch_size,) + intra_cluster_distances.shape[1:], device=x.device)
for k in range(K):
# find number of datapoints in cluster k that are > alpha away from cluster center
mask = (cluster_masks == k).float().squeeze(1) # Shape: [batch_size x H x W]
N_k = torch.sum((intra_cluster_distances > self.alpha).float() * mask, dim=[1, 2], keepdim=True) # Shape: [batch_size x 1 x 1]
datapoint_weights += mask * N_k
datapoint_weights = torch.max(datapoint_weights, torch.FloatTensor([50]).to(x.device)) # Max it with 50 so it doesn't get too small
datapoint_weights *= K
intra_cluster_loss = torch.sum(intra_cluster_loss / datapoint_weights) / batch_size
else:
intra_cluster_loss = torch.sum(Variable(torch.zeros(1, device=x.device), requires_grad=True))
intra_cluster_loss = self.lambda_intra * intra_cluster_loss
### Inter cluster loss ###
if K > 1:
if self.metric == 'cosine':
# Shape: [batch_size x K x K]
inter_cluster_distances = .5 * (1 - torch.sum(cluster_means.unsqueeze(2) * cluster_means.unsqueeze(3), dim=1))
elif self.metric == 'euclidean':
inter_cluster_distances = torch.norm(cluster_means.unsqueeze(2) - cluster_means.unsqueeze(3), dim=1)
inter_cluster_loss = torch.sum(torch.pow(torch.clamp(zero_diagonal(self.delta - inter_cluster_distances), min=0), 2)) / (K*(K-1)/2 * batch_size)
inter_cluster_loss = self.lambda_inter * inter_cluster_loss
else:
inter_cluster_loss = torch.sum(Variable(torch.zeros(1, device=x.device), requires_grad=True))
loss = intra_cluster_loss + inter_cluster_loss
return loss, intra_cluster_loss, inter_cluster_loss
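# --- Illustrative example (not part of the original file) ---
# Sketch of evaluating EmbeddingLoss on random normalized embeddings with two
# ground-truth clusters (left/right image halves). alpha, delta and the lambda
# weights follow the config defaults; 'cosine' is one of the two supported
# metrics, and all tensor sizes here are arbitrary.
def _demo_embedding_loss():
    torch.manual_seed(0)
    x = F.normalize(torch.randn(1, 8, 32, 32), p=2, dim=1)   # [N, C, H, W] unit embeddings
    masks = torch.zeros(1, 1, 32, 32)
    masks[:, :, :, 16:] = 1                                   # two clusters: left = 0, right = 1
    criterion = EmbeddingLoss(alpha=0.02, delta=0.5, lambda_intra=1.0,
                              lambda_inter=1.0, metric='cosine', normalize=True)
    loss, intra, inter = criterion(x, masks)
    print(loss.item(), intra.item(), inter.item())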
| 6,266 |
Python
| 45.768656 | 156 | 0.620811 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/lib/networks/resnet.py
|
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import numpy as np
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1, dilation=1):
"3x3 convolution with padding"
kernel_size = np.asarray((3, 3))
# Compute the size of the upsampled filter with
# a specified dilation rate.
upsampled_kernel_size = (kernel_size - 1) * (dilation - 1) + kernel_size
# Determine the padding that is necessary for full padding,
# meaning the output spatial size is equal to input spatial size
full_padding = (upsampled_kernel_size - 1) // 2
# Conv2d doesn't accept numpy arrays as arguments
full_padding, kernel_size = tuple(full_padding), tuple(kernel_size)
return nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=full_padding, dilation=dilation, bias=False)
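# --- Illustrative example (not part of the original file) ---
# The "full padding" computed above equals the dilation rate for a 3x3 kernel, so
# with stride 1 the spatial size is preserved for any dilation. Quick check on a
# dummy feature map; the channel counts and sizes are arbitrary.
def _demo_conv3x3():
    import torch
    x = torch.randn(1, 16, 33, 45)
    for dilation in (1, 2, 4):
        y = conv3x3(16, 32, stride=1, dilation=dilation)(x)
        print(dilation, tuple(y.shape))   # spatial dims stay (33, 45)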
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride, dilation=dilation)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes, dilation=dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes, stride=stride, dilation=dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self,
block,
layers,
num_classes=1000,
fully_conv=False,
remove_avg_pool_layer=False,
output_stride=32,
input_channels=3,
additional_blocks=0,
multi_grid=(1,1,1) ):
# Add additional variables to track
# output stride. Necessary to achieve
# specified output stride.
self.output_stride = output_stride
self.current_stride = 4
self.current_dilation = 1
self.remove_avg_pool_layer = remove_avg_pool_layer
self.inplanes = 64
self.fully_conv = fully_conv
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(input_channels, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2, multi_grid=multi_grid)
self.additional_blocks = additional_blocks
if additional_blocks == 1:
self.layer5 = self._make_layer(block, 512, layers[3], stride=2, multi_grid=multi_grid)
if additional_blocks == 2:
self.layer5 = self._make_layer(block, 512, layers[3], stride=2, multi_grid=multi_grid)
self.layer6 = self._make_layer(block, 512, layers[3], stride=2, multi_grid=multi_grid)
if additional_blocks == 3:
self.layer5 = self._make_layer(block, 512, layers[3], stride=2, multi_grid=multi_grid)
self.layer6 = self._make_layer(block, 512, layers[3], stride=2, multi_grid=multi_grid)
self.layer7 = self._make_layer(block, 512, layers[3], stride=2, multi_grid=multi_grid)
self.avgpool = nn.AvgPool2d(7)
self.fc = nn.Linear(512 * block.expansion, num_classes)
if self.fully_conv:
self.avgpool = nn.AvgPool2d(7, padding=3, stride=1)
# In the latest unstable torch 4.0 the tensor.copy_
# method was changed and doesn't work as it used to be
#self.fc = nn.Conv2d(512 * block.expansion, num_classes, 1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self,
block,
planes,
blocks,
stride=1,
multi_grid=None):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
# Check if we already achieved desired output stride.
if self.current_stride == self.output_stride:
# If so, replace subsampling with a dilation to preserve
# current spatial resolution.
self.current_dilation = self.current_dilation * stride
stride = 1
else:
# If not, perform subsampling and update current
# new output stride.
self.current_stride = self.current_stride * stride
# We don't dilate 1x1 convolution.
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
dilation = multi_grid[0] * self.current_dilation if multi_grid else self.current_dilation
layers.append(block(self.inplanes, planes, stride, downsample, dilation=dilation))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
dilation = multi_grid[i] * self.current_dilation if multi_grid else self.current_dilation
layers.append(block(self.inplanes, planes, dilation=dilation))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
if self.additional_blocks == 1:
x = self.layer5(x)
if self.additional_blocks == 2:
x = self.layer5(x)
x = self.layer6(x)
if self.additional_blocks == 3:
x = self.layer5(x)
x = self.layer6(x)
x = self.layer7(x)
if not self.remove_avg_pool_layer:
x = self.avgpool(x)
if not self.fully_conv:
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
if model.additional_blocks:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']), strict=False)
return model
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def update_model_print(model, data):
model_dict = model.state_dict()
print('model keys')
print('=================================================')
for k, v in model_dict.items():
print(k)
print('=================================================')
if data is not None:
print('data keys')
print('=================================================')
for k, v in data.items():
print(k)
print('=================================================')
pretrained_dict = {k: v for k, v in data.items() if k in model_dict and v.size() == model_dict[k].size()}
print('load the following keys from the pretrained model')
print('=================================================')
for k, v in pretrained_dict.items():
print(k)
print('=================================================')
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
def update_model(model, data):
model_dict = model.state_dict()
if data is not None:
pretrained_dict = {k: v for k, v in data.items() if k in model_dict and v.size() == model_dict[k].size()}
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
if model.additional_blocks:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']), strict=False)
return model
# model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
data = model_zoo.load_url(model_urls['resnet34'])
update_model(model, data)
else:
print('=============no pretrained weights===============')
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
if model.additional_blocks:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']), strict=False)
return model
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
if model.additional_blocks:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']), strict=False)
return model
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
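# Minimal usage sketch (illustrative, not part of the original file): build the dilated,
# fully-convolutional variant that the segmentation backbones in resnet_dilated.py rely on.
# pretrained=False avoids the ImageNet checkpoint download; the scoring layer is swapped for a
# 1x1 conv because fully_conv mode keeps a 4D feature map instead of flattening it.
if __name__ == '__main__':
    import torch
    net = resnet34(pretrained=False, fully_conv=True, output_stride=8,
                   remove_avg_pool_layer=True, num_classes=64)
    net.fc = nn.Conv2d(net.inplanes, 64, kernel_size=1)
    with torch.no_grad():
        out = net(torch.randn(1, 3, 64, 64))
    print(out.shape)  # expected: torch.Size([1, 64, 8, 8]) -- input size / output_stride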
| 13,087 |
Python
| 31.078431 | 113 | 0.548254 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/lib/networks/unets.py
|
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
import torch
import torch.nn as nn
import utils as util_
class Conv2d_GN_ReLU(nn.Module):
""" Implements a module that performs
            conv2d + groupnorm + ReLU
Assumes kernel size is odd
"""
def __init__(self, in_channels, out_channels, num_groups, ksize=3, stride=1):
super(Conv2d_GN_ReLU, self).__init__()
padding = 0 if ksize < 2 else ksize//2
self.conv1 = nn.Conv2d(in_channels, out_channels,
kernel_size=ksize, stride=stride,
padding=padding, bias=False)
self.gn1 = nn.GroupNorm(num_groups, out_channels)
self.relu1 = nn.ReLU(inplace=True)
def forward(self, x):
out = self.conv1(x)
out = self.gn1(out)
out = self.relu1(out)
return out
class Conv2d_GN_ReLUx2(nn.Module):
""" Implements a module that performs
conv2d + groupnorm + ReLU +
conv2d + groupnorm + ReLU
(and a possible downsampling operation)
Assumes kernel size is odd
"""
def __init__(self, in_channels, out_channels, num_groups, ksize=3, stride=1):
super(Conv2d_GN_ReLUx2, self).__init__()
self.layer1 = Conv2d_GN_ReLU(in_channels, out_channels,
num_groups, ksize=ksize, stride=stride)
self.layer2 = Conv2d_GN_ReLU(out_channels, out_channels,
num_groups, ksize=ksize, stride=stride)
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
return out
class Upsample_Concat_Conv2d_GN_ReLU_Multi_Branch(nn.Module):
""" Implements a module that performs
Upsample (reduction: conv2d + groupnorm + ReLU + bilinear_sampling) +
concat + conv2d + groupnorm + ReLU
for the U-Net decoding architecture with an arbitrary number of encoders
The Upsample operation consists of a Conv2d_GN_ReLU that reduces the channels by 2,
followed by bilinear sampling
Note: in_channels is number of channels of ONE of the inputs to the concatenation
"""
def __init__(self, in_channels, out_channels, num_groups, num_encoders, ksize=3, stride=1):
super(Upsample_Concat_Conv2d_GN_ReLU_Multi_Branch, self).__init__()
self.channel_reduction_layer = Conv2d_GN_ReLU(in_channels, in_channels//2, num_groups)
self.upsample = nn.Upsample(scale_factor=2, mode='bilinear')
self.conv_gn_relu = Conv2d_GN_ReLU(int(in_channels//2 * (num_encoders+1)), out_channels, num_groups)
def forward(self, x, skips):
""" Forward module
@param skips: a list of intermediate skip-layer torch tensors from each encoder
"""
x = self.channel_reduction_layer(x)
x = self.upsample(x)
out = torch.cat([x] + skips, dim=1) # Concat on channels dimension
out = self.conv_gn_relu(out)
return out
def maxpool2x2(input, ksize=2, stride=2):
"""2x2 max pooling"""
return nn.MaxPool2d(ksize, stride=stride)(input)
# another way to build encoder/decoder
def make_encoder_layers(cfg, in_channels=3, batch_norm=False):
layers = []
output_scale = 1.0
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
output_scale /= 2.0
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.LeakyReLU(negative_slope=0.2,inplace=True)]
else:
layers += [conv2d, nn.LeakyReLU(negative_slope=0.2,inplace=True)]
in_channels = v
return nn.Sequential(*layers), in_channels, output_scale
def make_decoder_layers(cfg, in_channels, batch_norm=False):
layers = []
for i in range(len(cfg)):
v = cfg[i]
if type(v) is str:
if v[0] == 'd':
v = int(v[1:])
convtrans2d = nn.ConvTranspose2d(in_channels, v, kernel_size=4, stride=2, padding=1)
if batch_norm:
layers += [convtrans2d, nn.BatchNorm2d(v), nn.LeakyReLU(negative_slope=0.2, inplace=True)]
else:
layers += [convtrans2d, nn.LeakyReLU(negative_slope=0.2, inplace=True)]
in_channels = v
elif v[0] == 'c':
v = int(v[1:])
layers += [nn.Conv2d(in_channels, v, kernel_size=3, padding=1)]
elif v[0] == 'D':
layers += [nn.Dropout(p=0.2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.LeakyReLU(negative_slope=0.2, inplace=True)]
else:
# no relu for the last layer for embedding
if i == len(cfg) - 1:
layers += [conv2d]
else:
layers += [conv2d, nn.LeakyReLU(negative_slope=0.2, inplace=True)]
in_channels = v
return nn.Sequential(*layers)
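# Illustrative note (hypothetical config, not from this repo): the decoder cfg tokens mean
#   'dN' -> ConvTranspose2d up-sampling to N channels, plain int N -> 3x3 Conv2d to N channels,
#   'cN' -> plain 3x3 Conv2d to N channels (no activation), 'D' -> Dropout(p=0.2).
# For example, make_decoder_layers(['d64', 64, 'c2'], in_channels=128) builds
#   ConvTranspose2d(128, 64) + LeakyReLU, Conv2d(64, 64) + LeakyReLU, Conv2d(64, 2).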
################## Network Definitions ##################
class UNet_Encoder(nn.Module):
def __init__(self, input_channels, feature_dim):
super(UNet_Encoder, self).__init__()
self.ic = input_channels
self.fd = feature_dim
self.build_network()
def build_network(self):
""" Build encoder network
Uses a U-Net-like architecture
"""
### Encoder ###
self.layer1 = Conv2d_GN_ReLUx2(self.ic, self.fd, self.fd)
self.layer2 = Conv2d_GN_ReLUx2(self.fd, self.fd*2, self.fd)
self.layer3 = Conv2d_GN_ReLUx2(self.fd*2, self.fd*4, self.fd)
self.layer4 = Conv2d_GN_ReLUx2(self.fd*4, self.fd*8, self.fd)
self.last_layer = Conv2d_GN_ReLU(self.fd*8, self.fd*16, self.fd)
def forward(self, images):
x1 = self.layer1(images)
mp_x1 = maxpool2x2(x1)
x2 = self.layer2(mp_x1)
mp_x2 = maxpool2x2(x2)
x3 = self.layer3(mp_x2)
mp_x3 = maxpool2x2(x3)
x4 = self.layer4(mp_x3)
mp_x4 = maxpool2x2(x4)
x5 = self.last_layer(mp_x4)
return x5, [x1, x2, x3, x4]
class UNet_Decoder(nn.Module):
""" A U-Net decoder that allows for multiple encoders
"""
def __init__(self, num_encoders, feature_dim, coordconv=False):
super(UNet_Decoder, self).__init__()
self.ne = num_encoders
self.fd = feature_dim
self.coordconv = coordconv
self.build_network()
def build_network(self):
""" Build a decoder network
Uses a U-Net-like architecture
"""
# Fusion layer
self.fuse_layer = Conv2d_GN_ReLU(self.fd*16 * self.ne, self.fd*16, self.fd, ksize=1)
# Decoding
self.layer1 = Upsample_Concat_Conv2d_GN_ReLU_Multi_Branch(self.fd*16, self.fd*8, self.fd, self.ne)
self.layer2 = Upsample_Concat_Conv2d_GN_ReLU_Multi_Branch(self.fd*8, self.fd*4, self.fd, self.ne)
self.layer3 = Upsample_Concat_Conv2d_GN_ReLU_Multi_Branch(self.fd*4, self.fd*2, self.fd, self.ne)
self.layer4 = Upsample_Concat_Conv2d_GN_ReLU_Multi_Branch(self.fd*2, self.fd, self.fd, self.ne)
# Final layer
self.layer5 = Conv2d_GN_ReLU(self.fd, self.fd, self.fd)
if self.coordconv:
# Extra 1x1 Conv layers for CoordConv
self.layer6 = Conv2d_GN_ReLUx2(self.fd+2, self.fd, self.fd, ksize=1)
self.layer7 = Conv2d_GN_ReLUx2(self.fd, self.fd, self.fd, ksize=1)
# This puts features everywhere, not just nonnegative orthant
self.last_conv = nn.Conv2d(self.fd, self.fd, kernel_size=3,
stride=1, padding=1, bias=True)
def forward(self, encoder_list):
""" Forward module
@param encoder_list: a list of tuples
each tuple includes 2 elements:
- output of encoder: an [N x C x H x W] torch tensor
- list of intermediate outputs: a list of 4 torch tensors
"""
# Apply fusion layer to the concatenation of encoder outputs
out = torch.cat([x[0] for x in encoder_list], dim=1) # Concatenate on channels dimension
out = self.fuse_layer(out)
out = self.layer1(out, [x[1][3] for x in encoder_list])
out = self.layer2(out, [x[1][2] for x in encoder_list])
out = self.layer3(out, [x[1][1] for x in encoder_list])
out = self.layer4(out, [x[1][0] for x in encoder_list])
out = self.layer5(out)
if self.coordconv:
out = util_.concatenate_spatial_coordinates(out)
out = self.layer6(out)
out = self.layer7(out)
out = self.last_conv(out)
return out
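# Minimal wiring sketch (illustrative, not part of the original file): a single encoder feeding
# the decoder.  H and W should be divisible by 16 so the four maxpool/upsample stages line up.
if __name__ == '__main__':
    rgb = torch.randn(2, 3, 64, 64)
    encoder = UNet_Encoder(input_channels=3, feature_dim=16)
    decoder = UNet_Decoder(num_encoders=1, feature_dim=16)
    features = decoder([encoder(rgb)])  # encoder returns (bottleneck, [skip1, ..., skip4])
    print(features.shape)  # expected: torch.Size([2, 16, 64, 64])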
| 9,089 |
Python
| 36.561983 | 110 | 0.57355 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/lib/networks/resnet_dilated.py
|
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
import numpy as np
import torch.nn as nn
from . import resnet as models
def adjust_input_image_size_for_proper_feature_alignment(input_img_batch, output_stride=8):
"""Resizes the input image to allow proper feature alignment during the
forward propagation.
Resizes the input image to a closest multiple of `output_stride` + 1.
This allows the proper alignment of features.
To get more details, read here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/resnet_v1.py#L159
Parameters
----------
input_img_batch : torch.Tensor
Tensor containing a single input image of size (1, 3, h, w)
output_stride : int
Output stride of the network where the input image batch
will be fed.
Returns
-------
input_img_batch_new_size : torch.Tensor
Resized input image batch tensor
"""
    input_spatial_dims = np.asarray(input_img_batch.shape[2:], dtype=float)
# Comments about proper alignment can be found here
# https://github.com/tensorflow/models/blob/master/research/slim/nets/resnet_v1.py#L159
    new_spatial_dims = np.ceil(input_spatial_dims / output_stride).astype(int) * output_stride + 1
# Converting the numpy to list, torch.nn.functional.upsample_bilinear accepts
# size in the list representation.
new_spatial_dims = list(new_spatial_dims)
input_img_batch_new_size = nn.functional.upsample_bilinear(input=input_img_batch,
size=new_spatial_dims)
return input_img_batch_new_size
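# Worked example (illustrative): with output_stride=8, a 480x640 input becomes
# ceil(480/8)*8 + 1 = 481 by ceil(640/8)*8 + 1 = 641, i.e. a multiple of the stride plus one.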
class Resnet101_8s(nn.Module):
def __init__(self, num_classes=1000):
super(Resnet101_8s, self).__init__()
# Load the pretrained weights, remove avg pool
# layer and get the output stride of 8
resnet101_8s = models.resnet101(fully_conv=True,
pretrained=True,
output_stride=8,
remove_avg_pool_layer=True)
# Randomly initialize the 1x1 Conv scoring layer
resnet101_8s.fc = nn.Conv2d(resnet101_8s.inplanes, num_classes, 1)
self.resnet101_8s = resnet101_8s
self._normal_initialization(self.resnet101_8s.fc)
def _normal_initialization(self, layer):
layer.weight.data.normal_(0, 0.01)
layer.bias.data.zero_()
def forward(self, x):
input_spatial_dim = x.size()[2:]
x = self.resnet101_8s(x)
x = nn.functional.upsample_bilinear(input=x, size=input_spatial_dim)
return x
class Resnet18_8s(nn.Module):
def __init__(self, num_classes=1000):
super(Resnet18_8s, self).__init__()
# Load the pretrained weights, remove avg pool
# layer and get the output stride of 8
resnet18_8s = models.resnet18(fully_conv=True,
pretrained=True,
output_stride=8,
remove_avg_pool_layer=True)
# Randomly initialize the 1x1 Conv scoring layer
resnet18_8s.fc = nn.Conv2d(resnet18_8s.inplanes, num_classes, 1)
self.resnet18_8s = resnet18_8s
self._normal_initialization(self.resnet18_8s.fc)
def _normal_initialization(self, layer):
layer.weight.data.normal_(0, 0.01)
layer.bias.data.zero_()
def forward(self, x, feature_alignment=False):
input_spatial_dim = x.size()[2:]
if feature_alignment:
x = adjust_input_image_size_for_proper_feature_alignment(x, output_stride=8)
x = self.resnet18_8s(x)
x = nn.functional.upsample(x, size=input_spatial_dim, mode='bilinear', align_corners=True)
#x = nn.functional.upsample_bilinear(input=x, size=input_spatial_dim)#, align_corners=False)
return x
class Resnet18_16s(nn.Module):
def __init__(self, num_classes=1000):
super(Resnet18_16s, self).__init__()
# Load the pretrained weights, remove avg pool
# layer and get the output stride of 16
resnet18_16s = models.resnet18(fully_conv=True,
pretrained=True,
output_stride=16,
remove_avg_pool_layer=True)
# Randomly initialize the 1x1 Conv scoring layer
resnet18_16s.fc = nn.Conv2d(resnet18_16s.inplanes, num_classes, 1)
self.resnet18_16s = resnet18_16s
self._normal_initialization(self.resnet18_16s.fc)
def _normal_initialization(self, layer):
layer.weight.data.normal_(0, 0.01)
layer.bias.data.zero_()
def forward(self, x):
input_spatial_dim = x.size()[2:]
x = self.resnet18_16s(x)
x = nn.functional.upsample_bilinear(input=x, size=input_spatial_dim)
return x
class Resnet18_32s(nn.Module):
def __init__(self, num_classes=1000):
super(Resnet18_32s, self).__init__()
# Load the pretrained weights, remove avg pool
# layer and get the output stride of 8
resnet18_32s = models.resnet18(fully_conv=True,
pretrained=True,
output_stride=32,
remove_avg_pool_layer=True)
# Randomly initialize the 1x1 Conv scoring layer
resnet18_32s.fc = nn.Conv2d(resnet18_32s.inplanes, num_classes, 1)
self.resnet18_32s = resnet18_32s
self._normal_initialization(self.resnet18_32s.fc)
def _normal_initialization(self, layer):
layer.weight.data.normal_(0, 0.01)
layer.bias.data.zero_()
def forward(self, x):
input_spatial_dim = x.size()[2:]
x = self.resnet18_32s(x)
x = nn.functional.upsample_bilinear(input=x, size=input_spatial_dim)
return x
class Resnet34_32s(nn.Module):
def __init__(self, num_classes=1000):
super(Resnet34_32s, self).__init__()
# Load the pretrained weights, remove avg pool
# layer and get the output stride of 8
resnet34_32s = models.resnet34(fully_conv=True,
pretrained=True,
output_stride=32,
remove_avg_pool_layer=True)
# Randomly initialize the 1x1 Conv scoring layer
resnet34_32s.fc = nn.Conv2d(resnet34_32s.inplanes, num_classes, 1)
self.resnet34_32s = resnet34_32s
self._normal_initialization(self.resnet34_32s.fc)
def _normal_initialization(self, layer):
layer.weight.data.normal_(0, 0.01)
layer.bias.data.zero_()
def forward(self, x):
input_spatial_dim = x.size()[2:]
x = self.resnet34_32s(x)
x = nn.functional.upsample_bilinear(input=x, size=input_spatial_dim)
return x
class Resnet34_16s(nn.Module):
def __init__(self, num_classes=1000):
super(Resnet34_16s, self).__init__()
# Load the pretrained weights, remove avg pool
# layer and get the output stride of 8
resnet34_16s = models.resnet34(fully_conv=True,
pretrained=True,
output_stride=16,
remove_avg_pool_layer=True)
# Randomly initialize the 1x1 Conv scoring layer
resnet34_16s.fc = nn.Conv2d(resnet34_16s.inplanes, num_classes, 1)
self.resnet34_16s = resnet34_16s
self._normal_initialization(self.resnet34_16s.fc)
def _normal_initialization(self, layer):
layer.weight.data.normal_(0, 0.01)
layer.bias.data.zero_()
def forward(self, x):
input_spatial_dim = x.size()[2:]
x = self.resnet34_16s(x)
x = nn.functional.upsample_bilinear(input=x, size=input_spatial_dim)
return x
class Resnet34_8s(nn.Module):
def __init__(self, num_classes=1000, input_channels=3, pretrained=True):
super(Resnet34_8s, self).__init__()
# Load the pretrained weights, remove avg pool
# layer and get the output stride of 8
resnet34_8s = models.resnet34(fully_conv=True,
pretrained=pretrained,
output_stride=8,
input_channels=input_channels,
remove_avg_pool_layer=True)
# Randomly initialize the 1x1 Conv scoring layer
resnet34_8s.fc = nn.Conv2d(resnet34_8s.inplanes, num_classes, 1)
self.resnet34_8s = resnet34_8s
self._normal_initialization(self.resnet34_8s.fc)
def _normal_initialization(self, layer):
layer.weight.data.normal_(0, 0.01)
layer.bias.data.zero_()
def forward(self, x, feature_alignment=False):
input_spatial_dim = x.size()[2:]
if feature_alignment:
x = adjust_input_image_size_for_proper_feature_alignment(x, output_stride=8)
x = self.resnet34_8s(x)
x = nn.functional.upsample_bilinear(input=x, size=input_spatial_dim)
return x
class Resnet34_8s_fc(nn.Module):
def __init__(self, num_classes=1000):
super(Resnet34_8s_fc, self).__init__()
# Load the pretrained weights, remove avg pool
# layer and get the output stride of 8
resnet34_8s = models.resnet34(fully_conv=False,
pretrained=True,
output_stride=8,
remove_avg_pool_layer=False)
# Randomly initialize the new fc layer
resnet34_8s.fc = nn.Linear(resnet34_8s.inplanes * 4, num_classes)
self.resnet34_8s = resnet34_8s
self._normal_initialization(self.resnet34_8s.fc)
def _normal_initialization(self, layer):
layer.weight.data.normal_(0, 0.01)
layer.bias.data.zero_()
def forward(self, x, feature_alignment=False):
if feature_alignment:
x = adjust_input_image_size_for_proper_feature_alignment(x, output_stride=8)
x = self.resnet34_8s(x)
return x
class Resnet50_32s(nn.Module):
def __init__(self, num_classes=1000):
super(Resnet50_32s, self).__init__()
# Load the pretrained weights, remove avg pool
# layer and get the output stride of 8
resnet50_32s = models.resnet50(fully_conv=True,
pretrained=True,
output_stride=32,
remove_avg_pool_layer=True)
# Randomly initialize the 1x1 Conv scoring layer
resnet50_32s.fc = nn.Conv2d(resnet50_32s.inplanes, num_classes, 1)
self.resnet50_32s = resnet50_32s
self._normal_initialization(self.resnet50_32s.fc)
def _normal_initialization(self, layer):
layer.weight.data.normal_(0, 0.01)
layer.bias.data.zero_()
def forward(self, x):
input_spatial_dim = x.size()[2:]
x = self.resnet50_32s(x)
x = nn.functional.upsample_bilinear(input=x, size=input_spatial_dim)
return x
class Resnet50_16s(nn.Module):
def __init__(self, num_classes=1000):
super(Resnet50_16s, self).__init__()
# Load the pretrained weights, remove avg pool
# layer and get the output stride of 16
resnet50_8s = models.resnet50(fully_conv=True,
pretrained=True,
output_stride=16,
remove_avg_pool_layer=True)
# Randomly initialize the 1x1 Conv scoring layer
resnet50_8s.fc = nn.Conv2d(resnet50_8s.inplanes, num_classes, 1)
self.resnet50_8s = resnet50_8s
self._normal_initialization(self.resnet50_8s.fc)
def _normal_initialization(self, layer):
layer.weight.data.normal_(0, 0.01)
layer.bias.data.zero_()
def forward(self, x):
input_spatial_dim = x.size()[2:]
x = self.resnet50_8s(x)
x = nn.functional.upsample_bilinear(input=x, size=input_spatial_dim)
return x
class Resnet50_8s(nn.Module):
def __init__(self, num_classes=1000):
super(Resnet50_8s, self).__init__()
# Load the pretrained weights, remove avg pool
# layer and get the output stride of 8
resnet50_8s = models.resnet50(fully_conv=True,
pretrained=True,
output_stride=8,
remove_avg_pool_layer=True)
# Randomly initialize the 1x1 Conv scoring layer
resnet50_8s.fc = nn.Conv2d(resnet50_8s.inplanes, num_classes, 1)
self.resnet50_8s = resnet50_8s
self._normal_initialization(self.resnet50_8s.fc)
def _normal_initialization(self, layer):
layer.weight.data.normal_(0, 0.01)
layer.bias.data.zero_()
def forward(self, x):
input_spatial_dim = x.size()[2:]
x = self.resnet50_8s(x)
x = nn.functional.upsample_bilinear(input=x, size=input_spatial_dim)
return x
class Resnet9_8s(nn.Module):
# Gets ~ 46 MIOU on Pascal Voc
def __init__(self, num_classes=1000):
super(Resnet9_8s, self).__init__()
# Load the pretrained weights, remove avg pool
# layer and get the output stride of 8
resnet18_8s = models.resnet18(fully_conv=True,
pretrained=True,
output_stride=8,
remove_avg_pool_layer=True)
# Randomly initialize the 1x1 Conv scoring layer
resnet18_8s.fc = nn.Conv2d(resnet18_8s.inplanes, num_classes, 1)
self.resnet18_8s = resnet18_8s
self._normal_initialization(self.resnet18_8s.fc)
def _normal_initialization(self, layer):
layer.weight.data.normal_(0, 0.01)
layer.bias.data.zero_()
def forward(self, x):
input_spatial_dim = x.size()[2:]
x = self.resnet18_8s.conv1(x)
x = self.resnet18_8s.bn1(x)
x = self.resnet18_8s.relu(x)
x = self.resnet18_8s.maxpool(x)
x = self.resnet18_8s.layer1[0](x)
x = self.resnet18_8s.layer2[0](x)
x = self.resnet18_8s.layer3[0](x)
x = self.resnet18_8s.layer4[0](x)
x = self.resnet18_8s.fc(x)
x = nn.functional.upsample_bilinear(input=x, size=input_spatial_dim)
return x
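# Minimal usage sketch (illustrative, not part of the original file): the 8-stride ResNet-34 head
# used as an embedding backbone.  pretrained=False here avoids the ImageNet download.
if __name__ == '__main__':
    import torch
    backbone = Resnet34_8s(num_classes=64, pretrained=False)
    with torch.no_grad():
        feats = backbone(torch.randn(1, 3, 64, 48))
    print(feats.shape)  # expected: torch.Size([1, 64, 64, 48]) -- upsampled back to the input size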
| 16,162 |
Python
| 30.142582 | 101 | 0.53205 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/lib/networks/utils.py
|
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
import numpy as np
import torch
def log_softmax_high_dimension(input):
num_classes = input.size()[1]
m = torch.max(input, dim=1, keepdim=True)[0]
if input.dim() == 4:
d = input - m.repeat(1, num_classes, 1, 1)
else:
d = input - m.repeat(1, num_classes)
e = torch.exp(d)
s = torch.sum(e, dim=1, keepdim=True)
if input.dim() == 4:
output = d - torch.log(s.repeat(1, num_classes, 1, 1))
else:
output = d - torch.log(s.repeat(1, num_classes))
return output
def softmax_high_dimension(input):
num_classes = input.size()[1]
m = torch.max(input, dim=1, keepdim=True)[0]
if input.dim() == 4:
e = torch.exp(input - m.repeat(1, num_classes, 1, 1))
else:
e = torch.exp(input - m.repeat(1, num_classes))
s = torch.sum(e, dim=1, keepdim=True)
if input.dim() == 4:
output = torch.div(e, s.repeat(1, num_classes, 1, 1))
else:
output = torch.div(e, s.repeat(1, num_classes))
return output
def concatenate_spatial_coordinates(feature_map):
""" Adds x,y coordinates as channels to feature map
@param feature_map: a [T x C x H x W] torch tensor
"""
T, C, H, W = feature_map.shape
    # Build the matrix of indices, then replicate it T times.
    # (build_matrix_of_indices is provided by the repo's mask utilities in lib/utils/mask.py.)
    MoI = build_matrix_of_indices(H, W) # Shape: [H, W, 2]
MoI = np.tile(MoI, (T, 1, 1, 1)) # Shape: [T, H, W, 2]
MoI[..., 0] = MoI[..., 0] / (H-1) * 2 - 1 # in [-1, 1]
MoI[..., 1] = MoI[..., 1] / (W-1) * 2 - 1
MoI = torch.from_numpy(MoI).permute(0,3,1,2).to(feature_map.device) # Shape: [T, 2, H, W]
# Concatenate on the channels dimension
feature_map = torch.cat([feature_map, MoI], dim=1)
return feature_map
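# Minimal check sketch (illustrative, not part of the original file): the high-dimension helpers
# should agree with torch's built-ins along the class dimension.
if __name__ == '__main__':
    import torch.nn.functional as F
    logits = torch.randn(2, 5, 4, 4)
    assert torch.allclose(softmax_high_dimension(logits), F.softmax(logits, dim=1), atol=1e-6)
    assert torch.allclose(log_softmax_high_dimension(logits), F.log_softmax(logits, dim=1), atol=1e-6)
    print('softmax helpers match F.softmax / F.log_softmax')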
| 1,881 |
Python
| 32.607142 | 93 | 0.58958 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/lib/networks/SEG.py
|
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import copy
from fcn.config import cfg
from networks.utils import log_softmax_high_dimension, softmax_high_dimension
from networks.embedding import EmbeddingLoss
from . import unets
from . import resnet_dilated
__all__ = [
'seg_vgg_embedding', 'seg_unet_embedding', 'seg_resnet34_8s_embedding_early',
'seg_resnet34_8s_embedding', 'seg_resnet50_8s_embedding',
]
encoder_archs = {
'vgg16-based-16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M', 1024, 1024]
}
# Segmentation Network
class SEGNET(nn.Module):
    '''SEGNET: an encoder-decoder network for object segmentation.'''
def __init__(self, init_weights=True, batch_norm=False, in_channels=3,
network_name='vgg', num_units=64, use_coordconv=False):
super(SEGNET, self).__init__()
self.network_name = network_name
self.in_channels = in_channels
self.metric = cfg.TRAIN.EMBEDDING_METRIC
self.normalize = cfg.TRAIN.EMBEDDING_NORMALIZATION
self.input_type = cfg.INPUT
self.fusion_type = cfg.TRAIN.FUSION_TYPE
self.embedding_pretrain = cfg.TRAIN.EMBEDDING_PRETRAIN
# embedding loss
alpha = cfg.TRAIN.EMBEDDING_ALPHA
delta = cfg.TRAIN.EMBEDDING_DELTA
lambda_intra = cfg.TRAIN.EMBEDDING_LAMBDA_INTRA
lambda_inter = cfg.TRAIN.EMBEDDING_LAMBDA_INTER
self.embedding_loss = EmbeddingLoss(alpha, delta, lambda_intra, lambda_inter, self.metric, self.normalize)
decoder_archs = {
'd16': [1024, 'd512', 512, 512, 'D', 'd512', 512, 512, 'D', 'd256', 256, 256, 'd128', 128, 128, 'd64', 64, 64, 'c2'],
'd16-embedding': [1024, 'd512', 512, 512, 'D', 'd512', 512, 512, 'D', 'd256', 256, 256, 'd128', 128, 128, 'd64', 64, num_units],
}
if network_name == 'vgg':
# encoder
en_layers, en_out_channels, en_output_scale = unets.make_encoder_layers(encoder_archs['vgg16-based-16'],
in_channels=in_channels, batch_norm=batch_norm)
self.features = en_layers
# decoder
de_in_channels = int(en_out_channels)
de_layers = unets.make_decoder_layers(decoder_archs['d16-embedding'], de_in_channels, batch_norm=batch_norm)
self.decoder = de_layers
elif network_name == 'unet':
# encoder
self.encoder = unets.UNet_Encoder(input_channels=in_channels, feature_dim=num_units)
# decoder
self.decoder = unets.UNet_Decoder(num_encoders=1, feature_dim=num_units, coordconv=use_coordconv)
else:
self.fcn = getattr(resnet_dilated, network_name)(num_classes=num_units, input_channels=in_channels, pretrained=self.embedding_pretrain)
if self.input_type == 'RGBD' and self.fusion_type != 'early':
self.fcn_depth = getattr(resnet_dilated, network_name)(num_classes=num_units, input_channels=in_channels, pretrained=False)
if init_weights:
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
nn.init.xavier_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, img, label, depth=None):
if self.network_name == 'vgg':
en = self.features(img)
elif self.network_name == 'unet':
en = [self.encoder(img)]
if self.network_name == 'vgg' or self.network_name == 'unet':
features = self.decoder(en)
else:
if self.input_type == 'DEPTH':
features = self.fcn(depth)
elif self.input_type == 'COLOR':
features = self.fcn(img)
elif self.input_type == 'RGBD' and self.fusion_type == 'early':
inputs = torch.cat((img, depth), 1)
features = self.fcn(inputs)
else:
features = self.fcn(img)
features_depth = self.fcn_depth(depth)
if self.fusion_type == 'add':
features = features + features_depth
else:
features = torch.cat((features, features_depth), 1)
# normalization
if self.normalize:
features = F.normalize(features, p=2, dim=1)
if self.training:
loss, intra_cluster_loss, inter_cluster_loss = self.embedding_loss(features, label)
return loss, intra_cluster_loss, inter_cluster_loss, features
else:
return features
def weight_parameters(self):
return [param for name, param in self.named_parameters() if 'weight' in name]
def bias_parameters(self):
return [param for name, param in self.named_parameters() if 'bias' in name]
#############################################################
def update_model_print(model, data):
model_dict = model.state_dict()
print('model keys')
print('=================================================')
for k, v in model_dict.items():
print(k)
print('=================================================')
if data is not None:
print('data keys')
print('=================================================')
data_new = data.copy()
for k, v in data.items():
print(k)
            # legacy: stay compatible with the originally trained model
if 'module.' in k:
data_new[k[7:]] = v
if 'decoder.features.' in k:
new_key = 'decoder.' + k[17:]
data_new[new_key] = v
print('=================================================')
pretrained_dict = {k: v for k, v in data_new.items() if k in model_dict and v.size() == model_dict[k].size()}
print('load the following keys from the pretrained model')
print('=================================================')
for k, v in pretrained_dict.items():
print(k)
print('=================================================')
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
def update_model(model, data):
model_dict = model.state_dict()
if data is not None:
data_new = data.copy()
for k, v in data.items():
            # legacy: stay compatible with the originally trained model
if 'module.' in k:
data_new[k[7:]] = v
if 'decoder.features.' in k:
new_key = 'decoder.' + k[17:]
data_new[new_key] = v
pretrained_dict = {k: v for k, v in data_new.items() if k in model_dict and v.size() == model_dict[k].size()}
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
# feature embedding learning network
def seg_vgg_embedding(num_classes=2, num_units=64, data=None):
model = SEGNET(in_channels=3, network_name='vgg', num_units=num_units)
update_model(model, data)
return model
def seg_unet_embedding(num_classes=2, num_units=64, data=None):
model = SEGNET(in_channels=3, network_name='unet', num_units=num_units)
update_model(model, data)
return model
def seg_resnet34_8s_embedding(num_classes=2, num_units=64, data=None):
model = SEGNET(in_channels=3, network_name='Resnet34_8s', num_units=num_units)
update_model(model, data)
return model
def seg_resnet34_8s_embedding_early(num_classes=2, num_units=64, data=None):
model = SEGNET(in_channels=6, network_name='Resnet34_8s', num_units=num_units)
update_model(model, data)
return model
def seg_resnet50_8s_embedding(num_classes=2, num_units=64, data=None):
model = SEGNET(in_channels=3, network_name='Resnet50_8s', num_units=num_units)
update_model(model, data)
return model
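# Minimal construction sketch (illustrative, not part of the original file).  SEGNET reads its
# hyper-parameters from fcn.config.cfg at build time, so this assumes a config has been loaded
# first (e.g. the repo's default YAML) with INPUT == 'COLOR' and EMBEDDING_PRETRAIN disabled;
# otherwise the forward pass below would also need a depth tensor or a checkpoint download.
if __name__ == '__main__':
    network = seg_resnet34_8s_embedding(num_classes=2, num_units=64, data=None)
    network.eval()
    with torch.no_grad():
        embeddings = network(torch.randn(1, 3, 64, 64), label=None)
    print(embeddings.shape)  # expected: torch.Size([1, 64, 64, 64]) per-pixel embeddings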
| 8,262 |
Python
| 39.504902 | 147 | 0.574316 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/lib/utils/evaluation.py
|
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
from math import floor
import numpy as np
import cv2
# My libraries
from utils import munkres as munkres
BACKGROUND_LABEL = 0
OBJECTS_LABEL = 1
# Code adapted from: https://github.com/davisvideochallenge/davis-2017/blob/master/python/lib/davis/measures/f_boundary.py
def seg2bmap(seg,width=None,height=None):
"""
From a segmentation, compute a binary boundary map with 1 pixel wide
boundaries. The boundary pixels are offset by 1/2 pixel towards the
origin from the actual segment boundary.
Arguments:
seg : Segments labeled from 1..k.
width : Width of desired bmap <= seg.shape[1]
height : Height of desired bmap <= seg.shape[0]
Returns:
bmap (ndarray): Binary boundary map.
David Martin <[email protected]>
January 2003
"""
    seg = seg.astype(bool)
seg[seg>0] = 1
assert np.atleast_3d(seg).shape[2] == 1
width = seg.shape[1] if width is None else width
height = seg.shape[0] if height is None else height
h,w = seg.shape[:2]
ar1 = float(width) / float(height)
ar2 = float(w) / float(h)
    assert not (width > w or height > h or abs(ar1 - ar2) > 0.01),\
            "Can't convert %dx%d seg to %dx%d bmap."%(w,h,width,height)
e = np.zeros_like(seg)
s = np.zeros_like(seg)
se = np.zeros_like(seg)
e[:,:-1] = seg[:,1:]
s[:-1,:] = seg[1:,:]
se[:-1,:-1] = seg[1:,1:]
b = seg^e | seg^s | seg^se
b[-1,:] = seg[-1,:]^e[-1,:]
b[:,-1] = seg[:,-1]^s[:,-1]
b[-1,-1] = 0
# from IPython import embed; embed()
if w == width and h == height:
bmap = b
else:
bmap = np.zeros((height,width))
for x in range(w):
for y in range(h):
if b[y,x]:
j = 1+floor((y-1)+height / h)
i = 1+floor((x-1)+width / h)
bmap[j,i] = 1;
return bmap
# Code adapted from: https://github.com/davisvideochallenge/davis-2017/blob/master/python/lib/davis/measures/f_boundary.py
def boundary_overlap(predicted_mask, gt_mask, bound_th=0.003):
"""
Compute true positives of overlapped masks, using dilated boundaries
Arguments:
predicted_mask (ndarray): binary segmentation image.
gt_mask (ndarray): binary annotated image.
Returns:
overlap (float): IoU overlap of boundaries
"""
assert np.atleast_3d(predicted_mask).shape[2] == 1
bound_pix = bound_th if bound_th >= 1 else \
np.ceil(bound_th*np.linalg.norm(predicted_mask.shape))
# Get the pixel boundaries of both masks
fg_boundary = seg2bmap(predicted_mask);
gt_boundary = seg2bmap(gt_mask);
from skimage.morphology import disk
# Dilate segmentation boundaries
gt_dil = cv2.dilate(gt_boundary.astype(np.uint8), disk(bound_pix), iterations=1)
fg_dil = cv2.dilate(fg_boundary.astype(np.uint8), disk(bound_pix), iterations=1)
# Get the intersection (true positives). Calculate true positives differently for
# precision and recall since we have to dilated the boundaries
fg_match = np.logical_and(fg_boundary, gt_dil)
gt_match = np.logical_and(gt_boundary, fg_dil)
# Return precision_tps, recall_tps (tps = true positives)
return np.sum(fg_match), np.sum(gt_match)
# This function is modeled off of P/R/F measure as described by Dave et al. (arXiv19)
def multilabel_metrics(prediction, gt, obj_detect_threshold=0.75):
""" Compute Overlap and Boundary Precision, Recall, F-measure
Also compute #objects detected, #confident objects detected, #GT objects.
It computes these measures only of objects (2+), not background (0) / table (1).
Uses the Hungarian algorithm to match predicted masks with ground truth masks.
A "confident object" is an object that is predicted with more than 0.75 F-measure
@param gt: a [H x W] numpy.ndarray with ground truth masks
@param prediction: a [H x W] numpy.ndarray with predicted masks
@return: a dictionary with the metrics
"""
### Compute F-measure, True Positive matrices ###
# Get unique OBJECT labels from GT and prediction
labels_gt = np.unique(gt)
labels_gt = labels_gt[~np.isin(labels_gt, [BACKGROUND_LABEL])]
num_labels_gt = labels_gt.shape[0]
labels_pred = np.unique(prediction)
labels_pred = labels_pred[~np.isin(labels_pred, [BACKGROUND_LABEL])]
num_labels_pred = labels_pred.shape[0]
# F-measure, True Positives, Boundary stuff
F = np.zeros((num_labels_gt, num_labels_pred))
true_positives = np.zeros((num_labels_gt, num_labels_pred))
boundary_stuff = np.zeros((num_labels_gt, num_labels_pred, 2))
# Each item of "boundary_stuff" contains: precision true positives, recall true positives
# Edge cases
if (num_labels_pred == 0 and num_labels_gt > 0 ): # all false negatives
return {'Objects F-measure' : 0.,
'Objects Precision' : 1.,
'Objects Recall' : 0.,
'Boundary F-measure' : 0.,
'Boundary Precision' : 1.,
'Boundary Recall' : 0.,
'obj_detected' : num_labels_pred,
'obj_detected_075' : 0.,
'obj_gt' : num_labels_gt,
'obj_detected_075_percentage' : 0.,
}
elif (num_labels_pred > 0 and num_labels_gt == 0 ): # all false positives
return {'Objects F-measure' : 0.,
'Objects Precision' : 0.,
'Objects Recall' : 1.,
'Boundary F-measure' : 0.,
'Boundary Precision' : 0.,
'Boundary Recall' : 1.,
'obj_detected' : num_labels_pred,
'obj_detected_075' : 0.,
'obj_gt' : num_labels_gt,
'obj_detected_075_percentage' : 0.,
}
elif (num_labels_pred == 0 and num_labels_gt == 0 ): # correctly predicted nothing
return {'Objects F-measure' : 1.,
'Objects Precision' : 1.,
'Objects Recall' : 1.,
'Boundary F-measure' : 1.,
'Boundary Precision' : 1.,
'Boundary Recall' : 1.,
'obj_detected' : num_labels_pred,
'obj_detected_075' : 0.,
'obj_gt' : num_labels_gt,
'obj_detected_075_percentage' : 1.,
}
# For every pair of GT label vs. predicted label, calculate stuff
for i, gt_i in enumerate(labels_gt):
gt_i_mask = (gt == gt_i)
for j, pred_j in enumerate(labels_pred):
pred_j_mask = (prediction == pred_j)
### Overlap Stuff ###
# true positive
A = np.logical_and(pred_j_mask, gt_i_mask)
tp = np.int64(np.count_nonzero(A)) # Cast this to numpy.int64 so 0/0 = nan
true_positives[i,j] = tp
# precision
prec = tp/np.count_nonzero(pred_j_mask)
# recall
rec = tp/np.count_nonzero(gt_i_mask)
# F-measure
if prec + rec > 0:
F[i,j] = (2 * prec * rec) / (prec + rec)
### Boundary Stuff ###
boundary_stuff[i,j] = boundary_overlap(pred_j_mask, gt_i_mask)
### More Boundary Stuff ###
boundary_prec_denom = 0. # precision_tps + precision_fps
for pred_j in labels_pred:
pred_mask = (prediction == pred_j)
boundary_prec_denom += np.sum(seg2bmap(pred_mask))
boundary_rec_denom = 0. # recall_tps + recall_fns
for gt_i in labels_gt:
gt_mask = (gt == gt_i)
boundary_rec_denom += np.sum(seg2bmap(gt_mask))
### Compute the Hungarian assignment ###
F[np.isnan(F)] = 0
m = munkres.Munkres()
assignments = m.compute(F.max() - F.copy()) # list of (y,x) indices into F (these are the matchings)
### Compute the number of "detected objects" ###
num_obj_detected = 0
for a in assignments:
if F[a] > obj_detect_threshold:
num_obj_detected += 1
### Compute metrics with assignments ###
idx = tuple(np.array(assignments).T)
# Overlap measures
precision = np.sum(true_positives[idx]) / np.sum(prediction.clip(0,1) == OBJECTS_LABEL)
recall = np.sum(true_positives[idx]) / np.sum(gt.clip(0,1) == OBJECTS_LABEL)
F_measure = (2 * precision * recall) / (precision + recall)
if np.isnan(F_measure): # b/c precision = recall = 0
F_measure = 0
# Boundary measures
boundary_precision = np.sum(boundary_stuff[idx][:,0]) / boundary_prec_denom
boundary_recall = np.sum(boundary_stuff[idx][:,1]) / boundary_rec_denom
boundary_F_measure = (2 * boundary_precision * boundary_recall) / (boundary_precision + boundary_recall)
if np.isnan(boundary_F_measure): # b/c/ precision = recall = 0
boundary_F_measure = 0
return {'Objects F-measure' : F_measure,
'Objects Precision' : precision,
'Objects Recall' : recall,
'Boundary F-measure' : boundary_F_measure,
'Boundary Precision' : boundary_precision,
'Boundary Recall' : boundary_recall,
'obj_detected' : num_labels_pred,
'obj_detected_075' : num_obj_detected,
'obj_gt' : num_labels_gt,
'obj_detected_075_percentage' : num_obj_detected / num_labels_gt,
}
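# Tiny illustrative example (not part of the original file): one ground-truth object that the
# prediction splits into two pieces.  The Hungarian matching assigns the GT object to one of the
# two predicted pieces, so overlap precision/recall land around 0.5 and no object clears the
# 0.75 F-measure threshold.
if __name__ == '__main__':
    gt = np.zeros((40, 40), dtype=np.uint8)
    gt[10:30, 10:30] = 1                 # a single GT object
    pred = np.zeros((40, 40), dtype=np.uint8)
    pred[10:30, 10:20] = 1               # predicted object 1: left half
    pred[10:30, 20:30] = 2               # predicted object 2: right half
    metrics = multilabel_metrics(pred, gt)
    for name in sorted(metrics):
        print('%s: %s' % (name, metrics[name]))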
| 9,614 |
Python
| 36.267442 | 122 | 0.58394 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/lib/utils/mean_shift.py
|
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
import sys
import torch
import torch.nn.functional as F
import numpy as np
from fcn.config import cfg
def ball_kernel(Z, X, kappa, metric='cosine'):
""" Computes pairwise ball kernel (without normalizing constant)
(note this is kernel as defined in non-parametric statistics, not a kernel as in RKHS)
@param Z: a [n x d] torch.FloatTensor of NORMALIZED datapoints - the seeds
@param X: a [m x d] torch.FloatTensor of NORMALIZED datapoints - the points
@return: a [n x m] torch.FloatTensor of pairwise ball kernel computations,
without normalizing constant
"""
if metric == 'euclidean':
distance = Z.unsqueeze(1) - X.unsqueeze(0)
distance = torch.norm(distance, dim=2)
kernel = torch.exp(-kappa * torch.pow(distance, 2))
elif metric == 'cosine':
kernel = torch.exp(kappa * torch.mm(Z, X.t()))
return kernel
def get_label_mode(array):
""" Computes the mode of elements in an array.
Ties don't matter. Ties are broken by the smallest value (np.argmax defaults)
@param array: a numpy array
"""
labels, counts = np.unique(array, return_counts=True)
mode = labels[np.argmax(counts)].item()
return mode
def connected_components(Z, epsilon, metric='cosine'):
"""
For the connected components, we simply perform a nearest neighbor search in order:
for each point, find the points that are up to epsilon away (in cosine distance)
these points are labeled in the same cluster.
@param Z: a [n x d] torch.FloatTensor of NORMALIZED datapoints
@return: a [n] torch.LongTensor of cluster labels
"""
n, d = Z.shape
K = 0
cluster_labels = torch.ones(n, dtype=torch.long) * -1
if Z.is_cuda:
cluster_labels = cluster_labels.cuda()
for i in range(n):
if cluster_labels[i] == -1:
if metric == 'euclidean':
distances = Z.unsqueeze(1) - Z[i:i + 1].unsqueeze(0) # a are points, b are seeds
distances = torch.norm(distances, dim=2)
elif metric == 'cosine':
distances = 0.5 * (1 - torch.mm(Z, Z[i:i+1].t()))
component_seeds = distances[:, 0] <= epsilon
# If at least one component already has a label, then use the mode of the label
if torch.unique(cluster_labels[component_seeds]).shape[0] > 1:
temp = cluster_labels[component_seeds].cpu().numpy()
temp = temp[temp != -1]
label = torch.tensor(get_label_mode(temp))
else:
label = torch.tensor(K)
K += 1 # Increment number of clusters
cluster_labels[component_seeds] = label
return cluster_labels
def seed_hill_climbing_ball(X, Z, kappa, max_iters=10, metric='cosine'):
""" Runs mean shift hill climbing algorithm on the seeds.
The seeds climb the distribution given by the KDE of X
@param X: a [n x d] torch.FloatTensor of d-dim unit vectors
@param Z: a [m x d] torch.FloatTensor of seeds to run mean shift from
@param dist_threshold: parameter for the ball kernel
"""
n, d = X.shape
m = Z.shape[0]
for _iter in range(max_iters):
# Create a new object for Z
new_Z = Z.clone()
W = ball_kernel(Z, X, kappa, metric=metric)
# use this allocated weight to compute the new center
new_Z = torch.mm(W, X) # Shape: [n x d]
# Normalize the update
if metric == 'euclidean':
summed_weights = W.sum(dim=1)
summed_weights = summed_weights.unsqueeze(1)
summed_weights = torch.clamp(summed_weights, min=1.0)
Z = new_Z / summed_weights
elif metric == 'cosine':
Z = F.normalize(new_Z, p=2, dim=1)
return Z
def mean_shift_with_seeds(X, Z, kappa, max_iters=10, metric='cosine'):
""" Runs mean-shift
@param X: a [n x d] torch.FloatTensor of d-dim unit vectors
@param Z: a [m x d] torch.FloatTensor of seeds to run mean shift from
@param dist_threshold: parameter for the von Mises-Fisher distribution
"""
Z = seed_hill_climbing_ball(X, Z, kappa, max_iters=max_iters, metric=metric)
# Connected components
cluster_labels = connected_components(Z, 2 * cfg.TRAIN.EMBEDDING_ALPHA, metric=metric) # Set epsilon = 0.1 = 2*alpha
return cluster_labels, Z
def select_smart_seeds(X, num_seeds, return_selected_indices=False, init_seeds=None, num_init_seeds=None, metric='cosine'):
""" Selects seeds that are as far away as possible
@param X: a [n x d] torch.FloatTensor of d-dim unit vectors
@param num_seeds: number of seeds to pick
@param init_seeds: a [num_seeds x d] vector of initial seeds
@param num_init_seeds: the number of seeds already chosen.
the first num_init_seeds rows of init_seeds have been chosen already
@return: a [num_seeds x d] matrix of seeds
a [n x num_seeds] matrix of distances
"""
n, d = X.shape
selected_indices = -1 * torch.ones(num_seeds, dtype=torch.long)
# Initialize seeds matrix
if init_seeds is None:
seeds = torch.empty((num_seeds, d), device=X.device)
num_chosen_seeds = 0
else:
seeds = init_seeds
num_chosen_seeds = num_init_seeds
# Keep track of distances
distances = torch.empty((n, num_seeds), device=X.device)
if num_chosen_seeds == 0: # Select first seed if need to
selected_seed_index = np.random.randint(0, n)
selected_indices[0] = selected_seed_index
selected_seed = X[selected_seed_index, :]
seeds[0, :] = selected_seed
if metric == 'euclidean':
distances[:, 0] = torch.norm(X - selected_seed.unsqueeze(0), dim=1)
elif metric == 'cosine':
distances[:, 0] = 0.5 * (1 - torch.mm(X, selected_seed.unsqueeze(1))[:,0])
num_chosen_seeds += 1
else: # Calculate distance to each already chosen seed
for i in range(num_chosen_seeds):
if metric == 'euclidean':
distances[:, i] = torch.norm(X - seeds[i:i+1, :], dim=1)
elif metric == 'cosine':
distances[:, i] = 0.5 * (1 - torch.mm(X, seeds[i:i+1, :].t())[:, 0])
# Select rest of seeds
for i in range(num_chosen_seeds, num_seeds):
# Find the point that has the furthest distance from the nearest seed
distance_to_nearest_seed = torch.min(distances[:, :i], dim=1)[0] # Shape: [n]
selected_seed_index = torch.argmax(distance_to_nearest_seed)
selected_indices[i] = selected_seed_index
selected_seed = torch.index_select(X, 0, selected_seed_index)[0, :]
seeds[i, :] = selected_seed
# Calculate distance to this selected seed
if metric == 'euclidean':
distances[:, i] = torch.norm(X - selected_seed.unsqueeze(0), dim=1)
elif metric == 'cosine':
distances[:, i] = 0.5 * (1 - torch.mm(X, selected_seed.unsqueeze(1))[:,0])
return_tuple = (seeds,)
if return_selected_indices:
return_tuple += (selected_indices,)
return return_tuple
def mean_shift_smart_init(X, kappa, num_seeds=100, max_iters=10, metric='cosine'):
""" Runs mean shift with carefully selected seeds
@param X: a [n x d] torch.FloatTensor of d-dim unit vectors
@param dist_threshold: parameter for the von Mises-Fisher distribution
@param num_seeds: number of seeds used for mean shift clustering
@return: a [n] array of cluster labels
"""
n, d = X.shape
seeds, selected_indices = select_smart_seeds(X, num_seeds, return_selected_indices=True, metric=metric)
seed_cluster_labels, updated_seeds = mean_shift_with_seeds(X, seeds, kappa, max_iters=max_iters, metric=metric)
# Get distances to updated seeds
if metric == 'euclidean':
distances = X.unsqueeze(1) - updated_seeds.unsqueeze(0) # a are points, b are seeds
distances = torch.norm(distances, dim=2)
elif metric == 'cosine':
distances = 0.5 * (1 - torch.mm(X, updated_seeds.t())) # Shape: [n x num_seeds]
# Get clusters by assigning point to closest seed
closest_seed_indices = torch.argmin(distances, dim=1) # Shape: [n]
cluster_labels = seed_cluster_labels[closest_seed_indices]
# assign zero to the largest cluster
num = len(torch.unique(seed_cluster_labels))
count = torch.zeros(num, dtype=torch.long)
for i in range(num):
count[i] = (cluster_labels == i).sum()
label_max = torch.argmax(count)
if label_max != 0:
index1 = cluster_labels == 0
index2 = cluster_labels == label_max
cluster_labels[index1] = label_max
cluster_labels[index2] = 0
return cluster_labels, selected_indices
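# Minimal illustration (not part of the original file): farthest-point seed selection followed by
# the ball-kernel hill climbing on two well-separated groups of unit vectors.  It stops short of
# mean_shift_smart_init, whose connected-components step also needs fcn.config for its epsilon.
if __name__ == '__main__':
    torch.manual_seed(0)
    a = F.normalize(torch.randn(50, 8) + torch.tensor([4.0] + [0.0] * 7), dim=1)
    b = F.normalize(torch.randn(50, 8) - torch.tensor([4.0] + [0.0] * 7), dim=1)
    X = torch.cat([a, b], dim=0)
    seeds, picked = select_smart_seeds(X, num_seeds=4, return_selected_indices=True)
    Z = seed_hill_climbing_ball(X, seeds, kappa=30.0)
    print(picked.tolist())     # indices of the farthest-apart seed points
    print(torch.mm(Z, Z.t()))  # roughly +1 within a mode, roughly -1 across the two modes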
| 9,097 |
Python
| 37.880342 | 123 | 0.617236 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/lib/utils/mask.py
|
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
import sys, os
from itertools import compress
import torch
import numpy as np
import matplotlib.pyplot as plt
import scipy
import scipy.io
import cv2
from PIL import Image
def get_color_mask(object_index, nc=None):
""" Colors each index differently. Useful for visualizing semantic masks
@param object_index: a [H x W] numpy array of ints from {0, ..., nc-1}
@param nc: total number of colors. If None, this will be inferred by masks
"""
object_index = object_index.astype(int)
if nc is None:
NUM_COLORS = object_index.max() + 1
else:
NUM_COLORS = nc
cm = plt.get_cmap('gist_rainbow')
colors = [cm(1. * i/NUM_COLORS) for i in range(NUM_COLORS)]
color_mask = np.zeros(object_index.shape + (3,)).astype(np.uint8)
for i in np.unique(object_index):
if i == 0 or i == -1:
continue
color_mask[object_index == i, :] = np.array(colors[i][:3]) * 255
return color_mask
def build_matrix_of_indices(height, width):
""" Builds a [height, width, 2] numpy array containing coordinates.
@return: 3d array B s.t. B[..., 0] contains y-coordinates, B[..., 1] contains x-coordinates
"""
return np.indices((height, width), dtype=np.float32).transpose(1,2,0)
def visualize_segmentation(im, masks, nc=None, return_rgb=False, save_dir=None):
""" Visualize segmentations nicely. Based on code from:
https://github.com/roytseng-tw/Detectron.pytorch/blob/master/lib/utils/vis.py
@param im: a [H x W x 3] RGB image. numpy array of dtype np.uint8
@param masks: a [H x W] numpy array of dtype np.uint8 with values in {0, ..., K}
@param nc: total number of colors. If None, this will be inferred by masks
"""
from matplotlib.patches import Polygon
masks = masks.astype(int)
im = im.copy()
# Generate color mask
if nc is None:
NUM_COLORS = masks.max() + 1
else:
NUM_COLORS = nc
cm = plt.get_cmap('gist_rainbow')
colors = [cm(1. * i/NUM_COLORS) for i in range(NUM_COLORS)]
if not return_rgb:
# matplotlib stuff
fig = plt.figure()
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.axis('off')
fig.add_axes(ax)
# Mask
imgMask = np.zeros(im.shape)
# Draw color masks
for i in np.unique(masks):
if i == 0: # background
continue
# Get the color mask
color_mask = np.array(colors[i][:3])
w_ratio = .4
for c in range(3):
color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio
e = (masks == i)
# Add to the mask
imgMask[e] = color_mask
# Add the mask to the image
imgMask = (imgMask * 255).round().astype(np.uint8)
im = cv2.addWeighted(im, 0.5, imgMask, 0.5, 0.0)
# Draw mask contours
for i in np.unique(masks):
if i == 0: # background
continue
# Get the color mask
color_mask = np.array(colors[i][:3])
w_ratio = .4
for c in range(3):
color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio
e = (masks == i)
# Find contours
try:
contour, hier = cv2.findContours(
e.astype(np.uint8).copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
except:
im2, contour, hier = cv2.findContours(
e.astype(np.uint8).copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
# Plot the nice outline
for c in contour:
if save_dir is None and not return_rgb:
polygon = Polygon(c.reshape((-1, 2)), fill=False, facecolor=color_mask, edgecolor='w', linewidth=1.2, alpha=0.5)
ax.add_patch(polygon)
else:
cv2.drawContours(im, contour, -1, (255,255,255), 2)
if save_dir is None and not return_rgb:
ax.imshow(im)
return fig
elif return_rgb:
return im
elif save_dir is not None:
# Save the image
PIL_image = Image.fromarray(im)
PIL_image.save(save_dir)
return PIL_image
### These two functions were adatped from the DAVIS public dataset ###
def imread_indexed(filename):
""" Load segmentation image (with palette) given filename."""
im = Image.open(filename)
annotation = np.array(im)
return annotation
def imwrite_indexed(filename,array):
""" Save indexed png with palette."""
palette_abspath = '/data/tabletop_dataset_v5/palette.txt' # hard-coded filepath
color_palette = np.loadtxt(palette_abspath, dtype=np.uint8).reshape(-1,3)
if np.atleast_3d(array).shape[2] != 1:
raise Exception("Saving indexed PNGs requires 2D array.")
im = Image.fromarray(array)
im.putpalette(color_palette.ravel())
im.save(filename, format='PNG')
def mask_to_tight_box_numpy(mask):
""" Return bbox given mask
@param mask: a [H x W] numpy array
"""
a = np.transpose(np.nonzero(mask))
bbox = np.min(a[:, 1]), np.min(a[:, 0]), np.max(a[:, 1]), np.max(a[:, 0])
return bbox # x_min, y_min, x_max, y_max
def mask_to_tight_box_pytorch(mask):
""" Return bbox given mask
@param mask: a [H x W] torch tensor
"""
a = torch.nonzero(mask)
bbox = torch.min(a[:, 1]), torch.min(a[:, 0]), torch.max(a[:, 1]), torch.max(a[:, 0])
return bbox # x_min, y_min, x_max, y_max
def mask_to_tight_box(mask):
if type(mask) == torch.Tensor:
return mask_to_tight_box_pytorch(mask)
elif type(mask) == np.ndarray:
return mask_to_tight_box_numpy(mask)
else:
        raise Exception(f"Data type {type(mask)} not understood for mask_to_tight_box...")
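# Small illustrative sketch (not part of the original file): colorize a two-object label image
# and recover each object's tight bounding box (x_min, y_min, x_max, y_max).
if __name__ == '__main__':
    labels = np.zeros((48, 64), dtype=np.uint8)
    labels[5:20, 8:30] = 1
    labels[25:40, 40:60] = 2
    colored = get_color_mask(labels)                    # [48 x 64 x 3] uint8 color image
    print(colored.shape)
    print(mask_to_tight_box(labels == 1))               # expected: (8, 5, 29, 19)
    print(mask_to_tight_box(torch.from_numpy(labels) == 2))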
| 6,026 |
Python
| 29.75 | 128 | 0.592765 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/lib/utils/munkres.py
|
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
# Documentation is intended to be processed by Epydoc.
"""
Introduction
============
The Munkres module provides an implementation of the Munkres algorithm
(also called the Hungarian algorithm or the Kuhn-Munkres algorithm),
useful for solving the Assignment Problem.
Assignment Problem
==================
Let *C* be an *n*\ x\ *n* matrix representing the costs of each of *n* workers
to perform any of *n* jobs. The assignment problem is to assign jobs to
workers in a way that minimizes the total cost. Since each worker can perform
only one job and each job can be assigned to only one worker the assignments
represent an independent set of the matrix *C*.
One way to generate the optimal set is to create all permutations of
the indexes necessary to traverse the matrix so that no row and column
are used more than once. For instance, given this matrix (expressed in
Python)::
matrix = [[5, 9, 1],
[10, 3, 2],
[8, 7, 4]]
You could use this code to generate the traversal indexes::
def permute(a, results):
if len(a) == 1:
results.insert(len(results), a)
else:
for i in range(0, len(a)):
element = a[i]
a_copy = [a[j] for j in range(0, len(a)) if j != i]
subresults = []
permute(a_copy, subresults)
for subresult in subresults:
result = [element] + subresult
results.insert(len(results), result)
results = []
permute(range(len(matrix)), results) # [0, 1, 2] for a 3x3 matrix
After the call to permute(), the results matrix would look like this::
[[0, 1, 2],
[0, 2, 1],
[1, 0, 2],
[1, 2, 0],
[2, 0, 1],
[2, 1, 0]]
You could then use that index matrix to loop over the original cost matrix
and calculate the smallest cost of the combinations::
n = len(matrix)
minval = sys.maxsize
for row in range(n):
cost = 0
for col in range(n):
cost += matrix[row][col]
minval = min(cost, minval)
    print(minval)
While this approach works fine for small matrices, it does not scale. It
executes in O(*n*!) time: Calculating the permutations for an *n*\ x\ *n*
matrix requires *n*! operations. For a 12x12 matrix, that's 479,001,600
traversals. Even if you could manage to perform each traversal in just one
millisecond, it would still take more than 133 hours to perform the entire
traversal. A 20x20 matrix would take 2,432,902,008,176,640,000 operations. At
an optimistic millisecond per operation, that's more than 77 million years.
The Munkres algorithm runs in O(*n*\ ^3) time, rather than O(*n*!). This
package provides an implementation of that algorithm.
This version is based on
http://www.public.iastate.edu/~ddoty/HungarianAlgorithm.html.
This version was written for Python by Brian Clapper from the (Ada) algorithm
at the above web site. (The ``Algorithm::Munkres`` Perl version, in CPAN, was
clearly adapted from the same web site.)
Usage
=====
Construct a Munkres object::
from munkres import Munkres
m = Munkres()
Then use it to compute the lowest cost assignment from a cost matrix. Here's
a sample program::
from munkres import Munkres, print_matrix
matrix = [[5, 9, 1],
[10, 3, 2],
[8, 7, 4]]
m = Munkres()
indexes = m.compute(matrix)
print_matrix(matrix, msg='Lowest cost through this matrix:')
total = 0
for row, column in indexes:
value = matrix[row][column]
total += value
        print('(%d, %d) -> %d' % (row, column, value))
    print('total cost: %d' % total)
Running that program produces::
Lowest cost through this matrix:
[5, 9, 1]
[10, 3, 2]
[8, 7, 4]
(0, 0) -> 5
(1, 1) -> 3
(2, 2) -> 4
    total cost: 12
The instantiated Munkres object can be used multiple times on different
matrices.
Non-square Cost Matrices
========================
The Munkres algorithm assumes that the cost matrix is square. However, it's
possible to use a rectangular matrix if you first pad it with 0 values to make
it square. This module automatically pads rectangular cost matrices to make
them square.
Notes:
- The module operates on a *copy* of the caller's matrix, so any padding will
not be seen by the caller.
- The cost matrix must be rectangular or square. An irregular matrix will
*not* work.
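As a minimal sketch, this module's own test data includes a 3x4 matrix that can
be passed to ``compute()`` directly (in this copy of the module ``pad_matrix``
operates on NumPy arrays, so the sketch passes one; the padding happens on an
internal copy, and the returned indexes always refer to the original rows and
columns)::

    import numpy as np
    from munkres import Munkres

    matrix = np.array([[400, 150, 400, 1],
                       [400, 450, 600, 2],
                       [300, 225, 300, 3]])
    m = Munkres()
    indexes = m.compute(matrix)   # -> [(0, 1), (1, 3), (2, 0)]; total cost 452 (cf. the test matrices below)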
Calculating Profit, Rather than Cost
====================================
The cost matrix is just that: A cost matrix. The Munkres algorithm finds
the combination of elements (one from each row and column) that results in
the smallest cost. It's also possible to use the algorithm to maximize
profit. To do that, however, you have to convert your profit matrix to a
cost matrix. The simplest way to do that is to subtract all elements from a
large value. For example::
from munkres import Munkres, print_matrix
matrix = [[5, 9, 1],
[10, 3, 2],
[8, 7, 4]]
cost_matrix = []
for row in matrix:
cost_row = []
for col in row:
cost_row += [sys.maxsize - col]
cost_matrix += [cost_row]
m = Munkres()
indexes = m.compute(cost_matrix)
print_matrix(matrix, msg='Highest profit through this matrix:')
total = 0
for row, column in indexes:
value = matrix[row][column]
total += value
        print('(%d, %d) -> %d' % (row, column, value))
    print('total profit=%d' % total)
Running that program produces::
Highest profit through this matrix:
[5, 9, 1]
[10, 3, 2]
[8, 7, 4]
(0, 1) -> 9
(1, 0) -> 10
(2, 2) -> 4
total profit=23
The ``munkres`` module provides a convenience method for creating a cost
matrix from a profit matrix. Since it doesn't know whether the matrix contains
floating point numbers, decimals, or integers, you have to provide the
conversion function; but the convenience method takes care of the actual
creation of the cost matrix::
import munkres
cost_matrix = munkres.make_cost_matrix(matrix,
lambda cost: sys.maxsize - cost)
So, the above profit-calculation program can be recast as::
from munkres import Munkres, print_matrix, make_cost_matrix
matrix = [[5, 9, 1],
[10, 3, 2],
[8, 7, 4]]
cost_matrix = make_cost_matrix(matrix, lambda cost: sys.maxsize - cost)
m = Munkres()
indexes = m.compute(cost_matrix)
    print_matrix(matrix, msg='Highest profit through this matrix:')
total = 0
for row, column in indexes:
value = matrix[row][column]
total += value
        print('(%d, %d) -> %d' % (row, column, value))
    print('total profit=%d' % total)
References
==========
1. http://www.public.iastate.edu/~ddoty/HungarianAlgorithm.html
2. Harold W. Kuhn. The Hungarian Method for the assignment problem.
*Naval Research Logistics Quarterly*, 2:83-97, 1955.
3. Harold W. Kuhn. Variants of the Hungarian method for assignment
problems. *Naval Research Logistics Quarterly*, 3: 253-258, 1956.
4. Munkres, J. Algorithms for the Assignment and Transportation Problems.
*Journal of the Society of Industrial and Applied Mathematics*,
5(1):32-38, March, 1957.
5. http://en.wikipedia.org/wiki/Hungarian_algorithm
Copyright and License
=====================
This software is released under a BSD license, adapted from
<http://opensource.org/licenses/bsd-license.php>
Copyright (c) 2008 Brian M. Clapper
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name "clapper.org" nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
__docformat__ = 'restructuredtext'
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import sys
import copy
import numpy as np
# ---------------------------------------------------------------------------
# Exports
# ---------------------------------------------------------------------------
__all__ = ['Munkres', 'make_cost_matrix']
# ---------------------------------------------------------------------------
# Globals
# ---------------------------------------------------------------------------
# Info about the module
__version__ = "1.0.6"
__author__ = "Brian Clapper, [email protected]"
__url__ = "http://software.clapper.org/munkres/"
__copyright__ = "(c) 2008 Brian M. Clapper"
__license__ = "BSD-style license"
# ---------------------------------------------------------------------------
# Classes
# ---------------------------------------------------------------------------
class Munkres:
"""
Calculate the Munkres solution to the classical assignment problem.
See the module documentation for usage.
"""
def __init__(self):
"""Create a new instance"""
self.C = None
self.row_covered = []
self.col_covered = []
self.n = 0
self.Z0_r = 0
self.Z0_c = 0
self.marked = None
self.path = None
def make_cost_matrix(profit_matrix, inversion_function):
"""
**DEPRECATED**
Please use the module function ``make_cost_matrix()``.
"""
import munkres
return munkres.make_cost_matrix(profit_matrix, inversion_function)
make_cost_matrix = staticmethod(make_cost_matrix)
def pad_matrix(self, matrix, pad_value=0):
"""
Pad a possibly non-square matrix to make it square.
:Parameters:
matrix : list of lists
matrix to pad
pad_value : int
value to use to pad the matrix
:rtype: list of lists
:return: a new, possibly padded, matrix
"""
# max_columns = 0
# total_rows = len(matrix)
# for row in matrix:
# max_columns = max(max_columns, len(row))
# total_rows = max(max_columns, total_rows)
# new_matrix = []
# for row in matrix:
# row_len = len(row)
# new_row = row[:]
# if total_rows > row_len:
# # Row too short. Pad it.
# new_row += [0] * (total_rows - row_len)
# new_matrix += [new_row]
# while len(new_matrix) < total_rows:
# new_matrix += [[0] * total_rows]
# return new_matrix
import numpy as np
min_sidelength = np.min(matrix.shape)
max_sidelength = np.max(matrix.shape)
argmin_sidelength = np.argmin(matrix.shape)
if min_sidelength != max_sidelength:
pad_vals = [(0,0), (0,0)]
pad_vals[argmin_sidelength] = (0, max_sidelength - min_sidelength)
new_cm = np.pad(matrix, pad_vals, 'constant')
else:
new_cm = matrix.copy()
return new_cm
def compute(self, cost_matrix):
"""
Compute the indexes for the lowest-cost pairings between rows and
columns in the database. Returns a list of (row, column) tuples
that can be used to traverse the matrix.
:Parameters:
cost_matrix : list of lists
The cost matrix. If this cost matrix is not square, it
will be padded with zeros, via a call to ``pad_matrix()``.
(This method does *not* modify the caller's matrix. It
operates on a copy of the matrix.)
**WARNING**: This code handles square and rectangular
matrices. It does *not* handle irregular matrices.
:rtype: list
:return: A list of ``(row, column)`` tuples that describe the lowest
cost path through the matrix
"""
self.C = self.pad_matrix(cost_matrix)
self.n = len(self.C)
self.original_length = len(cost_matrix)
self.original_width = len(cost_matrix[0])
self.row_covered = [False for i in range(self.n)]
self.col_covered = [False for i in range(self.n)]
self.Z0_r = 0
self.Z0_c = 0
self.path = self.__make_matrix(self.n * 2, 0)
self.marked = self.__make_matrix(self.n, 0)
done = False
step = 1
steps = { 1 : self.__step1,
2 : self.__step2,
3 : self.__step3,
4 : self.__step4,
5 : self.__step5,
6 : self.__step6 }
while not done:
try:
func = steps[step]
step = func()
except KeyError:
done = True
# Look for the starred columns
results = []
for i in range(self.original_length):
for j in range(self.original_width):
if self.marked[i][j] == 1:
results += [(i, j)]
return results
def __copy_matrix(self, matrix):
"""Return an exact copy of the supplied matrix"""
return copy.deepcopy(matrix)
def __make_matrix(self, n, val):
"""Create an *n*x*n* matrix, populating it with the specific value."""
matrix = []
for i in range(n):
matrix += [[val for j in range(n)]]
return matrix
def __step1(self):
"""
For each row of the matrix, find the smallest element and
subtract it from every element in its row. Go to Step 2.
"""
C = self.C
n = self.n
for i in range(n):
minval = min(self.C[i])
# Find the minimum value for this row and subtract that minimum
# from every element in the row.
for j in range(n):
self.C[i][j] -= minval
return 2
def __step2(self):
"""
Find a zero (Z) in the resulting matrix. If there is no starred
zero in its row or column, star Z. Repeat for each element in the
matrix. Go to Step 3.
"""
n = self.n
for i in range(n):
for j in range(n):
if (self.C[i][j] == 0) and \
(not self.col_covered[j]) and \
(not self.row_covered[i]):
self.marked[i][j] = 1
self.col_covered[j] = True
self.row_covered[i] = True
self.__clear_covers()
return 3
def __step3(self):
"""
Cover each column containing a starred zero. If K columns are
covered, the starred zeros describe a complete set of unique
assignments. In this case, Go to DONE, otherwise, Go to Step 4.
"""
n = self.n
count = 0
for i in range(n):
for j in range(n):
if self.marked[i][j] == 1:
self.col_covered[j] = True
count += 1
if count >= n:
step = 7 # done
else:
step = 4
return step
def __step4(self):
"""
Find a noncovered zero and prime it. If there is no starred zero
in the row containing this primed zero, Go to Step 5. Otherwise,
cover this row and uncover the column containing the starred
zero. Continue in this manner until there are no uncovered zeros
left. Save the smallest uncovered value and Go to Step 6.
"""
step = 0
done = False
row = -1
col = -1
star_col = -1
while not done:
(row, col) = self.__find_a_zero()
if row < 0:
done = True
step = 6
else:
self.marked[row][col] = 2
star_col = self.__find_star_in_row(row)
if star_col >= 0:
col = star_col
self.row_covered[row] = True
self.col_covered[col] = False
else:
done = True
self.Z0_r = row
self.Z0_c = col
step = 5
return step
def __step5(self):
"""
Construct a series of alternating primed and starred zeros as
follows. Let Z0 represent the uncovered primed zero found in Step 4.
Let Z1 denote the starred zero in the column of Z0 (if any).
Let Z2 denote the primed zero in the row of Z1 (there will always
be one). Continue until the series terminates at a primed zero
that has no starred zero in its column. Unstar each starred zero
of the series, star each primed zero of the series, erase all
primes and uncover every line in the matrix. Return to Step 3
"""
count = 0
path = self.path
path[count][0] = self.Z0_r
path[count][1] = self.Z0_c
done = False
while not done:
row = self.__find_star_in_col(path[count][1])
if row >= 0:
count += 1
path[count][0] = row
path[count][1] = path[count-1][1]
else:
done = True
if not done:
col = self.__find_prime_in_row(path[count][0])
count += 1
path[count][0] = path[count-1][0]
path[count][1] = col
self.__convert_path(path, count)
self.__clear_covers()
self.__erase_primes()
return 3
def __step6(self):
"""
Add the value found in Step 4 to every element of each covered
row, and subtract it from every element of each uncovered column.
Return to Step 4 without altering any stars, primes, or covered
lines.
"""
minval = self.__find_smallest()
for i in range(self.n):
for j in range(self.n):
if self.row_covered[i]:
self.C[i][j] += minval
if not self.col_covered[j]:
self.C[i][j] -= minval
return 4
def __find_smallest(self):
"""Find the smallest uncovered value in the matrix."""
minval = sys.maxsize
for i in range(self.n):
for j in range(self.n):
if (not self.row_covered[i]) and (not self.col_covered[j]):
if minval > self.C[i][j]:
minval = self.C[i][j]
return minval
def __find_a_zero(self):
"""Find the first uncovered element with value 0"""
row = -1
col = -1
i = 0
n = self.n
done = False
while not done:
j = 0
while True:
if (self.C[i][j] == 0) and \
(not self.row_covered[i]) and \
(not self.col_covered[j]):
row = i
col = j
done = True
j += 1
if j >= n:
break
i += 1
if i >= n:
done = True
return (row, col)
def __find_star_in_row(self, row):
"""
Find the first starred element in the specified row. Returns
the column index, or -1 if no starred element was found.
"""
col = -1
for j in range(self.n):
if self.marked[row][j] == 1:
col = j
break
return col
def __find_star_in_col(self, col):
"""
        Find the first starred element in the specified column. Returns
the row index, or -1 if no starred element was found.
"""
row = -1
for i in range(self.n):
if self.marked[i][col] == 1:
row = i
break
return row
def __find_prime_in_row(self, row):
"""
Find the first prime element in the specified row. Returns
        the column index, or -1 if no primed element was found.
"""
col = -1
for j in range(self.n):
if self.marked[row][j] == 2:
col = j
break
return col
def __convert_path(self, path, count):
for i in range(count+1):
if self.marked[path[i][0]][path[i][1]] == 1:
self.marked[path[i][0]][path[i][1]] = 0
else:
self.marked[path[i][0]][path[i][1]] = 1
def __clear_covers(self):
"""Clear all covered matrix cells"""
for i in range(self.n):
self.row_covered[i] = False
self.col_covered[i] = False
def __erase_primes(self):
"""Erase all prime markings"""
for i in range(self.n):
for j in range(self.n):
if self.marked[i][j] == 2:
self.marked[i][j] = 0
# ---------------------------------------------------------------------------
# Functions
# ---------------------------------------------------------------------------
def make_cost_matrix(profit_matrix, inversion_function):
"""
Create a cost matrix from a profit matrix by calling
'inversion_function' to invert each value. The inversion
function must take one numeric argument (of any type) and return
another numeric argument which is presumed to be the cost inverse
of the original profit.
This is a static method. Call it like this:
.. python::
cost_matrix = Munkres.make_cost_matrix(matrix, inversion_func)
For example:
.. python::
cost_matrix = Munkres.make_cost_matrix(matrix, lambda x : sys.maxsize - x)
:Parameters:
profit_matrix : list of lists
The matrix to convert from a profit to a cost matrix
inversion_function : function
The function to use to invert each entry in the profit matrix
:rtype: list of lists
:return: The converted matrix
"""
cost_matrix = []
for row in profit_matrix:
cost_matrix.append([inversion_function(value) for value in row])
return cost_matrix
def print_matrix(matrix, msg=None):
"""
Convenience function: Displays the contents of a matrix of integers.
:Parameters:
matrix : list of lists
Matrix to print
msg : str
Optional message to print before displaying the matrix
"""
import math
if msg is not None:
print(msg)
# Calculate the appropriate format width.
width = 0
for row in matrix:
for val in row:
width = max(width, int(math.log10(val)) + 1)
# Make the format string
format = '%%%dd' % width
# Print the matrix
for row in matrix:
sep = '['
for val in row:
sys.stdout.write(sep + format % val)
sep = ', '
sys.stdout.write(']\n')
def munkres_match(sts_true, sts_pred, K):
""" Matches the set of states in sts_pred such that it minimizes the hamming
distance between sts_pred and sts_true. We assume here that the states
are labeled 0, ..., K - 1. This uses the Munkres algorithm to minimize
    the hamming distance, which is much faster than match_state_seq.
sts_true : A numpy array of integers.
sts_pred : A numpy array of integers.
K : Number of states in case sts_true doesn't cover all states.
"""
sts_true = sts_true.astype('int')
sts_pred = sts_pred.astype('int')
DM = np.zeros((K, K))
    for k in range(K):
iei = np.where(sts_pred == k)[0]
        for l in range(K):
n_incorr = np.sum(sts_true[iei] == l)
DM[k,l] = n_incorr
cost_mat = 1 - (DM / np.sum(DM))
m = Munkres()
indexes = m.compute(cost_mat)
return np.array([x[1] for x in indexes])
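# A minimal usage sketch for munkres_match (the label sequences are illustrative):
#
#   sts_true = np.array([0, 0, 1, 1, 2, 2])
#   sts_pred = np.array([2, 2, 0, 0, 1, 1])
#   munkres_match(sts_true, sts_pred, K=3)   # -> array([1, 2, 0])
#
# i.e. predicted state 0 is matched to true state 1, state 1 to 2, and state 2 to 0.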
# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------
if __name__ == '__main__':
matrices = [
# Square
([[400, 150, 400],
[400, 450, 600],
[300, 225, 300]],
850), # expected cost
# Rectangular variant
([[400, 150, 400, 1],
[400, 450, 600, 2],
[300, 225, 300, 3]],
452), # expected cost
# Square
([[10, 10, 8],
[9, 8, 1],
[9, 7, 4]],
18),
# Rectangular variant
([[10, 10, 8, 11],
[9, 8, 1, 1],
[9, 7, 4, 10]],
15)]
m = Munkres()
for cost_matrix, expected_total in matrices:
print_matrix(cost_matrix, msg='cost matrix')
indexes = m.compute(cost_matrix)
total_cost = 0
for r, c in indexes:
x = cost_matrix[r][c]
total_cost += x
print('(%d, %d) -> %d' % (r, c, x))
print('lowest cost=%d' % total_cost)
assert expected_total == total_cost
| 26,109 |
Python
| 33.720745 | 82 | 0.548661 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/lib/utils/se3.py
|
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
import numpy as np
from transforms3d.quaternions import mat2quat, quat2mat, qmult, qinverse
from transforms3d.euler import quat2euler, mat2euler, euler2quat
# RT is a 3x4 matrix
def se3_inverse(RT):
R = RT[0:3, 0:3]
T = RT[0:3, 3].reshape((3,1))
RT_new = np.zeros((3, 4), dtype=np.float32)
RT_new[0:3, 0:3] = R.transpose()
RT_new[0:3, 3] = -1 * np.dot(R.transpose(), T).reshape((3))
return RT_new
def se3_mul(RT1, RT2):
R1 = RT1[0:3, 0:3]
T1 = RT1[0:3, 3].reshape((3,1))
R2 = RT2[0:3, 0:3]
T2 = RT2[0:3, 3].reshape((3,1))
RT_new = np.zeros((3, 4), dtype=np.float32)
RT_new[0:3, 0:3] = np.dot(R1, R2)
T_new = np.dot(R1, T2) + T1
RT_new[0:3, 3] = T_new.reshape((3))
return RT_new
def egocentric2allocentric(qt, T):
dx = np.arctan2(T[0], -T[2])
dy = np.arctan2(T[1], -T[2])
quat = euler2quat(-dy, -dx, 0, axes='sxyz')
quat = qmult(qinverse(quat), qt)
return quat
def allocentric2egocentric(qt, T):
dx = np.arctan2(T[0], -T[2])
dy = np.arctan2(T[1], -T[2])
quat = euler2quat(-dy, -dx, 0, axes='sxyz')
quat = qmult(quat, qt)
return quat
def T_inv_transform(T_src, T_tgt):
'''
:param T_src:
:param T_tgt:
:return: T_delta: delta in pixel
'''
T_delta = np.zeros((3, ), dtype=np.float32)
T_delta[0] = T_tgt[0] / T_tgt[2] - T_src[0] / T_src[2]
T_delta[1] = T_tgt[1] / T_tgt[2] - T_src[1] / T_src[2]
T_delta[2] = np.log(T_src[2] / T_tgt[2])
return T_delta
def rotation_x(theta):
t = theta * np.pi / 180.0
R = np.zeros((3, 3), dtype=np.float32)
R[0, 0] = 1
R[1, 1] = np.cos(t)
R[1, 2] = -np.sin(t)
R[2, 1] = np.sin(t)
R[2, 2] = np.cos(t)
return R
def rotation_y(theta):
t = theta * np.pi / 180.0
R = np.zeros((3, 3), dtype=np.float32)
R[0, 0] = np.cos(t)
R[0, 2] = np.sin(t)
R[1, 1] = 1
R[2, 0] = -np.sin(t)
R[2, 2] = np.cos(t)
return R
def rotation_z(theta):
t = theta * np.pi / 180.0
R = np.zeros((3, 3), dtype=np.float32)
R[0, 0] = np.cos(t)
R[0, 1] = -np.sin(t)
R[1, 0] = np.sin(t)
R[1, 1] = np.cos(t)
R[2, 2] = 1
return R
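# A minimal sanity-check sketch for the helpers above (the values are illustrative):
#
#   RT = np.zeros((3, 4), dtype=np.float32)
#   RT[:3, :3] = rotation_y(30)
#   RT[:3, 3] = [0.1, 0.0, 0.5]
#   I = se3_mul(RT, se3_inverse(RT))
#   # I[:3, :3] is numerically the identity and I[:3, 3] is numerically zero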
| 2,351 |
Python
| 24.565217 | 83 | 0.548277 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/lib/utils/augmentation.py
|
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
import torch
import random
import numpy as np
import numbers
from PIL import Image # PyTorch likes PIL instead of cv2
import cv2
# My libraries
from utils import mask as util_
from fcn.config import cfg
##### Useful Utilities #####
def array_to_tensor(array):
""" Converts a numpy.ndarray (N x H x W x C) to a torch.FloatTensor of shape (N x C x H x W)
OR
converts a nump.ndarray (H x W x C) to a torch.FloatTensor of shape (C x H x W)
"""
if array.ndim == 4: # NHWC
tensor = torch.from_numpy(array).permute(0,3,1,2).float()
elif array.ndim == 3: # HWC
tensor = torch.from_numpy(array).permute(2,0,1).float()
else: # everything else
tensor = torch.from_numpy(array).float()
return tensor
def translate(img, tx, ty, interpolation=cv2.INTER_LINEAR):
""" Translate img by tx, ty
@param img: a [H x W x C] image (could be an RGB image, flow image, or label image)
"""
H, W = img.shape[:2]
M = np.array([[1,0,tx],
[0,1,ty]], dtype=np.float32)
return cv2.warpAffine(img, M, (W, H), flags=interpolation)
def rotate(img, angle, center=None, interpolation=cv2.INTER_LINEAR):
""" Rotate img <angle> degrees counter clockwise w.r.t. center of image
@param img: a [H x W x C] image (could be an RGB image, flow image, or label image)
"""
H, W = img.shape[:2]
if center is None:
center = (W//2, H//2)
M = cv2.getRotationMatrix2D(center, angle, 1)
return cv2.warpAffine(img, M, (W, H), flags=interpolation)
##### Depth Augmentations #####
def add_noise_to_depth(depth_img, noise_params):
""" Distort depth image with multiplicative gamma noise.
This is adapted from the DexNet 2.0 codebase.
Their code: https://github.com/BerkeleyAutomation/gqcnn/blob/75040b552f6f7fb264c27d427b404756729b5e88/gqcnn/sgd_optimizer.py
@param depth_img: a [H x W] set of depth z values
"""
depth_img = depth_img.copy()
# Multiplicative noise: Gamma random variable
multiplicative_noise = np.random.gamma(noise_params['gamma_shape'], noise_params['gamma_scale'])
depth_img = multiplicative_noise * depth_img
return depth_img
def add_noise_to_xyz(xyz_img, depth_img, noise_params):
""" Add (approximate) Gaussian Process noise to ordered point cloud.
This is adapted from the DexNet 2.0 codebase.
@param xyz_img: a [H x W x 3] ordered point cloud
"""
xyz_img = xyz_img.copy()
H, W, C = xyz_img.shape
# Additive noise: Gaussian process, approximated by zero-mean anisotropic Gaussian random variable,
# which is rescaled with bicubic interpolation.
small_H, small_W = (np.array([H, W]) / noise_params['gp_rescale_factor']).astype(int)
additive_noise = np.random.normal(loc=0.0, scale=noise_params['gaussian_scale'], size=(small_H, small_W, C))
additive_noise = cv2.resize(additive_noise, (W, H), interpolation=cv2.INTER_CUBIC)
xyz_img[depth_img > 0, :] += additive_noise[depth_img > 0, :]
return xyz_img
def dropout_random_ellipses(depth_img, noise_params):
""" Randomly drop a few ellipses in the image for robustness.
This is adapted from the DexNet 2.0 codebase.
Their code: https://github.com/BerkeleyAutomation/gqcnn/blob/75040b552f6f7fb264c27d427b404756729b5e88/gqcnn/sgd_optimizer.py
@param depth_img: a [H x W] set of depth z values
"""
depth_img = depth_img.copy()
# Sample number of ellipses to dropout
num_ellipses_to_dropout = np.random.poisson(noise_params['ellipse_dropout_mean'])
# Sample ellipse centers
nonzero_pixel_indices = np.array(np.where(depth_img > 0)).T # Shape: [#nonzero_pixels x 2]
dropout_centers_indices = np.random.choice(nonzero_pixel_indices.shape[0], size=num_ellipses_to_dropout)
dropout_centers = nonzero_pixel_indices[dropout_centers_indices, :] # Shape: [num_ellipses_to_dropout x 2]
# Sample ellipse radii and angles
x_radii = np.random.gamma(noise_params['ellipse_gamma_shape'], noise_params['ellipse_gamma_scale'], size=num_ellipses_to_dropout)
y_radii = np.random.gamma(noise_params['ellipse_gamma_shape'], noise_params['ellipse_gamma_scale'], size=num_ellipses_to_dropout)
angles = np.random.randint(0, 360, size=num_ellipses_to_dropout)
# Dropout ellipses
for i in range(num_ellipses_to_dropout):
center = dropout_centers[i, :]
x_radius = np.round(x_radii[i]).astype(int)
y_radius = np.round(y_radii[i]).astype(int)
angle = angles[i]
# dropout the ellipse
mask = np.zeros_like(depth_img)
mask = cv2.ellipse(mask, tuple(center[::-1]), (x_radius, y_radius), angle=angle, startAngle=0, endAngle=360, color=1, thickness=-1)
depth_img[mask == 1] = 0
return depth_img
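# A minimal sketch of chaining the depth augmentations above. The parameter
# values here are illustrative assumptions, not the project's configured ones:
#
#   noise_params = {'gamma_shape': 1000.0, 'gamma_scale': 0.001,            # add_noise_to_depth
#                   'gaussian_scale': 0.005, 'gp_rescale_factor': 4,        # add_noise_to_xyz
#                   'ellipse_dropout_mean': 10, 'ellipse_gamma_shape': 5.0,
#                   'ellipse_gamma_scale': 1.0}                             # dropout_random_ellipses
#   depth_img = add_noise_to_depth(depth_img, noise_params)
#   depth_img = dropout_random_ellipses(depth_img, noise_params)
#   xyz_img = add_noise_to_xyz(xyz_img, depth_img, noise_params)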
##### RGB Augmentations #####
def standardize_image(image):
""" Convert a numpy.ndarray [H x W x 3] of images to [0,1] range, and then standardizes
@return: a [H x W x 3] numpy array of np.float32
"""
image_standardized = np.zeros_like(image).astype(np.float32)
mean=[0.485, 0.456, 0.406]
std=[0.229, 0.224, 0.225]
for i in range(3):
image_standardized[...,i] = (image[...,i]/255. - mean[i]) / std[i]
return image_standardized
def random_color_warp(image, d_h=None, d_s=None, d_l=None):
""" Given an RGB image [H x W x 3], add random hue, saturation and luminosity to the image
Code adapted from: https://github.com/yuxng/PoseCNN/blob/master/lib/utils/blob.py
"""
H, W, _ = image.shape
image_color_warped = np.zeros_like(image)
# Set random hue, luminosity and saturation which ranges from -0.1 to 0.1
if d_h is None:
d_h = (random.random() - 0.5) * 0.2 * 256
if d_l is None:
d_l = (random.random() - 0.5) * 0.2 * 256
if d_s is None:
d_s = (random.random() - 0.5) * 0.2 * 256
# Convert the RGB to HLS
hls = cv2.cvtColor(image.round().astype(np.uint8), cv2.COLOR_RGB2HLS)
h, l, s = cv2.split(hls)
# Add the values to the image H, L, S
new_h = (np.round((h + d_h)) % 256).astype(np.uint8)
new_l = np.round(np.clip(l + d_l, 0, 255)).astype(np.uint8)
new_s = np.round(np.clip(s + d_s, 0, 255)).astype(np.uint8)
# Convert the HLS to RGB
new_hls = cv2.merge((new_h, new_l, new_s)).astype(np.uint8)
new_im = cv2.cvtColor(new_hls, cv2.COLOR_HLS2RGB)
image_color_warped = new_im.astype(np.float32)
return image_color_warped
def random_horizontal_flip(image, label):
"""Randomly horizontally flip the image/label w.p. 0.5
@param image: a [H x W x 3] numpy array
@param label: a [H x W] numpy array
"""
if random.random() > 0.5:
image = np.fliplr(image).copy()
label = np.fliplr(label).copy()
return image, label
##### Label transformations #####
def random_morphological_transform(label):
""" Randomly erode/dilate the label
@param label: a [H x W] numpy array of {0, 1}
"""
num_tries = 0
valid_transform = False
while not valid_transform:
if num_tries >= cfg.TRAIN.max_augmentation_tries:
print('Morph: Exhausted number of augmentation tries...')
return label
# Sample whether we do erosion or dilation, and kernel size for that
x_min, y_min, x_max, y_max = util_.mask_to_tight_box(label)
sidelength = np.mean([x_max - x_min, y_max - y_min])
morphology_kernel_size = 0; num_ksize_tries = 0;
while morphology_kernel_size == 0:
if num_ksize_tries >= 50: # 50 tries for this
                print(f'Morph: Exhausted number of augmentation tries... Sidelength: {sidelength}')
return label
dilation_percentage = np.random.beta(cfg.TRAIN.label_dilation_alpha, cfg.TRAIN.label_dilation_beta)
morphology_kernel_size = int(round(sidelength * dilation_percentage))
num_ksize_tries += 1
iterations = np.random.randint(1, cfg.TRAIN.morphology_max_iters+1)
# Erode/dilate the mask
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (morphology_kernel_size, morphology_kernel_size))
if np.random.rand() < 0.5:
morphed_label = cv2.erode(label, kernel, iterations=iterations)
else:
morphed_label = cv2.dilate(label, kernel, iterations=iterations)
        # Make sure the mass is reasonable
if (np.count_nonzero(morphed_label) / morphed_label.size > 0.001) and \
(np.count_nonzero(morphed_label) / morphed_label.size < 0.98):
valid_transform = True
num_tries += 1
return morphed_label
def random_ellipses(label):
""" Randomly add/drop a few ellipses in the mask
This is adapted from the DexNet 2.0 code.
Their code: https://github.com/BerkeleyAutomation/gqcnn/blob/75040b552f6f7fb264c27d427b404756729b5e88/gqcnn/sgd_optimizer.py
@param label: a [H x W] numpy array of {0, 1}
"""
H, W = label.shape
num_tries = 0
valid_transform = False
while not valid_transform:
if num_tries >= cfg.TRAIN.max_augmentation_tries:
print('Ellipse: Exhausted number of augmentation tries...')
return label
new_label = label.copy()
# Sample number of ellipses to include/dropout
num_ellipses = np.random.poisson(cfg.TRAIN.num_ellipses_mean)
# Sample ellipse centers by sampling from Gaussian at object center
pixel_indices = util_.build_matrix_of_indices(H, W)
h_idx, w_idx = np.where(new_label)
mu = np.mean(pixel_indices[h_idx, w_idx, :], axis=0) # Shape: [2]. y_center, x_center
sigma = 2*np.cov(pixel_indices[h_idx, w_idx, :].T) # Shape: [2 x 2]
if np.any(np.isnan(mu)) or np.any(np.isnan(sigma)):
print(mu, sigma, h_idx, w_idx)
ellipse_centers = np.random.multivariate_normal(mu, sigma, size=num_ellipses) # Shape: [num_ellipses x 2]
ellipse_centers = np.round(ellipse_centers).astype(int)
# Sample ellipse radii and angles
x_min, y_min, x_max, y_max = util_.mask_to_tight_box(new_label)
scale_factor = max(x_max - x_min, y_max - y_min) * cfg.TRAIN.ellipse_size_percentage # Mean of gamma r.v.
x_radii = np.random.gamma(cfg.TRAIN.ellipse_gamma_base_shape * scale_factor,
cfg.TRAIN.ellipse_gamma_base_scale,
size=num_ellipses)
y_radii = np.random.gamma(cfg.TRAIN.ellipse_gamma_base_shape * scale_factor,
cfg.TRAIN.ellipse_gamma_base_scale,
size=num_ellipses)
angles = np.random.randint(0, 360, size=num_ellipses)
# Dropout ellipses
for i in range(num_ellipses):
center = ellipse_centers[i, :]
x_radius = np.round(x_radii[i]).astype(int)
y_radius = np.round(y_radii[i]).astype(int)
angle = angles[i]
# include or dropout the ellipse
mask = np.zeros_like(new_label)
mask = cv2.ellipse(mask, tuple(center[::-1]), (x_radius, y_radius), angle=angle, startAngle=0, endAngle=360, color=1, thickness=-1)
if np.random.rand() < 0.5:
new_label[mask == 1] = 0 # Drop out ellipse
else:
new_label[mask == 1] = 1 # Add ellipse
# Make sure the mass is reasonable
if (np.count_nonzero(new_label) / new_label.size > 0.001) and \
(np.count_nonzero(new_label) / new_label.size < 0.98):
valid_transform = True
num_tries += 1
return new_label
def random_translation(label):
""" Randomly translate mask
@param label: a [H x W] numpy array of {0, 1}
"""
num_tries = 0
valid_transform = False
while not valid_transform:
if num_tries >= cfg.TRAIN.max_augmentation_tries:
print('Translate: Exhausted number of augmentation tries...')
return label
# Get tight bbox of mask
x_min, y_min, x_max, y_max = util_.mask_to_tight_box(label)
sidelength = max(x_max - x_min, y_max - y_min)
# sample translation pixels
translation_percentage = np.random.beta(cfg.TRAIN.translation_alpha, cfg.TRAIN.translation_beta)
translation_percentage = max(translation_percentage, cfg.TRAIN.translation_percentage_min)
translation_max = int(round(translation_percentage * sidelength))
translation_max = max(translation_max, 1) # To make sure things don't error out
tx = np.random.randint(-translation_max, translation_max)
ty = np.random.randint(-translation_max, translation_max)
translated_label = translate(label, tx, ty, interpolation=cv2.INTER_NEAREST)
# Make sure the mass is reasonable
if (np.count_nonzero(translated_label) / translated_label.size > 0.001) and \
(np.count_nonzero(translated_label) / translated_label.size < 0.98):
valid_transform = True
num_tries += 1
return translated_label
def random_rotation(label):
""" Randomly rotate mask
@param label: a [H x W] numpy array of {0, 1}
"""
H, W = label.shape
num_tries = 0
valid_transform = False
while not valid_transform:
if num_tries >= cfg.TRAIN.max_augmentation_tries:
print('Rotate: Exhausted number of augmentation tries...')
return label
# Rotate about center of box
pixel_indices = util_.build_matrix_of_indices(H, W)
h_idx, w_idx = np.where(label)
mean = np.mean(pixel_indices[h_idx, w_idx, :], axis=0) # Shape: [2]. y_center, x_center
# Sample an angle
applied_angle = np.random.uniform(-cfg.TRAIN.rotation_angle_max, cfg.TRAIN.rotation_angle_max)
rotated_label = rotate(label, applied_angle, center=tuple(mean[::-1]), interpolation=cv2.INTER_NEAREST)
# Make sure the mass is reasonable
if (np.count_nonzero(rotated_label) / rotated_label.size > 0.001) and \
(np.count_nonzero(rotated_label) / rotated_label.size < 0.98):
valid_transform = True
num_tries += 1
return rotated_label
def random_cut(label):
""" Randomly cut part of mask
@param label: a [H x W] numpy array of {0, 1}
"""
H, W = label.shape
num_tries = 0
valid_transform = False
while not valid_transform:
if num_tries >= cfg.TRAIN.max_augmentation_tries:
print('Cut: Exhausted number of augmentation tries...')
return label
cut_label = label.copy()
# Sample cut percentage
cut_percentage = np.random.uniform(cfg.TRAIN.cut_percentage_min, cfg.TRAIN.cut_percentage_max)
x_min, y_min, x_max, y_max = util_.mask_to_tight_box(label)
if np.random.rand() < 0.5: # choose width
sidelength = x_max - x_min
if np.random.rand() < 0.5: # from the left
x = int(round(cut_percentage * sidelength)) + x_min
cut_label[y_min:y_max+1, x_min:x] = 0
else: # from the right
x = x_max - int(round(cut_percentage * sidelength))
cut_label[y_min:y_max+1, x:x_max+1] = 0
else: # choose height
sidelength = y_max - y_min
if np.random.rand() < 0.5: # from the top
y = int(round(cut_percentage * sidelength)) + y_min
cut_label[y_min:y, x_min:x_max+1] = 0
else: # from the bottom
y = y_max - int(round(cut_percentage * sidelength))
cut_label[y:y_max+1, x_min:x_max+1] = 0
# Make sure the mass is reasonable
if (np.count_nonzero(cut_label) / cut_label.size > 0.001) and \
(np.count_nonzero(cut_label) / cut_label.size < 0.98):
valid_transform = True
num_tries += 1
return cut_label
def random_add(label):
""" Randomly add part of mask
@param label: a [H x W] numpy array of {0, 1}
"""
H, W = label.shape
num_tries = 0
valid_transform = False
while not valid_transform:
if num_tries >= cfg.TRAIN.max_augmentation_tries:
print('Add: Exhausted number of augmentation tries...')
return label
added_label = label.copy()
# Sample add percentage
add_percentage = np.random.uniform(cfg.TRAIN.add_percentage_min, cfg.TRAIN.add_percentage_max)
x_min, y_min, x_max, y_max = util_.mask_to_tight_box(label)
# Sample translation from center
translation_percentage_x = np.random.uniform(0, 2*add_percentage)
tx = int(round( (x_max - x_min) * translation_percentage_x ))
translation_percentage_y = np.random.uniform(0, 2*add_percentage)
ty = int(round( (y_max - y_min) * translation_percentage_y ))
if np.random.rand() < 0.5: # choose x direction
sidelength = x_max - x_min
ty = np.random.choice([-1, 1]) * ty # mask will be moved to the left/right. up/down doesn't matter
if np.random.rand() < 0.5: # mask copied from the left.
x = int(round(add_percentage * sidelength)) + x_min
try:
temp = added_label[y_min+ty : y_max+1+ty, x_min-tx : x-tx]
added_label[y_min+ty : y_max+1+ty, x_min-tx : x-tx] = np.logical_or(temp, added_label[y_min : y_max+1, x_min : x])
except ValueError as e: # indices were out of bounds
num_tries += 1
continue
else: # mask copied from the right
x = x_max - int(round(add_percentage * sidelength))
try:
temp = added_label[y_min+ty : y_max+1+ty, x+tx : x_max+1+tx]
added_label[y_min+ty : y_max+1+ty, x+tx : x_max+1+tx] = np.logical_or(temp, added_label[y_min : y_max+1, x : x_max+1])
except ValueError as e: # indices were out of bounds
num_tries += 1
continue
else: # choose y direction
sidelength = y_max - y_min
            tx = np.random.choice([-1, 1]) * tx # mask will be moved up/down. left/right doesn't matter
if np.random.rand() < 0.5: # from the top
y = int(round(add_percentage * sidelength)) + y_min
try:
temp = added_label[y_min-ty : y-ty, x_min+tx : x_max+1+tx]
added_label[y_min-ty : y-ty, x_min+tx : x_max+1+tx] = np.logical_or(temp, added_label[y_min : y, x_min : x_max+1])
except ValueError as e: # indices were out of bounds
num_tries += 1
continue
else: # from the bottom
y = y_max - int(round(add_percentage * sidelength))
try:
temp = added_label[y+ty : y_max+1+ty, x_min+tx : x_max+1+tx]
added_label[y+ty : y_max+1+ty, x_min+tx : x_max+1+tx] = np.logical_or(temp, added_label[y : y_max+1, x_min : x_max+1])
except ValueError as e: # indices were out of bounds
num_tries += 1
continue
# Make sure the mass is reasonable
if (np.count_nonzero(added_label) / added_label.size > 0.001) and \
(np.count_nonzero(added_label) / added_label.size < 0.98):
valid_transform = True
num_tries += 1
return added_label
| 19,777 |
Python
| 37.780392 | 143 | 0.601254 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/UOC/lib/utils/blob.py
|
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
"""Blob helper functions."""
import torch
import torch.nn as nn
import numpy as np
import cv2
import random
def im_list_to_blob(ims, num_channels):
"""Convert a list of images into a network input.
Assumes images are already prepared (means subtracted, BGR order, ...).
"""
max_shape = np.array([im.shape for im in ims]).max(axis=0)
num_images = len(ims)
blob = np.zeros((num_images, max_shape[0], max_shape[1], num_channels),
dtype=np.float32)
    for i in range(num_images):
im = ims[i]
if num_channels == 1:
blob[i, 0:im.shape[0], 0:im.shape[1], :] = im[:,:,np.newaxis]
else:
blob[i, 0:im.shape[0], 0:im.shape[1], :] = im
return blob
def prep_im_for_blob(im, pixel_means, target_size, max_size):
"""Mean subtract and scale an image for use in a blob."""
im = im.astype(np.float32, copy=False)
im -= pixel_means
im_shape = im.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
if np.round(im_scale * im_size_max) > max_size:
im_scale = float(max_size) / float(im_size_max)
im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
return im, im_scale
def pad_im(im, factor, value=0):
    """Pad an image on the bottom/right so both spatial dims are multiples of `factor`."""
height = im.shape[0]
width = im.shape[1]
pad_height = int(np.ceil(height / float(factor)) * factor - height)
pad_width = int(np.ceil(width / float(factor)) * factor - width)
if len(im.shape) == 3:
return np.lib.pad(im, ((0, pad_height), (0, pad_width), (0,0)), 'constant', constant_values=value)
elif len(im.shape) == 2:
return np.lib.pad(im, ((0, pad_height), (0, pad_width)), 'constant', constant_values=value)
def unpad_im(im, factor):
    """Undo pad_im by cropping off the bottom/right padding."""
height = im.shape[0]
width = im.shape[1]
pad_height = int(np.ceil(height / float(factor)) * factor - height)
pad_width = int(np.ceil(width / float(factor)) * factor - width)
if len(im.shape) == 3:
return im[0:height-pad_height, 0:width-pad_width, :]
elif len(im.shape) == 2:
return im[0:height-pad_height, 0:width-pad_width]
def chromatic_transform(im, label=None, d_h=None, d_s=None, d_l=None):
"""
Given an image array, add the hue, saturation and luminosity to the image
"""
# Set random hue, luminosity and saturation which ranges from -0.1 to 0.1
if d_h is None:
d_h = (np.random.rand(1) - 0.5) * 0.1 * 180
if d_l is None:
d_l = (np.random.rand(1) - 0.5) * 0.2 * 256
if d_s is None:
d_s = (np.random.rand(1) - 0.5) * 0.2 * 256
# Convert the BGR to HLS
hls = cv2.cvtColor(im, cv2.COLOR_BGR2HLS)
h, l, s = cv2.split(hls)
# Add the values to the image H, L, S
new_h = (h + d_h) % 180
new_l = np.clip(l + d_l, 0, 255)
new_s = np.clip(s + d_s, 0, 255)
# Convert the HLS to BGR
new_hls = cv2.merge((new_h, new_l, new_s)).astype('uint8')
new_im = cv2.cvtColor(new_hls, cv2.COLOR_HLS2BGR)
if label is not None:
I = np.where(label > 0)
new_im[I[0], I[1], :] = im[I[0], I[1], :]
return new_im
def add_noise(image, level = 0.1):
# random number
r = np.random.rand(1)
# gaussian noise
if r < 0.9:
row,col,ch= image.shape
mean = 0
noise_level = random.uniform(0, level)
sigma = np.random.rand(1) * noise_level * 256
gauss = sigma * np.random.randn(row,col) + mean
gauss = np.repeat(gauss[:, :, np.newaxis], ch, axis=2)
noisy = image + gauss
noisy = np.clip(noisy, 0, 255)
else:
# motion blur
sizes = [3, 5, 7, 9, 11, 15]
size = sizes[int(np.random.randint(len(sizes), size=1))]
kernel_motion_blur = np.zeros((size, size))
if np.random.rand(1) < 0.5:
kernel_motion_blur[int((size-1)/2), :] = np.ones(size)
else:
kernel_motion_blur[:, int((size-1)/2)] = np.ones(size)
kernel_motion_blur = kernel_motion_blur / size
noisy = cv2.filter2D(image, -1, kernel_motion_blur)
return noisy.astype('uint8')
def add_noise_depth(image, level = 0.1):
row,col,ch= image.shape
noise_level = random.uniform(0, level)
gauss = noise_level * np.random.randn(row,col)
gauss = np.repeat(gauss[:, :, np.newaxis], ch, axis=2)
noisy = image + gauss
return noisy
def add_noise_depth_cuda(image, level = 0.1):
noise_level = random.uniform(0, level)
gauss = torch.randn_like(image) * noise_level
noisy = image + gauss
return noisy
def add_gaussian_noise_cuda(image, level = 0.1):
# gaussian noise
noise_level = random.uniform(0, level)
gauss = torch.randn_like(image) * noise_level
noisy = image + gauss
noisy = torch.clamp(noisy, 0, 1.0)
return noisy
def add_noise_cuda(image, level = 0.1):
# random number
r = np.random.rand(1)
# gaussian noise
if r < 0.8:
noise_level = random.uniform(0, level)
gauss = torch.randn_like(image) * noise_level
noisy = image + gauss
noisy = torch.clamp(noisy, 0, 1.0)
else:
# motion blur
sizes = [3, 5, 7, 9, 11, 15]
size = sizes[int(np.random.randint(len(sizes), size=1))]
kernel_motion_blur = torch.zeros((size, size))
if np.random.rand(1) < 0.5:
kernel_motion_blur[int((size-1)/2), :] = torch.ones(size)
else:
kernel_motion_blur[:, int((size-1)/2)] = torch.ones(size)
kernel_motion_blur = kernel_motion_blur.cuda() / size
kernel_motion_blur = kernel_motion_blur.view(1, 1, size, size)
kernel_motion_blur = kernel_motion_blur.repeat(image.size(2), 1, 1, 1)
motion_blur_filter = nn.Conv2d(in_channels=image.size(2),
out_channels=image.size(2),
kernel_size=size,
groups=image.size(2),
bias=False,
padding=int(size/2))
motion_blur_filter.weight.data = kernel_motion_blur
motion_blur_filter.weight.requires_grad = False
noisy = motion_blur_filter(image.permute(2, 0, 1).unsqueeze(0))
noisy = noisy.squeeze(0).permute(1, 2, 0)
return noisy
| 6,632 |
Python
| 33.367875 | 106 | 0.579011 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/VM/lvis.py
|
import json
import numpy as np
import os
from PIL import Image
class Lvis(object):
def __init__(self,
json_path="E:/dataset/lvis/split_train/lvis_common_category.json",
image_folders=["E:/dataset/lvis/train2017", "E:/dataset/lvis/val2017", "E:/dataset/lvis/test2017"]):
self.json_path = json_path
self.image_folders = image_folders
with open(self.json_path, 'r') as f:
data = json.load(f)
self.data = data
self.cat_ids = list(self.data.keys())
self.cat_num = len(self.cat_ids)
names = []
for cat_id in self.cat_ids:
cat_name = self.data[cat_id]['name']
names.append(cat_name)
self.cat_names = names
def random_images(self, index, num):
cat_id = self.cat_ids[index]
cat_name = self.data[cat_id]['name']
bbox_num = len(self.data[cat_id]['bboxes'])
bbox_list = [i for i in range(bbox_num)]
if bbox_num < num:
num = bbox_num
sample_ids = np.random.choice(bbox_list, num, replace=False)
images = []
for sample_id in sample_ids:
image_id = self.data[cat_id]['images'][sample_id]
x,y,w,h = self.data[cat_id]['bboxes'][sample_id]
for folder in self.image_folders:
image_path = os.path.join( folder, "%012d.jpg" % image_id )
if os.path.exists(image_path):
break
img = Image.open( image_path )
# crop_img = img.crop([x, y, x+w, y+h])
# images.append(crop_img)
images.append(img)
return images, cat_name
def random_test(self, task_num, img_num_pre_task):
index_list = [ i for i in range(self.cat_num) ]
for i in range(task_num):
source_list = []
target_list = []
label_list = []
sample_ids = np.random.choice(index_list, img_num_pre_task, replace=False)
for cat_id in sample_ids:
images, cat_name = self.random_images(cat_id, 2)
source_list.append(images[0])
target_list.append(images[1])
label_list.append(cat_name)
yield source_list, target_list, label_list
if __name__ == '__main__':
dataset = Lvis()
np.random.seed(6)
images, cat_name = dataset.random_images(9, 2)
a = 1
| 2,494 |
Python
| 28.702381 | 108 | 0.523256 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/VM/matcher.py
|
import torch
import clip
from scipy.optimize import linear_sum_assignment
from torchvision.transforms import Compose, ToTensor, Normalize
import numpy as np
import cv2
def resize(img, size=(224,224)):
img = cv2.resize(img, size, interpolation=cv2.INTER_CUBIC)
# img = center_crop(img, size)
return img
def center_crop(img, dim):
"""Returns center cropped image
Args:
img: image to be center cropped
dim: dimensions (width, height) to be cropped
"""
width, height = img.shape[1], img.shape[0]
# process crop width and height for max available dimension
crop_width = dim[0] if dim[0]<img.shape[1] else img.shape[1]
crop_height = dim[1] if dim[1]<img.shape[0] else img.shape[0]
mid_x, mid_y = int(width/2), int(height/2)
cw2, ch2 = int(crop_width/2), int(crop_height/2)
crop_img = img[mid_y-ch2:mid_y+ch2, mid_x-cw2:mid_x+cw2]
return crop_img
def _transform():
return Compose([
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
class VisualMatcher(object):
def __init__(self) -> None:
device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("ViT-B/32", device=device)
self.model = model
self.preprocess = preprocess
self.device = device
def convert_images(self, images):
data = []
for image in images:
img_tensor = self.preprocess(image).unsqueeze(0).to(self.device)
data.append(img_tensor)
data = torch.cat(data, dim=0)
return data
def convert_rgb_arr(self, images):
data = []
for image in images:
image = resize(image)
img_tensor = _transform()(image).unsqueeze(0).to(self.device)
data.append(img_tensor)
data = torch.cat(data, dim=0)
return data
def convert_texts(self, object_labels):
texts = []
for label in object_labels:
texts.append( f"A picture of a {label}" )
data = clip.tokenize(texts).to(self.device)
return data
def match_images(self, source_images, target_images, object_labels, use_text=True):
with torch.no_grad():
if type(source_images[0]) == np.ndarray:
convert_fn = self.convert_rgb_arr
else:
convert_fn = self.convert_images
source_data = convert_fn(source_images)
target_data = convert_fn(target_images)
source_features = self.model.encode_image(source_data)
target_features = self.model.encode_image(target_data)
source_features /= source_features.norm(dim=-1, keepdim=True)
target_features /= target_features.norm(dim=-1, keepdim=True)
if use_text:
text_data = self.convert_texts(object_labels)
text_features = self.model.encode_text(text_data)
text_features /= text_features.norm(dim=-1, keepdim=True)
source_text = (100.0 * source_features @ text_features.T).softmax(dim=-1)
target_text = (100.0 * target_features @ text_features.T).softmax(dim=-1)
source_target = (100.0 * source_text @ target_text.T).softmax(dim=-1).cpu().numpy()
else:
source_target = (100.0 * source_features @ target_features.T).softmax(dim=-1).cpu().numpy()
# https://docs.scipy.org/doc/scipy-0.18.1/reference/generated/scipy.optimize.linear_sum_assignment.html
source_target = -source_target
source_ids, target_ids = linear_sum_assignment(source_target)
return source_ids, target_ids
def match(self, source_rgb, source_dep, target_rgb, target_dep, texts ):
# UOC app
pass
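# A minimal usage sketch (the image lists and labels below are hypothetical):
#
#   matcher = VisualMatcher()
#   src_ids, tgt_ids = matcher.match_images(source_images, target_images,
#                                           ["mug", "bowl", "banana"], use_text=True)
#   # source_images[src_ids[i]] is matched to target_images[tgt_ids[i]]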
| 3,366 |
Python
| 30.467289 | 105 | 0.68984 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/VM/clip_test/predict.py
|
import os
import clip
import torch
from torchvision.datasets import CIFAR100
# Load the model
device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load('ViT-B/32', device)
# Download the dataset
cifar100 = CIFAR100(root=os.path.expanduser("./.cache"), download=True, train=False)
# Prepare the inputs
image, class_id = cifar100[3637]
image_input = preprocess(image).unsqueeze(0).to(device)
text_inputs = torch.cat([clip.tokenize(f"a photo of a {c}") for c in cifar100.classes]).to(device)
# Calculate features
with torch.no_grad():
image_features = model.encode_image(image_input)
text_features = model.encode_text(text_inputs)
# Pick the top 5 most similar labels for the image
image_features /= image_features.norm(dim=-1, keepdim=True)
text_features /= text_features.norm(dim=-1, keepdim=True)
similarity = (100.0 * image_features @ text_features.T).softmax(dim=-1)
values, indices = similarity[0].topk(5)
# Print the result
print("\nTop predictions:\n")
for value, index in zip(values, indices):
print(f"{cifar100.classes[index]:>16s}: {100 * value.item():.2f}%")
| 1,113 |
Python
| 33.812499 | 98 | 0.727763 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/VM/clip_test/test.py
|
import torch
import clip
from PIL import Image
import cv2
device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("ViT-B/32", device=device)
image_path = "./images/CLIP.png"
image_path = 'E:/dataset/lvis/val2017/000000454750.jpg'
image_path = 'E:/dataset/lvis/train2017/000000206427.jpg'
image_path = "./a.png"
texts = ["a zebra", 'artichoke', "a dog", "a diagram"]
image = preprocess(Image.open(image_path)).unsqueeze(0).to(device)
text = clip.tokenize(texts).to(device)
with torch.no_grad():
image_features = model.encode_image(image)
text_features = model.encode_text(text)
logits_per_image, logits_per_text = model(image, text)
probs = logits_per_image.softmax(dim=-1).cpu().numpy()
print("Label probs:", probs) # prints: [[0.9927937 0.00421068 0.00299572]]
| 821 |
Python
| 29.444443 | 76 | 0.699147 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/VM/data/organize_by_cat.py
|
import json
import os
json_path = "./split_train/lvis_common.json"
save_path = "./split_train/lvis_common_category.json"
# image_folder = "./val2017"
with open(json_path, 'r') as f:
data = json.load(f)
cats = data['categories']
annos = data['annotations']
images = data['images']
common_data = {}
all_valid_ids = []
for cat in cats:
# common, freq, rare
if cat['frequency'] == 'c' and cat['image_count'] > 1:
all_valid_ids.append(cat['id'])
common_data[ cat['id'] ] = {
'name': cat['name'],
'synset': cat['synset'],
'images': [],
'bboxes': []
}
for anno in annos:
cat_id = anno['category_id']
if cat_id in all_valid_ids and anno['area'] > 32*32:
# image_path = os.path.join( image_folder, '%012d.jpg' % anno['image_id'] )
# if os.path.exists(image_path):
if True:
common_data[cat_id]['images'].append(
anno['image_id']
)
common_data[cat_id]['bboxes'].append(
anno['bbox']
)
# remove categories with too few annotated images
cat_ids = list(common_data.keys())
for cat_id in cat_ids:
if len(common_data[cat_id]['images']) < 10:
common_data.pop(cat_id)
print(len(common_data.keys()))
with open(save_path, "w") as fp:
json.dump(common_data, fp)
| 1,342 |
Python
| 23.87037 | 83 | 0.544709 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/VM/data/split.py
|
import argparse
import json
import os
# https://github.com/ucbdrive/few-shot-object-detection/blob/master/datasets/split_lvis_annotation.py
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--data",
type=str,
default="data/lvis_v1_train.json",
help="path to the annotation file",
)
parser.add_argument(
"--save-dir",
type=str,
default="split_train",
help="path to the save directory",
)
args = parser.parse_args()
return args
def split_annotation(args):
with open(args.data) as fp:
ann_train = json.load(fp)
for s, name in [("f", "freq"), ("c", "common"), ("r", "rare")]:
ann_s = {
"info": ann_train["info"],
# 'images': ann_train['images'],
"categories": ann_train["categories"],
"licenses": ann_train["licenses"],
}
ids = [
cat["id"]
for cat in ann_train["categories"]
if cat["frequency"] == s
]
ann_s["annotations"] = [
ann
for ann in ann_train["annotations"]
if ann["category_id"] in ids
]
img_ids = set([ann["image_id"] for ann in ann_s["annotations"]])
new_images = [
img for img in ann_train["images"] if img["id"] in img_ids
]
ann_s["images"] = new_images
save_path = os.path.join(
args.save_dir, "lvis_{}.json".format(name)
)
print("Saving {} annotations to {}.".format(name, save_path))
with open(save_path, "w") as fp:
json.dump(ann_s, fp)
if __name__ == "__main__":
args = parse_args()
split_annotation(args)
| 1,737 |
Python
| 27.032258 | 101 | 0.52274 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/VM/data/README.md
|
## 数据集
来自 LVIS
> https://www.lvisdataset.org/dataset
## split.py
来自网上,这个数据集的类别根据频率(frequency)分为3种:common、frequency、rare,这个代码就是把json文件中的3个给分开
> https://github.com/ucbdrive/few-shot-object-detection/blob/master/datasets/split_lvis_annotation.py
## organize_by_cat.py
因为我想随机采样特定类物体的图片,就将json的数据按照类别整合,结构大概如下
'''
{
'xxxx cat_id': {
name:
images: [
xxx,
xxx
]
'bbox': [
xxx,
xxx
]
}
}
'''
Then only the common-category objects are extracted and saved; everything is kept under `split`.
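A minimal sketch of reading the reorganized file back (illustrative only; the path and the `bboxes` key follow `organize_by_cat.py` above):
'''
import json

with open("./split_train/lvis_common_category.json") as f:
    by_cat = json.load(f)

cat_id, entry = next(iter(by_cat.items()))
print(cat_id, entry["name"], len(entry["images"]), entry["bboxes"][0])
'''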
| 575 |
Markdown
| 16.999999 | 101 | 0.54087 |
SZU-AdvTech-2022/371-Semantically-Grounded-Object-Matching-for-Robust-Robotic-Scene-Rearrangement/VM/data/load_cat.py
|
import json
import numpy as np
import os
from PIL import Image
class Lvis(object):
def __init__(self, json_path, image_folders) -> None:
self.json_path = json_path
self.image_folders = image_folders
with open(self.json_path, 'r') as f:
data = json.load(f)
self.data = data
self.cat_ids = list(self.data.keys())
self.cat_num = len(self.cat_ids)
print(self.cat_num)
def random_images(self, index, num):
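        # Randomly pick up to `num` annotations from category `index` and return the
        # corresponding bbox-cropped PIL images together with the category name.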
cat_id = self.cat_ids[index]
cat_name = self.data[cat_id]['name']
bbox_num = len(self.data[cat_id]['bboxes'])
bbox_list = [i for i in range(bbox_num)]
if bbox_num < num:
num = bbox_num
sample_ids = np.random.choice(bbox_list, num, replace=False)
images = []
for sample_id in sample_ids:
image_id = self.data[cat_id]['images'][sample_id]
x,y,w,h = self.data[cat_id]['bboxes'][sample_id]
for folder in self.image_folders:
image_path = os.path.join( folder, "%012d.jpg" % image_id )
if os.path.exists(image_path):
break
img = Image.open( image_path )
crop_img = img.crop([x, y, x+w, y+h])
images.append(crop_img)
return images, cat_name
if __name__ == '__main__':
json_path = "./split_val/lvis_common_category.json"
image_folders = ["./train2017", "./val2017", "./test2017"]
dataset = Lvis(json_path, image_folders)
np.random.seed(6)
images, cat_name = dataset.random_images(3, 2)
print(cat_name)
a = 1
| 1,686 |
Python
| 27.116666 | 75 | 0.533215 |
mike-wise/kit-exts-spawn-prims/exts/omni.sphereflake/omni/sphereflake/sfcontrols.py
|
import omni.kit.commands as okc
import omni.usd
import time
import datetime
import json
import socket
import psutil
from pxr import Gf, Sdf, Usd, UsdGeom, UsdShade
from .ovut import MatMan, delete_if_exists, write_out_syspath, truncf
from .spheremesh import SphereMeshFactory
from .sphereflake import SphereFlakeFactory
import nvidia_smi
# import multiprocessing
import subprocess
from omni.services.core import main
import os
from .ovut import get_setting, save_setting
# import asyncio
# fflake8: noqa
def build_sf_set(sx: int = 0, nx: int = 1, nnx: int = 1,
sy: int = 0, ny: int = 1, nny: int = 1,
sz: int = 0, nz: int = 1, nnz: int = 1,
matname: str = "Mirror"):
# to test open a browser at http://localhost:8211/docs or 8011 or maybe 8111
stageid = omni.usd.get_context().get_stage_id()
pid = os.getpid()
msg = f"build_sf_set - x: {sx} {nx} {nnx} - y: {sy} {ny} {nny} - z: {sz} {nz} {nnz} mat:{matname}"
msg += f" - stageid: {stageid} pid:{pid}"
print(msg)
matman = MatMan()
smf = SphereMeshFactory(matman)
sff = SphereFlakeFactory(matman, smf)
sff.p_sf_matname = matname
sff.p_nsfx = nnx
sff.p_nsfy = nny
sff.p_nsfz = nnz
# sff.GenerateManySubcube(sx, sy, sz, nx, ny, nz)
return msg
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class SfControls():
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
_stage = None
_total_quads: int = 0
_matman: MatMan = None
_floor_xdim = 5
_floor_zdim = 5
_bounds_visible = False
_sf_size = 50
_vsc_test8 = False
sfw = None # We can't give this a type because it would be a circular reference
p_writelog = True
p_logseriesname = "None"
def __init__(self, matman: MatMan, smf: SphereMeshFactory, sff: SphereFlakeFactory):
print("SfControls __init__ (trc)")
self._matman = matman
self._count = 0
self._current_material_name = "Mirror"
self._current_alt_material_name = "Red_Glass"
self._current_bbox_material_name = "Blue_Glass"
self._current_floor_material_name = "Mirror"
self._matkeys = self._matman.GetMaterialNames()
self._total_quads = 0
self._sf_size = 50
# self._sf_matbox: ui.ComboBox = None
self._prims = ["Sphere", "Cube", "Cone", "Torus", "Cylinder", "Plane", "Disk", "Capsule",
"Billboard", "SphereMesh"]
self._curprim = self._prims[0]
self._sf_gen_modes = SphereFlakeFactory.GetGenModes()
self._sf_gen_mode = self._sf_gen_modes[0]
self._sf_gen_forms = SphereFlakeFactory.GetGenForms()
self._sf_gen_form = self._sf_gen_forms[0]
# self._genmodebox = ui.ComboBox(0, *self._sf_gen_modes).model
# self._genformbox = ui.ComboBox(0, *self._sf_gen_forms).model
self.smf = smf
self.sff = sff
self._write_out_syspath = False
if self._write_out_syspath:
write_out_syspath()
self.LoadSettings()
def LateInit(self):
# Register endpoints
try:
# there seems to be no main until a window is created
main.register_endpoint("get", "/sphereflake/build-sf-set", build_sf_set, tags=["Sphereflakes"])
except Exception as e:
print(f"Exception registering endpoint: {e}")
def Close(self):
try:
main.deregister_endpoint("get", "/sphereflake/build-sf-set")
except Exception as e:
print(f"Exception deregistering endpoint: {e}")
print("SfControls close")
def SaveSettings(self):
print("SfControls SaveSettings (trc)")
self.query_write_log()
save_setting("write_log", self.p_writelog)
save_setting("log_series_name", self.p_logseriesname)
def LoadSettings(self):
print("SfControls LoadSettings (trc)")
self.p_writelog = get_setting("write_log", True)
self.p_logseriesname = get_setting("log_series_name", "None")
def setup_environment(self, extent3f: Gf.Vec3f, force: bool = False):
ppathstr = "/World/Floor"
if force:
delete_if_exists(ppathstr)
prim_path_sdf = Sdf.Path(ppathstr)
        prim: Usd.Prim = self._stage.GetPrimAtPath(prim_path_sdf)
if not prim.IsValid():
okc.execute('CreateMeshPrimWithDefaultXform', prim_type="Plane", prim_path=ppathstr)
floormatname = self.get_curfloormat_name()
# omni.kit.commands.execute('BindMaterialCommand', prim_path='/World/Floor',
# material_path=f'/World/Looks/{floormatname}')
mtl = self._matman.GetMaterial(floormatname)
stage = omni.usd.get_context().get_stage()
prim: Usd.Prim = stage.GetPrimAtPath(ppathstr)
UsdShade.MaterialBindingAPI(prim).Bind(mtl)
# self._floor_xdim = extent3f[0] / 10
# self._floor_zdim = extent3f[2] / 10
self._floor_xdim = extent3f[0] / 100
self._floor_zdim = extent3f[2] / 100
okc.execute('TransformMultiPrimsSRTCpp',
count=1,
paths=[ppathstr],
new_scales=[self._floor_xdim, 1, self._floor_zdim])
baseurl = 'https://omniverse-content-production.s3.us-west-2.amazonaws.com'
okc.execute('CreateDynamicSkyCommand',
sky_url=f'{baseurl}/Assets/Skies/2022_1/Skies/Dynamic/CumulusLight.usd',
sky_path='/Environment/sky')
# print(f"nvidia_smi.__file__:{nvidia_smi.__file__}")
# print(f"omni.ui.__file__:{omni.ui.__file__}")
# print(f"omni.ext.__file__:{omni.ext.__file__}")
def ensure_stage(self):
# print("ensure_stage")
self._stage = omni.usd.get_context().get_stage()
# if self._stage is None:
# self._stage = omni.usd.get_context().get_stage()
# # print(f"ensure_stage got stage:{self._stage}")
# UsdGeom.SetStageUpAxis(self._stage, UsdGeom.Tokens.y)
# self._total_quads = 0
# extent3f = self.sff.GetSphereFlakeBoundingBox()
# self.setup_environment(extent3f)
def create_billboard(self, primpath: str, w: float = 860, h: float = 290):
UsdGeom.SetStageUpAxis(self._stage, UsdGeom.Tokens.y)
billboard = UsdGeom.Mesh.Define(self._stage, primpath)
w2 = w/2
h2 = h/2
pts = [(-w2, -h2, 0), (w2, -h2, 0), (w2, h2, 0), (-w2, h2, 0)]
ext = [(-w2, -h2, 0), (w2, h2, 0)]
billboard.CreatePointsAttr(pts)
billboard.CreateFaceVertexCountsAttr([4])
billboard.CreateFaceVertexIndicesAttr([0, 1, 2, 3])
billboard.CreateExtentAttr(ext)
texCoords = UsdGeom.PrimvarsAPI(billboard).CreatePrimvar("st", Sdf.ValueTypeNames.TexCoord2fArray,
UsdGeom.Tokens.varying)
texCoords.Set([(0, 0), (1, 0), (1, 1), (0, 1)])
return billboard
        # self.ensure_stage()  # unreachable: this call sits after the return above
# Tore:
# Remove _sf_size into smf (and sff?)
# def get_bool_model(self, option_name: str):
# bool_model = ui.SimpleBoolModel()
# return bool_model
def toggle_write_log(self):
self.p_writelog = not self.p_writelog
print(f"toggle_write_log is now:{self.p_writelog}")
def query_write_log(self):
self.p_writelog = self.sfw.writelog_checkbox_model.as_bool
self.p_logseriesname = self.sfw.writelog_seriesname_model.as_string
        print(f"query_write_log is now:{self.p_writelog} name:{self.p_logseriesname}")
def toggle_bounds(self):
self.ensure_stage()
self._bounds_visible = not self._bounds_visible
self.sfw._tog_bounds_but.text = f"Bounds:{self._bounds_visible}"
self.sff.ToggleBoundsVisiblity()
def on_click_billboard(self):
self.ensure_stage()
primpath = f"/World/Prim_Billboard_{self._count}"
billboard = self.create_billboard(primpath)
material = self.get_curmat_mat()
UsdShade.MaterialBindingAPI(billboard).Bind(material)
def on_click_spheremesh(self):
self.ensure_stage()
self.smf.GenPrep()
matname = self.get_curmat_name()
cpt = Gf.Vec3f(0, self._sf_size, 0)
primpath = f"/World/SphereMesh_{self._count}"
self._count += 1
self.smf.CreateMesh(primpath, matname, cpt, self._sf_size)
def update_radratio(self):
if self.sfw._sf_radratio_slider_model is not None:
val = self.sfw._sf_radratio_slider_model.as_float
self.sff.p_radratio = val
def on_click_sphereflake(self):
self.ensure_stage()
start_time = time.time()
sff = self.sff
sff.p_genmode = self.get_sf_genmode()
sff.p_genform = self.get_sf_genform()
sff.p_rad = self._sf_size
# print(f"slider: {type(self._sf_radratio_slider)}")
# sff._radratio = self._sf_radratio_slider.get_value_as_float()
self.update_radratio()
sff.p_sf_matname = self.get_curmat_name()
sff.p_sf_alt_matname = self.get_curaltmat_name()
sff.p_bb_matname = self.get_curmat_bbox_name()
cpt = Gf.Vec3f(0, self._sf_size, 0)
primpath = f"/World/SphereFlake_{self._count}"
self._count += 1
sff.Generate(primpath, cpt)
elap = time.time() - start_time
        self.sfw._statuslabel.text = f"SphereFlake took {elap:.2f} s"
self.UpdateStuff()
async def generate_sflakes(self):
sff = self.sff
sff._matman = self._matman
sff.p_genmode = self.get_sf_genmode()
sff.p_genform = self.get_sf_genform()
sff.p_rad = self._sf_size
self.update_radratio()
sff.p_sf_matname = self.get_curmat_name()
sff.p_sf_alt_matname = self.get_curaltmat_name()
sff.p_make_bounds_visible = self._bounds_visible
sff.p_bb_matname = self.get_curmat_bbox_name()
if sff.p_parallelRender:
await sff.GenerateManyParallel()
new_count = sff.p_nsfx*sff.p_nsfy*sff.p_nsfz
else:
new_count = sff.GenerateMany()
self._count += new_count
sff.SaveSettings()
def write_log(self, elap: float = 0.0):
self.query_write_log()
if self.p_writelog:
nflakes = self.sff.p_nsfx * self.sff.p_nsfz
ntris, nprims = self.sff.CalcTrisAndPrims()
gpuinfo = self._gpuinfo
om = float(1024*1024*1024)
hostname = socket.gethostname()
memused = psutil.virtual_memory().used
memtot = psutil.virtual_memory().total
memfree = psutil.virtual_memory().free
cores = psutil.cpu_count()
# msg = f"GPU Mem tot: {gpuinfo.total/om:.2f}: used: {gpuinfo.used/om:.2f} free: {gpuinfo.free/om:.2f} GB"
# msg += f"\nCPU cores: {cores}"
# msg += f"\nSys Mem tot: {memtot/om:.2f}: used: {memused/om:.2f} free: {memfree/om:.2f} GB"
rundict = {"0-seriesname": self.p_logseriesname,
"0-hostname": hostname,
"0-date": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
"1-genmode": self.sff.p_genmode,
"1-genform": self.sff.p_genform,
"1-depth": self.sff.p_depth,
"1-rad": self.sff.p_rad,
"1-radratio": self.sff.p_radratio,
"1-nsfx": self.sff.p_nsfx,
"1-nsfy": self.sff.p_nsfy,
"1-nsfz": self.sff.p_nsfz,
"2-tris": ntris,
"2-prims": nprims,
"2-nflakes": nflakes,
"2-elapsed": truncf(elap, 3),
"3-gpu_gbmem_tot": truncf(gpuinfo.total/om, 3),
"3-gpu_gbmem_used": truncf(gpuinfo.used/om, 3),
"3-gpu_gbmem_free": truncf(gpuinfo.free/om, 3),
"4-sys_gbmem_tot": truncf(memtot/om, 3),
"4-sys_gbmem_used": truncf(memused/om, 3),
"4-sys_gbmem_free": truncf(memfree/om, 3),
"5-cpu_cores": cores,
}
self.WriteRunLog(rundict)
async def on_click_multi_sphereflake(self):
self.ensure_stage()
# extent3f = self.sff.GetSphereFlakeBoundingBox()
extent3f = self.sff.GetSphereFlakeBoundingBoxNxNyNz()
self.setup_environment(extent3f, force=True)
start_time = time.time()
await self.generate_sflakes()
elap = time.time() - start_time
nflakes = self.sff.p_nsfx * self.sff.p_nsfz
        self.sfw._statuslabel.text = f"{nflakes} flakes took {elap:.2f} s"
self.UpdateStuff()
self.write_log(elap)
def spawnprim(self, primtype):
self.ensure_stage()
extent3f = self.sff.GetSphereFlakeBoundingBox()
self.setup_environment(extent3f, force=True)
if primtype == "Billboard":
self.on_click_billboard()
return
elif primtype == "SphereMesh":
self.on_click_spheremesh()
return
primpath = f"/World/Prim_{primtype}_{self._count}"
okc.execute('CreateMeshPrimWithDefaultXform', prim_type=primtype, prim_path=primpath)
material = self.get_curmat_mat()
self._count += 1
okc.execute('TransformMultiPrimsSRTCpp',
count=1,
paths=[primpath],
new_scales=[1, 1, 1],
new_translations=[0, 50, 0])
prim: Usd.Prim = self._stage.GetPrimAtPath(primpath)
UsdShade.MaterialBindingAPI(prim).Bind(material)
def on_click_writerunlog(self):
self.p_writelog = not self.p_writelog
self.sfw._sf_writerunlog_but.text = f"Write Perf Log: {self.p_writelog}"
def round_increment(self, val: int, butval: bool, maxval: int, minval: int = 0):
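        # Step val by +1 when butval is True, otherwise by -1, wrapping around
        # within the [minval, maxval] range.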
inc = 1 if butval else -1
val += inc
if val > maxval:
val = minval
if val < minval:
val = maxval
return val
def UpdateStuff(self):
self.UpdateNQuads()
self.UpdateMQuads()
self.UpdateGpuMemory()
def on_click_sfdepth(self, x, y, button, modifier):
depth = self.round_increment(self.sff.p_depth, button == 1, 5, 0)
self.sfw._sf_depth_but.text = f"Depth:{depth}"
self.sff.p_depth = depth
self.UpdateStuff()
def on_click_nlat(self, x, y, button, modifier):
nlat = self.round_increment(self.smf.p_nlat, button == 1, 16, 3)
self._sf_nlat_but.text = f"Nlat:{nlat}"
self.smf.p_nlat = nlat
self.UpdateStuff()
def on_click_nlng(self, x, y, button, modifier):
nlng = self.round_increment(self.smf.p_nlng, button == 1, 16, 3)
self._sf_nlng_but.text = f"Nlng:{nlng}"
self.smf.p_nlng = nlng
self.UpdateStuff()
def on_click_sfx(self, x, y, button, modifier):
nsfx = self.round_increment(self.sff.p_nsfx, button == 1, 20, 1)
self.sfw._nsf_x_but.text = f"SF - x:{nsfx}"
self.sff.p_nsfx = nsfx
self.UpdateStuff()
def toggle_parallel_render(self):
self.sff.p_parallelRender = not self.sff.p_parallelRender
self.sfw._parallel_render_but.text = f"Parallel Render: {self.sff.p_parallelRender}"
def on_click_parallel_nxbatch(self, x, y, button, modifier):
tmp = self.round_increment(self.sff.p_parallel_nxbatch, button == 1, self.sff.p_nsfx, 1)
self.sfw._parallel_nxbatch_but.text = f"SF batch x: {tmp}"
self.sff.p_parallel_nxbatch = tmp
print(f"on_click_parallel_nxbatch:{tmp}")
self.UpdateStuff()
def on_click_parallel_nybatch(self, x, y, button, modifier):
tmp = self.round_increment(self.sff.p_parallel_nybatch, button == 1, self.sff.p_nsfy, 1)
self.sfw._parallel_nybatch_but.text = f"SF batch y: {tmp}"
self.sff.p_parallel_nybatch = tmp
self.UpdateStuff()
def on_click_parallel_nzbatch(self, x, y, button, modifier):
tmp = self.round_increment(self.sff.p_parallel_nzbatch, button == 1, self.sff.p_nsfz, 1)
self.sfw._parallel_nzbatch_but.text = f"SF batch z: {tmp}"
self.sff.p_parallel_nzbatch = tmp
self.UpdateStuff()
def toggle_partial_render(self):
self.sff.p_partialRender = not self.sff.p_partialRender
self.sfw._partial_render_but.text = f"Partial Render: {self.sff.p_partialRender}"
def on_click_parital_sfsx(self, x, y, button, modifier):
tmp = self.round_increment(self.sff.p_partial_ssfx, button == 1, self.sff.p_nsfx-1, 0)
self.sfw._part_nsf_sx_but.text = f"SF partial sx: {tmp}"
self.sff.p_partial_ssfx = tmp
self.UpdateStuff()
def on_click_parital_sfsy(self, x, y, button, modifier):
tmp = self.round_increment(self.sff.p_partial_ssfy, button == 1, self.sff.p_nsfy-1, 0)
self.sfw._part_nsf_sy_but.text = f"SF partial sy: {tmp}"
self.sff.p_partial_ssfy = tmp
self.UpdateStuff()
def on_click_parital_sfsz(self, x, y, button, modifier):
tmp = self.round_increment(self.sff.p_partial_ssfz, button == 1, self.sff.p_nsfz-1, 0)
self.sfw._part_nsf_sz_but.text = f"SF partial sz: {tmp}"
self.sff.p_partial_ssfz = tmp
self.UpdateStuff()
def on_click_parital_sfnx(self, x, y, button, modifier):
tmp = self.round_increment(self.sff.p_partial_nsfx, button == 1, self.sff.p_nsfx, 1)
self.sfw._part_nsf_nx_but.text = f"SF partial nx: {tmp}"
self.sff.p_partial_nsfx = tmp
self.UpdateStuff()
def on_click_parital_sfny(self, x, y, button, modifier):
tmp = self.round_increment(self.sff.p_partial_nsfy, button == 1, self.sff.p_nsfy, 1)
self.sfw._part_nsf_ny_but.text = f"SF partial ny: {tmp}"
self.sff.p_partial_nsfy = tmp
self.UpdateStuff()
def on_click_parital_sfnz(self, x, y, button, modifier):
tmp = self.round_increment(self.sff.p_partial_nsfz, button == 1, self.sff.p_nsfz, 1)
self.sfw._part_nsf_nz_but.text = f"SF partial nz: {tmp}"
self.sff.p_partial_nsfz = tmp
self.UpdateStuff()
def on_click_sfy(self, x, y, button, modifier):
nsfy = self.round_increment(self.sff.p_nsfy, button == 1, 20, 1)
self.sfw._nsf_y_but.text = f"SF - y:{nsfy}"
self.sff.p_nsfy = nsfy
self.UpdateStuff()
def on_click_sfz(self, x, y, button, modifier):
nsfz = self.round_increment(self.sff.p_nsfz, button == 1, 20, 1)
self.sfw._nsf_z_but.text = f"SF - z:{nsfz}"
self.sff.p_nsfz = nsfz
self.UpdateStuff()
def on_click_spawnprim(self):
self.spawnprim(self._curprim)
    def xprocess(self):
pass
# print("xprocess started")
def on_click_launchxproc(self):
self.ensure_stage()
# cmdpath = "D:\\nv\\ov\\ext\\sphereflake-benchmark\\exts\\omni.sphereflake\\omni\\sphereflake"
subprocess.call(["python.exe"])
# subprocess.call([cmdpath,"hello.py"])
# print("launching xproc")
# p1 = multiprocessing.Process(target=self.xprocess)
# p1.start() # Casues app to stop servicing events
# self._xproc = XProcess(self._stage, self._curprim, self.smf, self.sff)
# self._xproc.start()
def on_click_clearprims(self):
self.ensure_stage()
# check and see what we have missed
worldprim = self._stage.GetPrimAtPath("/World")
for child_prim in worldprim.GetAllChildren():
cname = child_prim.GetName()
prefix = cname.split("_")[0]
dodelete = prefix in ["SphereFlake", "SphereMesh", "Prim"]
if dodelete:
# print(f"deleting {cname}")
cpath = child_prim.GetPrimPath()
self._stage.RemovePrim(cpath)
# okc.execute("DeletePrimsCommand", paths=[cpath])
self.smf.Clear()
self.sff.Clear()
self._count = 0
def on_click_changeprim(self):
idx = self._prims.index(self._curprim) + 1
if idx >= len(self._prims):
idx = 0
self._curprim = self._prims[idx]
self.sfw._sf_primtospawn_but.text = f"{self._curprim}"
def UpdateNQuads(self):
ntris, nprims = self.sff.CalcTrisAndPrims()
elap = SphereFlakeFactory.GetLastGenTime()
if self.sfw._sf_depth_but is not None:
            self.sfw._sf_spawn_but.text = f"Spawn SphereFlake\n tris:{ntris:,} prims:{nprims:,}\ngen: {elap:.2f} s"
def UpdateMQuads(self):
ntris, nprims = self.sff.CalcTrisAndPrims()
tottris = ntris*self.sff.p_nsfx*self.sff.p_nsfz
if self.sfw._msf_spawn_but is not None:
            self.sfw._msf_spawn_but.text = f"Multi SphereFlake\ntris:{tottris:,} prims:{nprims:,}"
def UpdateGpuMemory(self):
nvidia_smi.nvmlInit()
handle = nvidia_smi.nvmlDeviceGetHandleByIndex(0)
# card id 0 hardcoded here, there is also a call to get all available card ids, so we could iterate
gpuinfo = nvidia_smi.nvmlDeviceGetMemoryInfo(handle)
self._gpuinfo = gpuinfo
om = float(1024*1024*1024)
msg = f"GPU Mem tot: {gpuinfo.total/om:.2f}: used: {gpuinfo.used/om:.2f} free: {gpuinfo.free/om:.2f} GB"
memused = psutil.virtual_memory().used
memtot = psutil.virtual_memory().total
memfree = psutil.virtual_memory().free
msg += f"\nSys Mem tot: {memtot/om:.2f}: used: {memused/om:.2f} free: {memfree/om:.2f} GB"
cores = psutil.cpu_count()
msg += f"\nCPU cores: {cores}"
refcnt = self._matman.refCount
ftccnt = self._matman.fetchCount
skpcnt = self._matman.skipCount
msg += f"\n Materials ref: {refcnt} fetched: {ftccnt} skipped: {skpcnt}"
self.sfw._memlabel.text = msg
def get_curmat_mat(self):
idx = self.sfw._sf_matbox_model.as_int
self._current_material_name = self._matkeys[idx]
return self._matman.GetMaterial(self._current_material_name)
def get_curmat_name(self):
idx = self.sfw._sf_matbox_model.as_int
self._current_material_name = self._matkeys[idx]
return self._current_material_name
def get_curaltmat_mat(self):
idx = self.sfw._sf_alt_matbox_model.as_int
self._current_alt_material_name = self._matkeys[idx]
return self._matman.GetMaterial(self._current_alt_material_name)
def get_curaltmat_name(self):
idx = self.sfw._sf_alt_matbox_model.as_int
self._current_alt_material_name = self._matkeys[idx]
return self._current_alt_material_name
def get_curfloormat_mat(self):
idx = self.sfw._sf_floor_matbox_model.as_int
self._current_floor_material_name = self._matkeys[idx]
return self._matman.GetMaterial(self._current_floor_material_name)
def get_curmat_bbox_name(self):
idx = self.sfw._bb_matbox_model.as_int
self._current_bbox_material_name = self._matkeys[idx]
return self._current_bbox_material_name
def get_curmat_bbox_mat(self):
idx = self.sfw._bb_matbox_model.as_int
self._current_bbox_material_name = self._matkeys[idx]
return self._matman.GetMaterial(self._current_bbox_material_name)
def get_curfloormat_name(self):
idx = self.sfw._sf_floor_matbox_model.as_int
self._current_floor_material_name = self._matkeys[idx]
return self._current_floor_material_name
def get_sf_genmode(self):
idx = self.sfw._genmodebox_model.as_int
return self._sf_gen_modes[idx]
def get_sf_genform(self):
idx = self.sfw._genformbox_model.as_int
return self._sf_gen_forms[idx]
def WriteRunLog(self, rundict=None):
if rundict is None:
rundict = {}
jline = json.dumps(rundict, sort_keys=True)
fname = "d:/nv/ov/log.txt"
with open(fname, "a") as f:
f.write(f"{jline}\n")
print("wrote log")
| 24,594 |
Python
| 38.415064 | 119 | 0.591811 |
mike-wise/kit-exts-spawn-prims/exts/omni.sphereflake/omni/sphereflake/demo.py
|
import carb
import omni.ui as ui
from ._widgets import CheckBoxGroup, CheckBoxGroupModel, TabGroup, BaseTab
class DemoWindow(ui.Window):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
with self.frame:
with ui.VStack():
model = CheckBoxGroupModel(["Red", "Blue", "Green"])
self.cb_group = CheckBoxGroup("My CheckBox Group", model)
def checkbox_changed(option_name, value):
carb.log_info("This checkbox changed.")
carb.log_info(f"{option_name} is {value}")
def checkbox_group_changed(values):
carb.log_info("The state of my CheckBoxGroup is now:")
for name, value in values:
carb.log_info(f"{name} is {value}")
model.subscribe_value_changed_fn(checkbox_changed)
model.subscribe_group_changed_fn(checkbox_group_changed)
tab_group = TabGroup([MyTab1("Tab Header 1"), MyTab2("Tab Header 2"), MyTab3("Tab Header 3"),])
def destroy(self) -> None:
super().destroy()
self.cb_group.destroy()
class MyTab1(BaseTab):
def build_fn(self):
with ui.VStack(style={"margin":5}):
ui.Label("Hello!", alignment=ui.Alignment.CENTER, height=25)
ui.Label("Check out this TabGroup Widget.", alignment=ui.Alignment.CENTER)
ui.Spacer(height=40)
class MyTab2(BaseTab):
def build_fn(self):
with ui.VStack(style={"margin":5}):
with ui.HStack(spacing=2):
color_model = ui.ColorWidget(0.125, 0.25, 0.5, width=0, height=0).model
for item in color_model.get_item_children():
component = color_model.get_item_value_model(item)
ui.FloatDrag(component)
class MyTab3(BaseTab):
def build_fn(self):
with ui.VStack(style={"margin":5}):
with ui.HStack():
ui.Label("Red: ", height=25)
ui.FloatSlider()
with ui.HStack():
ui.Label("Green: ", height=25)
ui.FloatSlider()
with ui.HStack():
ui.Label("Blue: ", height=25)
ui.FloatSlider()
| 2,279 |
Python
| 37.644067 | 111 | 0.548047 |
mike-wise/kit-exts-spawn-prims/exts/omni.sphereflake/omni/sphereflake/styles.py
|
import omni.ui as ui
checkbox_group_style = {
"HStack::checkbox_row" : {
"margin_width": 18,
"margin": 2
},
"Label::cb_label": {
"margin_width": 10
}
}
tab_group_style = {
"TabGroupBorder": {
"background_color": ui.color.transparent,
"border_color": ui.color(25),
"border_width": 1
},
"Rectangle::TabGroupHeader" : {
"background_color": ui.color(20),
},
"ZStack::TabGroupHeader":{
"margin_width": 1
}
}
tab_style = {
"" : {
"background_color": ui.color(31),
"corner_flag": ui.CornerFlag.TOP,
"border_radius": 4,
"color": ui.color(127)
},
":selected": {
"background_color": ui.color(56),
"color": ui.color(203)
},
"Label": {
"margin_width": 5,
"margin_height": 3
}
}
| 862 |
Python
| 19.547619 | 49 | 0.49768 |
mike-wise/kit-exts-spawn-prims/exts/omni.sphereflake/omni/sphereflake/sphereflake.py
|
import omni.kit.commands as okc
import omni.usd
import carb
import time
import asyncio
import math
from pxr import Gf, Usd, UsdGeom, UsdShade
from .spheremesh import SphereMeshFactory
from . import ovut
from .ovut import MatMan, get_setting, save_setting
# import omni.services.client
import aiohttp
latest_sf_gen_time = 0
class SphereFlakeFactory():
_matman: MatMan = None
_smf: SphereMeshFactory = None
p_genmode = "UsdSphere"
p_genform = "Classic"
p_depth = 1
p_rad = 50
p_radratio = 0.3
p_nsfx = 1
p_nsfy = 1
p_nsfz = 1
p_partialRender = False
p_partial_ssfx = 0
p_partial_ssfy = 0
p_partial_ssfz = 0
p_partial_nsfx = 1
p_partial_nsfy = 1
p_partial_nsfz = 1
p_parallelRender = False
p_parallel_nxbatch = 1
p_parallel_nybatch = 1
p_parallel_nzbatch = 1
p_sf_matname = "Mirror"
p_sf_alt_matname = "Red_Glass"
p_bb_matname = "Blue_Glass"
p_make_bounds_visible = False
_start_time = 0
_createlist: list = []
_bbcubelist: list = []
_org = Gf.Vec3f(0, 0, 0)
_xax = Gf.Vec3f(1, 0, 0)
_yax = Gf.Vec3f(0, 1, 0)
_zax = Gf.Vec3f(0, 0, 1)
def __init__(self, matman: MatMan, smf: SphereMeshFactory) -> None:
# self._stage = omni.usd.get_context().get_stage()
self._count = 0
self._matman = matman
self._smf = smf
def GenPrep(self):
self._smf.GenPrep()
pass
def LoadSettings(self):
print("SphereFlakeFactory.LoadSettings (trc)")
self.p_genmode = get_setting("p_genmode", self.p_genmode)
self.p_genform = get_setting("p_genform", self.p_genform)
self.p_depth = get_setting("p_depth", self.p_depth)
self.p_rad = get_setting("p_rad", self.p_rad)
self.p_radratio = get_setting("p_radratio", self.p_radratio)
self.p_nsfx = get_setting("p_nsfx", self.p_nsfx, db=True)
self.p_nsfy = get_setting("p_nsfy", self.p_nsfy, db=True)
self.p_nsfz = get_setting("p_nsfz", self.p_nsfz, db=True)
self.p_partialRender = get_setting("p_partialRender", self.p_partialRender)
self.p_partial_ssfx = get_setting("p_partial_ssfx", self.p_partial_ssfx)
self.p_partial_ssfy = get_setting("p_partial_ssfy", self.p_partial_ssfy)
self.p_partial_ssfz = get_setting("p_partial_ssfz", self.p_partial_ssfz)
self.p_partial_nsfx = get_setting("p_partial_nsfx", self.p_partial_nsfx)
self.p_partial_nsfy = get_setting("p_partial_nsfy", self.p_partial_nsfy)
self.p_partial_nsfz = get_setting("p_partial_nsfz", self.p_partial_nsfz)
self.p_parallelRender = get_setting("p_parallelRender", self.p_parallelRender)
self.p_parallel_nxbatch = get_setting("p_parallel_nxbatch", self.p_parallel_nxbatch)
self.p_parallel_nybatch = get_setting("p_parallel_nybatch", self.p_parallel_nybatch)
self.p_parallel_nzbatch = get_setting("p_parallel_nzbatch", self.p_parallel_nzbatch)
self.p_sf_matname = get_setting("p_sf_matname", self.p_sf_matname)
self.p_sf_alt_matname = get_setting("p_sf_alt_matname", self.p_sf_alt_matname)
self.p_bb_matname = get_setting("p_bb_matname", self.p_bb_matname)
self.p_make_bounds_visible = get_setting("p_make_bounds_visible", self.p_make_bounds_visible)
print(f"SphereFlakeFactory.LoadSettings: p_nsfx:{self.p_nsfx} p_nsfy:{self.p_nsfy} p_nsfz:{self.p_nsfz}")
def SaveSettings(self):
print("SphereFlakeFactory.SaveSettings (trc)")
save_setting("p_genmode", self.p_genmode)
save_setting("p_genform", self.p_genform)
save_setting("p_depth", self.p_depth)
save_setting("p_rad", self.p_rad)
save_setting("p_radratio", self.p_radratio)
save_setting("p_nsfx", self.p_nsfx)
save_setting("p_nsfy", self.p_nsfy)
save_setting("p_nsfz", self.p_nsfz)
save_setting("p_partialRender", self.p_partialRender)
save_setting("p_partial_ssfx", self.p_partial_ssfx)
save_setting("p_partial_ssfy", self.p_partial_ssfy)
save_setting("p_partial_ssfz", self.p_partial_ssfz)
save_setting("p_partial_nsfx", self.p_partial_nsfx)
save_setting("p_partial_nsfy", self.p_partial_nsfy)
save_setting("p_partial_nsfz", self.p_partial_nsfz)
save_setting("p_parallelRender", self.p_parallelRender)
save_setting("p_parallel_nxbatch", self.p_parallel_nxbatch)
save_setting("p_parallel_nybatch", self.p_parallel_nybatch)
save_setting("p_parallel_nzbatch", self.p_parallel_nzbatch)
save_setting("p_sf_matname", self.p_sf_matname)
save_setting("p_sf_alt_matname", self.p_sf_alt_matname)
save_setting("p_bb_matname", self.p_bb_matname)
save_setting("p_make_bounds_visible", self.p_make_bounds_visible)
@staticmethod
def GetGenModes():
return ["UsdSphere", "DirectMesh", "AsyncMesh", "OmniSphere"]
@staticmethod
def GetGenForms():
return ["Classic", "Flat-8"]
def Clear(self):
self._createlist = []
self._bbcubelist = []
def Set(self, attname: str, val: float):
if hasattr(self, attname):
self.__dict__[attname] = val
else:
carb.log.error(f"SphereFlakeFactory.Set: no attribute {attname}")
def CalcQuadsAndPrims(self):
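        # Estimate totals for the whole flake: level i contains nring**i spheres
        # (9 children per sphere for the Classic form, 8 for Flat-8), and each sphere
        # mesh is tessellated into nlat * nlng quads.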
nring = 9 if self.p_genform == "Classic" else 8
nlat = self._smf.p_nlat
nlng = self._smf.p_nlng
totquads = 0
totprims = 0
for i in range(self.p_depth+1):
nspheres = nring**(i)
nquads = nspheres * nlat * nlng
totquads += nquads
totprims += nspheres
return totquads, totprims
def CalcTrisAndPrims(self):
totquads, totprims = self.CalcQuadsAndPrims()
return totquads * 2, totprims
def GetCenterPosition(self, ix: int, iy: int, iz: int,
extentvec: Gf.Vec3f, gap: float = 1.1):
nx = self.p_nsfx
# ny = self.p_nsfy
nz = self.p_nsfz
ixoff = (nx-1)/2
        iyoff = -0.28  # weird offset to make it match the height of a single sphereflake
izoff = (nz-1)/2
x = (ix-ixoff) * extentvec[0] * gap * 2
y = (iy-iyoff) * extentvec[1] * gap * 2
# y = extentvec[1]
z = (iz-izoff) * extentvec[2] * gap * 2
return Gf.Vec3f(x, y, z)
@staticmethod
def GetLastGenTime():
global latest_sf_gen_time
return latest_sf_gen_time
def SpawnBBcube(self, primpath, cenpt, extent, bbmatname):
stage = omni.usd.get_context().get_stage()
xformPrim = UsdGeom.Xform.Define(stage, primpath)
UsdGeom.XformCommonAPI(xformPrim).SetTranslate((cenpt[0], cenpt[1], cenpt[2]))
UsdGeom.XformCommonAPI(xformPrim).SetScale((extent[0], extent[1], extent[2]))
cube = UsdGeom.Cube.Define(stage, primpath)
mtl = self._matman.GetMaterial(bbmatname)
UsdShade.MaterialBindingAPI(cube).Bind(mtl)
return cube
def GetSphereFlakeBoundingBox(self) -> Gf.Vec3f:
# sz = rad + (1+(radratio))**depth # old method
sz = self.p_rad
nrad = sz
for i in range(self.p_depth):
nrad = self.p_radratio*nrad
sz += 2*nrad
return Gf.Vec3f(sz, sz, sz)
def GetSphereFlakeBoundingBoxNxNyNz(self, gap: float = 1.1) -> Gf.Vec3f:
# sz = rad + (1+(radratio))**depth # old method
ext = self.GetSphereFlakeBoundingBox()
fx = -1
fy = -1
fz = -1
lx = self.p_nsfx
ly = self.p_nsfy
lz = self.p_nsfz
lcorn = self.GetCenterPosition(fx, fy, fz, ext, gap)
rcorn = self.GetCenterPosition(lx, ly, lz, ext, gap)
rv = rcorn - lcorn
return rv
async def fetch(self, session, url):
async with session.get(url) as response:
return await response.text()
async def GenerateManyParallel(self):
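        # Split the nsfx x nsfy x nsfz grid into nxbatch x nybatch x nzbatch sub-cubes,
        # alternating the two sphereflake materials per batch; when doremote is enabled the
        # batches are also dispatched to the local /sphereflake/build-sf-set endpoint.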
nxchunk = math.ceil(self.p_nsfx / self.p_parallel_nxbatch)
nychunk = math.ceil(self.p_nsfy / self.p_parallel_nybatch)
nzchunk = math.ceil(self.p_nsfz / self.p_parallel_nzbatch)
print(f"GenerateManyParallel: self.p_nsfx:{self.p_nsfx} self.p_nsfy:{self.p_nsfy} self.p_nsfz:{self.p_nsfz}")
original_matname = self.p_sf_matname
original_alt_matname = self.p_sf_alt_matname
omatname = self.p_sf_matname
amatname = self.p_sf_alt_matname
ibatch = 0
sfcount = 0
print(f"GenerateManyParallel: nxchunk:{nxchunk} nychunk:{nychunk} nzchunk:{nzchunk}")
# available_trans_sync = omni.services.client.get_available_transports(is_async=False)
# available_trans_async = omni.services.client.get_available_transports(is_async=True)
# # availprot = omni.services.client.get_available_protocols()
# client = omni.services.client.AsyncClient("http://localhost:8211/sphereflake")
self._createlist = []
self._bbcubelist = []
tasks = []
doremote = False
if doremote:
baseurl = "http://localhost:8211/sphereflake/build-sf-set"
sess = aiohttp.ClientSession()
for iix in range(self.p_parallel_nxbatch):
for iiy in range(self.p_parallel_nybatch):
for iiz in range(self.p_parallel_nzbatch):
iixyz = iix + iiy + iiz
if iixyz % 2 == 0:
self.p_sf_matname = omatname
else:
self.p_sf_matname = amatname
print(f" GenerateManyParallel: batch:{ibatch} mat:{self.p_sf_matname}")
sx = iix*nxchunk
sy = iiy*nychunk
sz = iiz*nzchunk
nx = nxchunk
ny = nychunk
nz = nzchunk
nnx = self.p_nsfx
nny = self.p_nsfy
nnz = self.p_nsfz
nx = min(nx, nnx-sx)
ny = min(ny, nny-sy)
nz = min(nz, nnz-sz)
if doremote:
url = f"{baseurl}?matname={self.p_sf_matname}"
url += f"&sx={sx}&nx={nx}&nnx={nnx}"
url += f"&sy={sy}&ny={ny}&nny={nny}"
url += f"&sz={sz}&nz={nz}&nnz={nnz}"
t = asyncio.create_task(self.fetch(sess, url))
t.add_done_callback(tasks.remove)
tasks.append(t)
print(f"GMP sf_ - url:{url}")
sfcount += self.GenerateManySubcube(sx, sy, sz, nx, ny, nz)
ibatch += 1
if doremote:
print(f"GMP: sf_ waiting for tasks to complete ln:{len(tasks)}")
txts = await asyncio.gather(*tasks)
print(f"GMP: sf_ tasks completed")
for txt in txts:
print(f"GMP: sf_ txt:{txt}")
await sess.close()
self.p_sf_matname = original_matname
self.p_sf_alt_matname = original_alt_matname
return sfcount
def GenerateMany(self):
if self.p_partialRender:
sx = self.p_partial_ssfx
sy = self.p_partial_ssfy
sz = self.p_partial_ssfz
nx = self.p_partial_nsfx
ny = self.p_partial_nsfy
nz = self.p_partial_nsfz
else:
sx = 0
sy = 0
sz = 0
nx = self.p_nsfx
ny = self.p_nsfy
nz = self.p_nsfz
self._createlist = []
self._bbcubelist = []
sfcount = self.GenerateManySubcube(sx, sy, sz, nx, ny, nz)
return sfcount
def GenerateManySubcube(self, sx: int, sy: int, sz: int, nx: int, ny: int, nz: int) -> int:
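        # Generate an nx * ny * nz block of sphereflakes starting at grid indices
        # (sx, sy, sz); each flake gets a bounding-box cube whose visibility follows
        # p_make_bounds_visible.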
self.GenPrep()
cpt = Gf.Vec3f(0, self.p_rad, 0)
# extentvec = self.GetFlakeExtent(depth, self._rad, self._radratio)
extentvec = self.GetSphereFlakeBoundingBox()
count = self._count
for iix in range(nx):
for iiy in range(ny):
for iiz in range(nz):
ix = iix+sx
iy = iiy+sy
iz = iiz+sz
count += 1
# primpath = f"/World/SphereFlake_{count}"
primpath = f"/World/SphereFlake_{ix}_{iy}_{iz}__{nx}_{ny}_{nz}"
cpt = self.GetCenterPosition(ix, iy, iz, extentvec)
self.Generate(primpath, cpt)
self._createlist.append(primpath)
bnd_cubepath = primpath+"/bounds"
bnd_cube = self.SpawnBBcube(bnd_cubepath, cpt, extentvec, self.p_bb_matname)
self._bbcubelist.append(bnd_cubepath)
if self.p_make_bounds_visible:
UsdGeom.Imageable(bnd_cube).MakeVisible()
else:
UsdGeom.Imageable(bnd_cube).MakeInvisible()
return count
def ToggleBoundsVisiblity(self):
# print(f"ToggleBoundsVisiblity: {self._bbcubelist}")
okc.execute('ToggleVisibilitySelectedPrims', selected_paths=self._bbcubelist)
def Generate(self, sphflkname: str, cenpt: Gf.Vec3f):
global latest_sf_gen_time
self._start_time = time.time()
self._total_quads = 0
self._nring = 8
ovut.delete_if_exists(sphflkname)
stage = omni.usd.get_context().get_stage()
xformPrim = UsdGeom.Xform.Define(stage, sphflkname)
UsdGeom.XformCommonAPI(xformPrim).SetTranslate((0, 0, 0))
UsdGeom.XformCommonAPI(xformPrim).SetRotate((0, 0, 0))
mxdepth = self.p_depth
basept = cenpt
matname = self.p_sf_matname
self.GenRecursively(sphflkname, matname, mxdepth, self.p_depth, basept, cenpt, self.p_rad)
elap = time.time() - self._start_time
# print(f"GenerateSF {sphflkname} {matname} {depth} {cenpt} totquads:{self._total_quads} in {elap:.3f} secs")
latest_sf_gen_time = elap
def GenRecursively(self, sphflkname: str, matname: str, mxdepth: int, depth: int, basept: Gf.Vec3f,
cenpt: Gf.Vec3f, rad: float):
# xformPrim = UsdGeom.Xform.Define(self._stage, sphflkname)
# UsdGeom.XformCommonAPI(xformPrim).SetTranslate((0, 0, 0))
# UsdGeom.XformCommonAPI(xformPrim).SetRotate((0, 0, 0))
meshname = sphflkname + "/SphereMesh"
# spheremesh = UsdGeom.Mesh.Define(self._stage, meshname)
if self.p_genmode == "AsyncMesh":
meshname = sphflkname + "/SphereMeshAsync"
asyncio.ensure_future(self._smf.CreateMeshAsync(meshname, matname, cenpt, rad))
elif self.p_genmode == "DirectMesh":
meshname = sphflkname + "/SphereMesh"
self._smf.CreateMesh(meshname, matname, cenpt, rad)
elif self.p_genmode == "OmniSphere":
meshname = sphflkname + "/OmniSphere"
okc.execute('CreateMeshPrimWithDefaultXform', prim_type="Sphere", prim_path=meshname)
sz = rad/50 # 50 is the default radius of the sphere prim
okc.execute('TransformMultiPrimsSRTCpp',
count=1,
paths=[meshname],
new_scales=[sz, sz, sz],
new_translations=[cenpt[0], cenpt[1], cenpt[2]])
mtl = self._matman.GetMaterial(matname)
stage = omni.usd.get_context().get_stage()
prim: Usd.Prim = stage.GetPrimAtPath(meshname)
UsdShade.MaterialBindingAPI(prim).Bind(mtl)
elif self.p_genmode == "UsdSphere":
meshname = sphflkname + "/UsdSphere"
stage = omni.usd.get_context().get_stage()
xformPrim = UsdGeom.Xform.Define(stage, meshname)
sz = rad
UsdGeom.XformCommonAPI(xformPrim).SetTranslate((cenpt[0], cenpt[1], cenpt[2]))
UsdGeom.XformCommonAPI(xformPrim).SetScale((sz, sz, sz))
spheremesh = UsdGeom.Sphere.Define(stage, meshname)
mtl = self._matman.GetMaterial(matname)
UsdShade.MaterialBindingAPI(spheremesh).Bind(mtl)
if depth > 0:
form = self.p_genform
if form == "Classic":
thoff = 0
phioff = -20*math.pi/180
self._nring = 6
self.GenRing(sphflkname, "r1", matname, mxdepth, depth, basept, cenpt, 6, rad, thoff, phioff)
thoff = 30*math.pi/180
phioff = 55*math.pi/180
self._nring = 3
self.GenRing(sphflkname, "r2", matname, mxdepth, depth, basept, cenpt, 3, rad, thoff, phioff)
else:
thoff = 0
phioff = 0
self._nring = 8
self.GenRing(sphflkname, "r1", matname, mxdepth, depth, basept, cenpt, self._nring, rad, thoff, phioff)
def GenRing(self, sphflkname: str, ringname: str, matname: str, mxdepth: int, depth: int,
basept: Gf.Vec3f, cenpt: Gf.Vec3f,
nring: int, rad: float,
thoff: float, phioff: float):
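        # Build a local orthonormal frame (lxax, lyax, lzax) aligned with the parent offset
        # direction, then place nring child spheres evenly around a ring at elevation phioff,
        # each offset by (1 + radratio) * rad from cenpt and recursed with radius rad * radratio.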
offvek = cenpt - basept
        offlen = offvek.GetLength()
        if offlen > 0:
lxax = ovut.cross_product(offvek, self._yax)
if lxax.GetLength() == 0:
lxax = ovut.cross_product(offvek, self._zax)
lxax.Normalize()
lzax = ovut.cross_product(offvek, lxax)
lzax.Normalize()
lyax = offvek
lyax.Normalize()
else:
lxax = self._xax
lyax = self._yax
lzax = self._zax
nrad = rad * self.p_radratio
offfak = 1 + self.p_radratio
sphi = math.sin(phioff)
cphi = math.cos(phioff)
for i in range(nring):
theta = thoff + (i*2*math.pi/nring)
x = cphi*rad*math.sin(theta)
y = sphi*rad
z = cphi*rad*math.cos(theta)
npt = x*lxax + y*lyax + z*lzax
subname = f"{sphflkname}/{ringname}_sf_{i}"
self.GenRecursively(subname, matname, mxdepth, depth-1, cenpt, cenpt+offfak*npt, nrad)
| 18,316 |
Python
| 39.886161 | 119 | 0.567264 |
mike-wise/kit-exts-spawn-prims/exts/omni.sphereflake/omni/sphereflake/extension.py
|
import omni.ext # this needs to be included in an extension's extension.py
from .ovut import MatMan, write_out_syspath, write_out_path
from .sphereflake import SphereMeshFactory, SphereFlakeFactory
from .sfcontrols import SfControls
from .sfwindow import SfcWindow
import omni.usd
# Omni imports
import omni.client
import omni.usd_resolver
import os
# import contextlib
# @contextlib.asynccontextmanager
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class SphereflakeBenchmarkExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
_window_sfcon = None
_matman: MatMan = None
_smf: SphereMeshFactory = None
_sff: SphereFlakeFactory = None
_sfc: SfControls = None
_sfw: SfcWindow = None
_settings = None
def on_stage(self, ext_id):
print(f"[omni.sphereflake] SphereflakeBenchmarkExtension on_stage - ext_id: {ext_id} (trc)")
_stageid = omni.usd.get_context().get_stage_id()
self._stageid = _stageid
pid = os.getpid()
print(f"[omni.sphereflake] SphereflakeBenchmarkExtension on_stage - stageid: {_stageid} pid:{pid} (trc)")
self._window_sfcon.ensure_stage()
def WriteOutPathAndSysPath(self, basename="d:/nv/ov/sphereflake_benchmark"):
write_out_syspath(f"{basename}_syspath.txt")
write_out_path(f"{basename}_path.txt")
def on_startup(self, ext_id):
self._stageid = omni.usd.get_context().get_stage_id()
pid = os.getpid()
print(f"[omni.sphereflake] SphereflakeBenchmarkExtension on_startup - stageid:{self._stageid} pid:{pid} (trc)")
# Write out syspath and path
# self.WriteOutPathAndSysPath()
# Model objects
self._matman = MatMan()
self._smf = SphereMeshFactory(self._matman)
self._sff = SphereFlakeFactory(self._matman, self._smf)
self._sff.LoadSettings()
# Controller objects
self._sfc = SfControls(self._matman, self._smf, self._sff)
# View objects
self._sfw = SfcWindow(sfc=self._sfc)
self._sfw.DockWindow()
print("[omni.sphereflake] SphereflakeBenchmarkExtension on_startup - done (trc))")
def on_shutdown(self):
print("[omni.sphereflake] SphereflakeBenchmarkExtension on_shutdown (trc)")
self._sfc.SaveSettings()
self._sfw.SaveSettings()
self._sfc.Close()
self._sfw.destroy()
| 2,727 |
Python
| 37.422535 | 119 | 0.682435 |
mike-wise/kit-exts-spawn-prims/exts/omni.sphereflake/omni/sphereflake/hello.py
|
print("Hello world")
| 21 |
Python
| 9.999995 | 20 | 0.714286 |
mike-wise/kit-exts-spawn-prims/exts/omni.sphereflake/omni/sphereflake/_widgets.py
|
from functools import partial
from typing import List
import omni.ui as ui
from . import styles
class CheckBoxGroupModel:
def __init__(self, option_names:List):
self.options = []
self.bool_models = []
self.subscriptions = []
self._single_callbacks = []
self._group_callbacks = []
for option in option_names:
self.add_checkbox_option(option)
def add_checkbox_option(self, option_name):
self.options.append(option_name)
bool_model = ui.SimpleBoolModel()
next_index = len(self.bool_models)
self.bool_models.append(bool_model)
self.subscriptions.append(bool_model.subscribe_value_changed_fn(partial(self.on_model_value_changed, next_index)))
return bool_model
def subscribe_value_changed_fn(self, callback_fn):
self._single_callbacks.append(callback_fn)
def subscribe_group_changed_fn(self, callback_fn):
self._group_callbacks.append(callback_fn)
def on_model_value_changed(self, index:int, model:ui.SimpleBoolModel):
for callback in self._single_callbacks:
option = self.options[index]
callback(option, model.as_bool)
for callback in self._group_callbacks:
checkbox_values = []
for name, bool_model in zip(self.options, self.bool_models):
checkbox_values.append((name, bool_model.as_bool))
callback(checkbox_values)
def get_bool_model(self, option_name):
index = self.options.index(option_name)
return self.bool_models[index]
def get_checkbox_options(self):
return self.options
def destroy(self):
self.subscriptions = None
self._single_callbacks = None
self._group_callbacks = None
class CheckBoxGroup:
def __init__(self, group_name:str, model:CheckBoxGroupModel):
self.group_name = group_name
self.model = model
self._build_widget()
def _build_widget(self):
with ui.VStack(width=0, height=0, style=styles.checkbox_group_style):
ui.Label(f"{self.group_name}:")
for option in self.model.get_checkbox_options():
with ui.HStack(name="checkbox_row", width=0, height=0):
ui.CheckBox(model=self.model.get_bool_model(option))
ui.Label(option, name="cb_label")
def destroy(self):
self.model.destroy()
class BaseTab:
def __init__(self, name):
self.name = name
def build_fn(self):
"""Builds the contents for the tab.
You must implement this function with the UI construction code that you want for
        your tab. This is set to be called by a ui.Frame so it must have only a single
top-level widget.
"""
raise NotImplementedError("You must implement Tab.build_fn")
class TabGroup:
def __init__(self, tabs: List[BaseTab]):
self.frame = ui.Frame(build_fn=self._build_widget)
if not tabs:
raise ValueError("You must provide at least one BaseTab object.")
self.tabs = tabs
self.tab_containers = []
self.tab_headers = []
def _build_widget(self):
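        # A header row of clickable tab labels sits above a ZStack of per-tab frames;
        # select_tab() keeps exactly one frame visible and one header marked selected.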
with ui.ZStack(style=styles.tab_group_style):
ui.Rectangle(style_type_name_override="TabGroupBorder")
with ui.VStack():
ui.Spacer(height=1)
with ui.ZStack(height=0, name="TabGroupHeader"):
ui.Rectangle(name="TabGroupHeader")
with ui.VStack():
ui.Spacer(height=2)
with ui.HStack(height=0, spacing=4):
for x, tab in enumerate(self.tabs):
tab_header = ui.ZStack(width=0, style=styles.tab_style)
self.tab_headers.append(tab_header)
with tab_header:
rect = ui.Rectangle()
rect.set_mouse_released_fn(partial(self._tab_clicked, x))
ui.Label(tab.name)
with ui.ZStack():
for x, tab in enumerate(self.tabs):
container_frame = ui.Frame(build_fn=tab.build_fn)
self.tab_containers.append(container_frame)
container_frame.visible = False
# Initialize first tab
self.select_tab(0)
def select_tab(self, index: int):
for x in range(len(self.tabs)):
if x == index:
self.tab_containers[x].visible = True
self.tab_headers[x].selected = True
else:
self.tab_containers[x].visible = False
self.tab_headers[x].selected = False
def _tab_clicked(self, index, x, y, button, modifier):
if button == 0:
self.select_tab(index)
def append_tab(self, tab: BaseTab):
pass
def destroy(self):
self.frame.destroy()
| 5,092 |
Python
| 35.905797 | 122 | 0.5652 |
mike-wise/kit-exts-spawn-prims/exts/omni.sphereflake/omni/sphereflake/ovut.py
|
import omni.kit.commands as okc
import omni.usd
import os
import sys
import math
from pxr import Gf, Sdf, UsdShade
from typing import Tuple, List
import carb.settings
_settings = None
def _init_settings():
global _settings
if _settings is None:
_settings = carb.settings.get_settings()
return _settings
def get_setting(name, default, db=False):
settings = _init_settings()
key = f"/persistent/omni/sphereflake/{name}"
val = settings.get(key)
if db:
oval = val
if oval is None:
oval = "None"
if val is None:
val = default
if db:
print(f"get_setting {name} {oval} {val}")
return val
def save_setting(name, value):
settings = _init_settings()
key = f"/persistent/omni/sphereflake/{name}"
settings.set(key, value)
def truncf(number, digits) -> float:
# Improve accuracy with floating point operations, to avoid truncate(16.4, 2) = 16.39 or truncate(-1.13, 2) = -1.12
    numstr = str(number)
    if '.' not in numstr:  # nothing to truncate for integral values
        return number
    nbDecimals = len(numstr.split('.')[1])
if nbDecimals <= digits:
return number
stepper = 10.0 ** digits
return math.trunc(stepper * number) / stepper
def delete_if_exists(primpath: str) -> None:
ctx = omni.usd.get_context()
stage = ctx.get_stage()
if stage.GetPrimAtPath(primpath):
stage.RemovePrim(primpath)
# okc.execute("DeletePrimsCommand", paths=[primpath])
def cross_product(v1: Gf.Vec3f, v2: Gf.Vec3f) -> Gf.Vec3f:
x = v1[1] * v2[2] - v1[2] * v2[1]
y = v1[2] * v2[0] - v1[0] * v2[2]
z = v1[0] * v2[1] - v1[1] * v2[0]
rv = Gf.Vec3f(x, y, z)
return rv
def write_out_path(fname: str = 'd:/nv/ov/path.txt') -> None:
# Write out the path to a file
path = os.environ["PATH"]
with open(fname, "w") as f:
npath = path.replace(";", "\n")
f.write(npath)
def write_out_syspath(fname: str = 'd:/nv/ov/syspath.txt', indent=False) -> None:
# Write out the python syspath to a file
    # Indent should be True if it is to be used for the settings.json python.analysis.extraPaths setting
pplist = sys.path
with open(fname, 'w') as f:
for line in pplist:
nline = line.replace("\\", "/")
if indent:
nnline = f" \"{nline}\",\n"
else:
nnline = f"\"{nline}\",\n"
f.write(nnline)
def read_in_syspath(fname: str = 'd:/nv/ov/syspath.txt') -> None:
# Read in the python path from a file
with open(fname, 'r') as f:
for line in f:
nline = line.replace(',', '')
nline = nline.replace('"', '')
nline = nline.replace('"', '')
nline = nline.replace('\n', '')
nline = nline.replace(' ', '')
sys.path.append(nline)
class MatMan():
matlib = {}
def __init__(self) -> None:
self.CreateMaterials()
pass
def MakePreviewSurfaceTexMateral(self, matname: str, fname: str):
# This is all materials
matpath = "/World/Looks"
mlname = f'{matpath}/boardMat_{fname.replace(".","_")}'
stage = omni.usd.get_context().get_stage()
material = UsdShade.Material.Define(stage, mlname)
pbrShader = UsdShade.Shader.Define(stage, f'{mlname}/PBRShader')
pbrShader.CreateIdAttr("UsdPreviewSurface")
pbrShader.CreateInput("roughness", Sdf.ValueTypeNames.Float).Set(0.4)
pbrShader.CreateInput("metallic", Sdf.ValueTypeNames.Float).Set(0.0)
material.CreateSurfaceOutput().ConnectToSource(pbrShader.ConnectableAPI(), "surface")
stReader = UsdShade.Shader.Define(stage, f'{matpath}/stReader')
stReader.CreateIdAttr('UsdPrimvarReader_float2')
diffuseTextureSampler = UsdShade.Shader.Define(stage, f'{matpath}/diffuseTexture')
diffuseTextureSampler.CreateIdAttr('UsdUVTexture')
ASSETS_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
# print(f"ASSETS_DIRECTORY {ASSETS_DIRECTORY}")
texfile = f"{ASSETS_DIRECTORY}\\{fname}"
# print(texfile)
# print(os.path.exists(texfile))
diffuseTextureSampler.CreateInput('file', Sdf.ValueTypeNames.Asset).Set(texfile)
diffuseTextureSampler.CreateInput("st", Sdf.ValueTypeNames.Float2).ConnectToSource(stReader.ConnectableAPI(),
'result')
diffuseTextureSampler.CreateOutput('rgb', Sdf.ValueTypeNames.Float3)
pbrShader.CreateInput("diffuseColor", Sdf.ValueTypeNames.Color3f).ConnectToSource(
diffuseTextureSampler.ConnectableAPI(), 'rgb')
stInput = material.CreateInput('frame:stPrimvarName', Sdf.ValueTypeNames.Token)
stInput.Set('st')
stReader.CreateInput('varname', Sdf.ValueTypeNames.Token).ConnectToSource(stInput)
self.matlib[matname]["mat"] = material
return material
def SplitRgb(self, rgb: str) -> Tuple[float, float, float]:
sar = rgb.split(",")
r = float(sar[0])
g = float(sar[1])
b = float(sar[2])
return (r, g, b)
def MakePreviewSurfaceMaterial(self, matname: str, rgb: str):
mtl_path = Sdf.Path(f"/World/Looks/Presurf_{matname}")
stage = omni.usd.get_context().get_stage()
mtl = UsdShade.Material.Define(stage, mtl_path)
shader = UsdShade.Shader.Define(stage, mtl_path.AppendPath("Shader"))
shader.CreateIdAttr("UsdPreviewSurface")
rgbtup = self.SplitRgb(rgb)
shader.CreateInput("diffuseColor", Sdf.ValueTypeNames.Color3f).Set(rgbtup)
shader.CreateInput("roughness", Sdf.ValueTypeNames.Float).Set(0.5)
shader.CreateInput("metallic", Sdf.ValueTypeNames.Float).Set(0.0)
mtl.CreateSurfaceOutput().ConnectToSource(shader.ConnectableAPI(), "surface")
# self.matlib[matname] = {"name": matname, "typ": "mtl", "mat": mtl}
self.matlib[matname]["mat"] = mtl
return mtl
refCount: int = 0
fetchCount: int = 0
skipCount: int = 0
def CopyRemoteMaterial(self, matname, urlbranch, force=False):
print(f"CopyRemoteMaterial matname:{matname} urlbranch:{urlbranch} force:{force}")
stage = omni.usd.get_context().get_stage()
baseurl = 'https://omniverse-content-production.s3.us-west-2.amazonaws.com'
url = f'{baseurl}/Materials/{urlbranch}.mdl'
mpath = f'/World/Looks/{matname}'
action = ""
# Note we should not execute the next command if the material already exists
if force or not stage.GetPrimAtPath(mpath):
okc.execute('CreateMdlMaterialPrimCommand', mtl_url=url, mtl_name=matname, mtl_path=mpath)
action = "fetch"
self.fetchCount += 1
else:
action = "skip"
self.skipCount += 1
mtl: UsdShade.Material = UsdShade.Material(stage.GetPrimAtPath(mpath))
print(f"CopyRemoteMaterial {mpath} mtl:{mtl} action:{action}")
# self.matlib[matname] = {"name": matname, "typ": "rgb", "mat": mtl}
self.matlib[matname]["mat"] = mtl
return mtl
def RealizeMaterial(self, matname: str):
typ = self.matlib[matname]["typ"]
spec = self.matlib[matname]["spec"]
if typ == "mtl":
self.CopyRemoteMaterial(matname, spec)
elif typ == "tex":
self.MakePreviewSurfaceTexMateral(matname, spec)
else:
self.MakePreviewSurfaceMaterial(matname, spec)
self.matlib[matname]["realized"] = True
def SetupMaterial(self, matname: str, typ: str, spec: str):
# print(f"SetupMaterial {matname} {typ} {spec}")
matpath = f"/World/Looks/{matname}"
self.matlib[matname] = {"name": matname,
"typ": typ,
"mat": None,
"path": matpath,
"realized": False,
"spec": spec}
def CreateMaterials(self):
self.SetupMaterial("red", "rgb", "1,0,0")
self.SetupMaterial("green", "rgb", "0,1,0")
self.SetupMaterial("blue", "rgb", "0,0,1")
self.SetupMaterial("yellow", "rgb", "1,1,0")
self.SetupMaterial("cyan", "rgb", "0,1,1")
self.SetupMaterial("magenta", "rgb", "1,0,1")
self.SetupMaterial("white", "rgb", "1,1,1")
self.SetupMaterial("black", "rgb", "0,0,0")
self.SetupMaterial("Blue_Glass", "mtl", "Base/Glass/Blue_Glass")
self.SetupMaterial("Red_Glass", "mtl", "Base/Glass/Red_Glass")
self.SetupMaterial("Green_Glass", "mtl", "Base/Glass/Green_Glass")
self.SetupMaterial("Clear_Glass", "mtl", "Base/Glass/Clear_Glass")
self.SetupMaterial("Mirror", "mtl", "Base/Glass/Mirror")
self.SetupMaterial("sunset_texture", "tex", "sunset.png")
def GetMaterialNames(self) -> List[str]:
return list(self.matlib.keys())
def GetMaterial(self, key):
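        # Lazily realize the material (copy the MDL or build the preview surface) on first
        # use and return the cached UsdShade.Material afterwards; unknown keys return None.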
self.refCount += 1
if key in self.matlib:
if not self.matlib[key]["realized"]:
self.RealizeMaterial(key)
rv = self.matlib[key]["mat"]
else:
rv = None
return rv
| 9,205 |
Python
| 37.041322 | 119 | 0.596306 |
mike-wise/kit-exts-spawn-prims/exts/omni.sphereflake/omni/sphereflake/sfwindow.py
|
import carb.events
import omni.ui as ui
from omni.ui import color as clr
import asyncio
from ._widgets import TabGroup, BaseTab
from .sphereflake import SphereMeshFactory, SphereFlakeFactory
from .sfcontrols import SfControls
from .ovut import get_setting, save_setting
class SfcWindow(ui.Window):
darkgreen = clr("#004000")
darkblue = clr("#000040")
darkred = clr("#400000")
darkyellow = clr("#404000")
darkpurple = clr("#400040")
darkcyan = clr("#004040")
marg = 2
# Status
_statuslabel: ui.Label = None
_memlabel: ui.Label = None
# Sphereflake params
prframe: ui.CollapsableFrame = None
drframe: ui.CollapsableFrame = None
docollapse_prframe = False
docollapse_drframe = False
_sf_depth_but: ui.Button = None
_sf_spawn_but: ui.Button = None
_sf_nlat_but: ui.Button = None
_sf_nlng_but: ui.Button = None
_sf_radratio_slider_model: ui.SimpleFloatModel = None
_genmodebox: ui.ComboBox = None
_genmodebox_model: ui.SimpleIntModel = None
_genformbox: ui.ComboBox = None
_genformbox_model: ui.SimpleIntModel = None
# Material tab
_sf_matbox: ui.ComboBox = None
_sf_matbox_model: ui.SimpleIntModel = None
_sf_alt_matbox: ui.ComboBox = None
_sf_alt_matbox_model: ui.SimpleIntModel = None
_bb_matbox: ui.ComboBox = None
_bb_matbox_model: ui.SimpleIntModel = None
_sf_floor_matbox: ui.ComboBox = None
_sf_floor_matbox_model: ui.SimpleIntModel = None
# Options
writelog_checkbox: ui.CheckBox = None
writelog_checkbox_model = None
writelog_seriesname: ui.StringField = None
writelog_seriesname_model = None
# state
sfc: SfControls
smf: SphereMeshFactory
sff: SphereFlakeFactory
def __init__(self, *args, **kwargs):
super().__init__(title="SphereFlake Controls", height=300, width=300, *args, **kwargs)
print(f"SfcWindow.__init__ (trc)")
self.sfc = kwargs["sfc"]
self.sfc.sfw = self # intentionally circular
self.smf = self.sfc.smf
self.sff = self.sfc.sff
self.LoadSettings()
self.BuildControlModels()
self.BuildWindow()
self.sfc.LateInit()
def BuildControlModels(self):
# models for controls that are used in the logic need to be built outside the build_fn
# since that will only be called when the tab is selected and displayed
sfc = self.sfc
sff = sfc.sff
# sphereflake params
self._sf_radratio_slider_model = ui.SimpleFloatModel(sff.p_radratio)
idx = sff.GetGenModes().index(sff.p_genmode)
self._genmodebox_model = ui.SimpleIntModel(idx)
idx = sff.GetGenForms().index(sff.p_genform)
self._genformbox_model = ui.SimpleIntModel(idx)
# materials
matlist = sfc._matkeys
idx = matlist.index(sff.p_sf_matname)
self._sf_matbox_model = ui.SimpleIntModel(idx)
idx = matlist.index(sff.p_sf_alt_matname)
self._sf_alt_matbox_model = ui.SimpleIntModel(idx)
idx = matlist.index(sff.p_bb_matname)
self._bb_matbox_model = ui.SimpleIntModel(idx)
idx = matlist.index(sfc._current_floor_material_name)
self._sf_floor_matbox_model = ui.SimpleIntModel(idx)
# options
self.writelog_checkbox_model = ui.SimpleBoolModel(sfc.p_writelog)
self.writelog_seriesname_model = ui.SimpleStringModel(sfc.p_logseriesname)
def BuildWindow(self):
print("SfcWindow.BuildWindow (trc)")
sfc = self.sfc
with self.frame:
with ui.VStack():
t1 = SfcTabMulti(self)
t2 = SfcTabSphereFlake(self)
t3 = SfcTabShapes(self)
t4 = SfcTabMaterials(self)
t5 = SfcTabOptions(self)
self.tab_group = TabGroup([t1, t2, t3, t4, t5])
self._statuslabel = ui.Label("Status: Ready")
self._memlabel = ui.Button("Memory tot/used/free", clicked_fn=sfc.UpdateGpuMemory)
ui.Button("Clear Primitives",
style={'background_color': self.darkyellow},
clicked_fn=lambda: sfc.on_click_clearprims())
def DockWindow(self, wintitle="Property"):
print(f"Docking to {wintitle} (trc)")
handle = ui._ui.Workspace.get_window(wintitle)
self.dock_in(handle, ui._ui.DockPosition.SAME)
self.deferred_dock_in(wintitle, ui._ui.DockPolicy.TARGET_WINDOW_IS_ACTIVE)
def LoadSettings(self):
# print("SfcWindow.LoadSettings")
self.docollapse_prframe = get_setting("ui_pr_frame_collapsed", False)
self.docollapse_drframe = get_setting("ui_dr_frame_collapsed", False)
# print(f"docollapse_prframe: {self.docollapse_prframe} docollapse_drframe: {self.docollapse_drframe}")
def SaveSettings(self):
# print("SfcWindow.SaveSettings")
if (self.prframe is not None):
save_setting("ui_pr_frame_collapsed", self.prframe.collapsed)
if (self.drframe is not None):
save_setting("ui_dr_frame_collapsed", self.drframe.collapsed)
# print(f"docollapse_prframe: {self.prframe.collapsed} docollapse_drframe: {self.drframe.collapsed}")
class SfcTabMulti(BaseTab):
sfw: SfcWindow
sfc: SfControls
def __init__(self, sfw: SfcWindow):
super().__init__("Multi")
self.sfw = sfw
self.sfc = sfw.sfc
# print(f"SfcTabMulti.init {type(sfc)}")
def build_fn(self):
print("SfcTabMulti.build_fn (trc)")
sfw: SfcWindow = self.sfw
sfc: SfControls = self.sfc
sff: SphereFlakeFactory = self.sfw.sff
# print(f"SfcTabMulti.build_fn {type(sfc)}")
with ui.VStack(style={"margin": sfw.marg}):
with ui.VStack():
with ui.HStack():
clkfn = lambda: asyncio.ensure_future(sfc.on_click_multi_sphereflake()) # noqa : E731
                    sfw._msf_spawn_but = ui.Button("Multi SphereFlake",
style={'background_color': sfw.darkred},
clicked_fn=clkfn)
with ui.VStack(width=200):
clkfn = lambda x, y, b, m: sfc.on_click_sfx(x, y, b, m) # noqa : E731
sfw._nsf_x_but = ui.Button(f"SF x: {sff.p_nsfx}",
style={'background_color': sfw.darkblue},
mouse_pressed_fn=clkfn)
clkfn = lambda x, y, b, m: sfc.on_click_sfy(x, y, b, m) # noqa : E731
sfw._nsf_y_but = ui.Button(f"SF y: {sff.p_nsfy}",
style={'background_color': sfw.darkblue},
mouse_pressed_fn=clkfn)
clkfn = lambda x, y, b, m: sfc.on_click_sfz(x, y, b, m) # noqa : E731
sfw._nsf_z_but = ui.Button(f"SF z: {sff.p_nsfz}",
style={'background_color': sfw.darkblue},
mouse_pressed_fn=clkfn)
sfw._tog_bounds_but = ui.Button(f"Bounds:{sfc._bounds_visible}",
style={'background_color': sfw.darkcyan},
clicked_fn=sfc.toggle_bounds)
sfw.prframe = ui.CollapsableFrame("Partial Renders", collapsed=sfw.docollapse_prframe)
with sfw.prframe:
with ui.VStack():
sfw._partial_render_but = ui.Button(f"Partial Render {sff.p_partialRender}",
style={'background_color': sfw.darkcyan},
clicked_fn=sfc.toggle_partial_render)
with ui.HStack():
clkfn = lambda x, y, b, m: sfc.on_click_parital_sfsx(x, y, b, m) # noqa : E731
sfw._part_nsf_sx_but = ui.Button(f"SF partial sx: {sff.p_partial_ssfx}",
style={'background_color': sfw.darkblue},
mouse_pressed_fn=clkfn)
clkfn = lambda x, y, b, m: sfc.on_click_parital_sfsy(x, y, b, m) # noqa : E731
sfw._part_nsf_sy_but = ui.Button(f"SF partial sy: {sff.p_partial_ssfy}",
style={'background_color': sfw.darkblue},
mouse_pressed_fn=clkfn)
clkfn = lambda x, y, b, m: sfc.on_click_parital_sfsz(x, y, b, m) # noqa : E731
sfw._part_nsf_sz_but = ui.Button(f"SF partial sz: {sff.p_partial_ssfz}",
style={'background_color': sfw.darkblue},
mouse_pressed_fn=clkfn)
with ui.HStack():
clkfn = lambda x, y, b, m: sfc.on_click_parital_sfnx(x, y, b, m) # noqa : E731
sfw._part_nsf_nx_but = ui.Button(f"SF partial nx: {sff.p_partial_nsfx}",
style={'background_color': sfw.darkblue},
mouse_pressed_fn=clkfn)
clkfn = lambda x, y, b, m: sfc.on_click_parital_sfny(x, y, b, m) # noqa : E731
sfw._part_nsf_ny_but = ui.Button(f"SF partial ny: {sff.p_partial_nsfy}",
style={'background_color': sfw.darkblue},
mouse_pressed_fn=clkfn)
clkfn = lambda x, y, b, m: sfc.on_click_parital_sfnz(x, y, b, m) # noqa : E731
sfw._part_nsf_nz_but = ui.Button(f"SF partial nz: {sff.p_partial_nsfz}",
style={'background_color': sfw.darkblue},
mouse_pressed_fn=clkfn)
sfw.drframe = ui.CollapsableFrame("Distributed Renders", collapsed=sfw.docollapse_drframe)
with sfw.drframe:
with ui.VStack():
sfw._parallel_render_but = ui.Button(f"Distributed Render {sff.p_parallelRender}",
style={'background_color': sfw.darkcyan},
clicked_fn=sfc.toggle_parallel_render)
with ui.HStack():
clkfn = lambda x, y, b, m: sfc.on_click_parallel_nxbatch(x, y, b, m) # noqa : E731
sfw._parallel_nxbatch_but = ui.Button(f"SF batch x: {sff.p_parallel_nxbatch}",
style={'background_color': sfw.darkblue},
mouse_pressed_fn=clkfn)
clkfn = lambda x, y, b, m: sfc.on_click_parallel_nybatch(x, y, b, m) # noqa : E731
sfw._parallel_nybatch_but = ui.Button(f"SF batch y: {sff.p_parallel_nybatch}",
style={'background_color': sfw.darkblue},
mouse_pressed_fn=clkfn)
clkfn = lambda x, y, b, m: sfc.on_click_parallel_nzbatch(x, y, b, m) # noqa : E731
sfw._parallel_nzbatch_but = ui.Button(f"SF batch z: {sff.p_parallel_nzbatch}",
style={'background_color': sfw.darkblue},
mouse_pressed_fn=clkfn)
class SfcTabSphereFlake(BaseTab):
sfc: SfControls = None
def __init__(self, sfw: SfcWindow):
super().__init__("SphereFlake")
self.sfw = sfw
self.sfc = sfw.sfc
def build_fn(self):
print("SfcTabSphereFlake.build_fn (trc)")
sfw = self.sfw
sfc = self.sfc
sff = self.sfw.sff
smf = self.sfw.smf
# print(f"SfcTabMulti.build_fn sfc:{type(sfc)} ")
with ui.VStack(style={"margin": sfw.marg}):
with ui.VStack():
with ui.HStack():
sfw._sf_spawn_but = ui.Button("Spawn SphereFlake",
style={'background_color': sfw.darkred},
clicked_fn=lambda: sfc.on_click_sphereflake())
with ui.VStack(width=200):
sfw._sf_depth_but = ui.Button(f"Depth:{sff.p_depth}",
style={'background_color': sfw.darkgreen},
mouse_pressed_fn= # noqa : E251
lambda x, y, b, m: sfc.on_click_sfdepth(x, y, b, m))
with ui.HStack():
ui.Label("Radius Ratio: ",
style={'background_color': sfw.darkgreen},
width=50)
sfw._sf_radratio_slider = ui.FloatSlider(model=sfw._sf_radratio_slider_model,
min=0.0, max=1.0, step=0.01,
style={'background_color': sfw.darkblue}).model
# SF Gen Mode Combo Box
with ui.HStack():
ui.Label("Gen Mode:")
model = sfw._genmodebox_model
idx = model.as_int
sfw._genmodebox_model = ui.ComboBox(idx, *sff.GetGenModes()).model.get_item_value_model()
# SF Form Combo Box
with ui.HStack():
ui.Label("Gen Form1:")
model = sfw._genformbox_model
idx = model.as_int
sfw._genformbox_model = ui.ComboBox(idx, *sff.GetGenForms()).model.get_item_value_model()
with ui.VStack():
sfw._sf_nlat_but = ui.Button(f"Nlat:{smf.p_nlat}",
style={'background_color': sfw.darkgreen},
mouse_pressed_fn= # noqa : E251
lambda x, y, b, m: sfc.on_click_nlat(x, y, b, m))
sfw._sf_nlng_but = ui.Button(f"Nlng:{smf.p_nlng}",
style={'background_color': sfw.darkgreen},
mouse_pressed_fn= # noqa : E251
lambda x, y, b, m: sfc.on_click_nlng(x, y, b, m))
class SfcTabShapes(BaseTab):
sfw: SfcWindow
sfc: SfControls
def __init__(self, sfw: SfcWindow):
super().__init__("Shapes")
self.sfw = sfw
self.sfc = sfw.sfc
def build_fn(self):
print("SfcTabShapes.build_fn (trc)")
sfc = self.sfc
sfw = self.sfw
# print(f"SfcTabShapes.build_fn {type(sfc)}")
with ui.VStack(style={"margin": sfw.marg}):
with ui.HStack():
sfw._sf_spawn_but = ui.Button("Spawn Prim",
style={'background_color': sfw.darkred},
clicked_fn=lambda: sfc.on_click_spawnprim())
sfw._sf_primtospawn_but = ui.Button(f"{sfc._curprim}",
style={'background_color': sfw.darkpurple},
clicked_fn=lambda: sfc.on_click_changeprim())
class SfcTabMaterials(BaseTab):
sfw: SfcWindow
sfc: SfControls
def __init__(self, sfw: SfcWindow):
super().__init__("Materials")
self.sfw = sfw
self.sfc = sfw.sfc
# print("SfcTabMaterials.build_fn {sfc}")
def build_fn(self):
print("SfcTabMaterials.build_fn (trc)")
sfw = self.sfw
sfc = self.sfc
with ui.VStack(style={"margin": sfw.marg}):
# Material Combo Box
with ui.HStack():
ui.Label("SF Material 1:")
idx = sfc._matkeys.index(sfc._current_material_name)
sfw._sf_matbox = ui.ComboBox(idx, *sfc._matkeys)
sfw._sf_matbox_model = sfw._sf_matbox.model.get_item_value_model()
print("built sfw._sf_matbox")
with ui.HStack():
ui.Label("SF Material 2:")
# use the alternate material name
idx = sfc._matkeys.index(sfc._current_alt_material_name)
sfw._sf_alt_matbox = ui.ComboBox(idx, *sfc._matkeys)
sfw._sf_alt_matbox_model = sfw._sf_alt_matbox.model.get_item_value_model()
print("built sfw._sf_matbox")
# Bounds Material Combo Box
with ui.HStack():
ui.Label("Bounds Material:")
idx = sfc._matkeys.index(sfc._current_bbox_material_name)
sfw._bb_matbox = ui.ComboBox(idx, *sfc._matkeys)
sfw._bb_matbox_model = sfw._bb_matbox.model.get_item_value_model()
# Bounds Material Combo Box
with ui.HStack():
ui.Label("Floor Material:")
idx = sfc._matkeys.index(sfc._current_floor_material_name)
sfw._sf_floor_matbox = ui.ComboBox(idx, *sfc._matkeys)
sfw._sf_floor_matbox_model = sfw._sf_floor_matbox.model.get_item_value_model()
class SfcTabOptions(BaseTab):
sfw: SfcWindow
sfc: SfControls
def __init__(self, sfw: SfcWindow):
super().__init__("Options")
self.sfw = sfw
self.sfc = sfw.sfc
# print("SfcTabOptions.build_fn {sfc}")
def build_fn(self):
print("SfcTabOptions.build_fn (trc)")
sfw = self.sfw
sfc = self.sfc # noqa : F841
with ui.VStack(style={"margin": sfw.marg}):
with ui.HStack():
ui.Label("Write Perf Log: ")
sfw.writelog_checkbox = ui.CheckBox(model=sfw.writelog_checkbox_model,
width=40, height=10, name="writelog", visible=True)
with ui.HStack():
ui.Label("Log Series Name:")
sfw.writelog_seriesname = ui.StringField(model=sfw.writelog_seriesname_model,
width=200, height=20, visible=True)
| 19,270 |
Python
| 46.818858 | 117 | 0.486404 |
mike-wise/kit-exts-spawn-prims/exts/omni.sphereflake/omni/sphereflake/spheremesh.py
|
import omni.kit.commands as okc
import omni.usd
import math
import numpy as np
from pxr import Gf, Sdf, Usd, UsdGeom, UsdShade, Vt
from .ovut import MatMan, delete_if_exists
class SphereMeshFactoryV1():
_show_normals = False
_matman = None
_nlat = 8
_nlng = 8
_total_quads = 0
def __init__(self) -> None:
# self._stage = omni.usd.get_context().get_stage()
pass
def GenPrep(self):
pass
def LoadSettings(self):
print("SphereMeshFactoryV1.LoadSettings (trc)")
def MakeMarker(self, name: str, matname: str, cenpt: Gf.Vec3f, rad: float):
# print(f"MakeMarker {name} {cenpt} {rad}")
primpath = f"/World/markers/{name}"
delete_if_exists(primpath)
okc.execute('CreateMeshPrimWithDefaultXform', prim_type="Sphere", prim_path=primpath)
sz = rad/100
okc.execute('TransformMultiPrimsSRTCpp',
count=1,
paths=[primpath],
new_scales=[sz, sz, sz],
new_translations=[cenpt[0], cenpt[1], cenpt[2]])
stage = omni.usd.get_context().get_stage()
prim: Usd.Prim = stage.GetPrimAtPath(primpath)
mtl = self._matman.GetMaterial(matname)
UsdShade.MaterialBindingAPI(prim).Bind(mtl)
def CreateMesh(self, name: str, matname: str, cenpt: Gf.Vec3f, radius: float):
# This will create nlat*nlog quads or twice that many triangles
# it will need nlat+1 vertices in the latitude direction and nlong vertices in the longitude direction
# so a total of (nlat+1)*(nlong) vertices
stage = omni.usd.get_context().get_stage()
spheremesh = UsdGeom.Mesh.Define(stage, name)
nlat = self._nlat
nlng = self._nlng
vtxcnt = int(0)
pts = []
nrm = []
txc = []
fvc = []
idx = []
polegap = 0.01 # prevents the vertices from being exactly on the poles
for i in range(nlat+1):
for j in range(nlng):
theta = polegap + (i * (math.pi-2*polegap) / float(nlat))
phi = j * 2 * math.pi / float(nlng)
nx = math.sin(theta) * math.cos(phi)
ny = math.cos(theta)
nz = math.sin(theta) * math.sin(phi)
x = radius * nx
y = radius * ny
z = radius * nz
rawpt = Gf.Vec3f(x, y, z)
nrmvek = Gf.Vec3f(nx, ny, nz)
pt = rawpt + cenpt
nrm.append(nrmvek)
pts.append(pt)
txc.append((x, y))
if self._show_normals:
ptname = f"ppt_{i}_{j}"
npt = Gf.Vec3f(x+nx, y+ny, z+nz)
nmname = f"npt_{i}_{j}"
self.MakeMarker(ptname, "red", pt, 1)
self.MakeMarker(nmname, "blue", npt, 1)
for i in range(nlat):
offset = i * nlng
for j in range(nlng):
fvc.append(int(4))
if j < nlng - 1:
i1 = offset+j
i2 = offset+j+1
i3 = offset+j+nlng+1
i4 = offset+j+nlng
else:
i1 = offset+j
i2 = offset
i3 = offset+nlng
i4 = offset+j+nlng
idx.extend([i1, i2, i3, i4])
# print(f"i:{i} j:{j} vtxcnt:{vtxcnt} i1:{i1} i2:{i2} i3:{i3} i4:{i4}")
vtxcnt += 1
# print(len(pts), len(txc), len(fvc), len(idx))
spheremesh.CreatePointsAttr(pts)
spheremesh.CreateNormalsAttr(nrm)
spheremesh.CreateFaceVertexCountsAttr(fvc)
spheremesh.CreateFaceVertexIndicesAttr(idx)
spheremesh.CreateExtentAttr([(-radius, -radius, -radius), (radius, radius, radius)])
texCoords = UsdGeom.PrimvarsAPI(spheremesh).CreatePrimvar("st",
Sdf.ValueTypeNames.TexCoord2fArray,
UsdGeom.Tokens.varying)
texCoords.Set(txc)
# prim: Usd.Prim = self._stage.GetPrimAtPath(primpath)
mtl = self._matman.GetMaterial(matname)
UsdShade.MaterialBindingAPI(spheremesh).Bind(mtl)
self._total_quads += len(fvc) # face vertex counts
return spheremesh
class SphereMeshFactory():
_show_normals = False
_matman: MatMan = None
p_nlat = 8
p_nlng = 8
_total_quads = 0
_dotexcoords = True
def __init__(self, matman: MatMan) -> None:
self._matman = matman
# self._stage = omni.usd.get_context().get_stage()
def GenPrep(self):
self._nquads = self.p_nlat*self.p_nlng
self._nverts = (self.p_nlat+1)*(self.p_nlng)
self._normbuf = np.zeros((self._nverts, 3), dtype=np.float32)
self._txtrbuf = np.zeros((self._nverts, 2), dtype=np.float32)
self._facebuf = np.zeros((self._nquads, 1), dtype=np.int32)
self._vidxbuf = np.zeros((self._nquads, 4), dtype=np.int32)
self.MakeArrays()
def Clear(self):
pass
def MakeMarker(self, name: str, matname: str, cenpt: Gf.Vec3f, rad: float):
# print(f"MakeMarker {name} {cenpt} {rad}")
primpath = f"/World/markers/{name}"
delete_if_exists(primpath)
okc.execute('CreateMeshPrimWithDefaultXform', prim_type="Sphere", prim_path=primpath)
sz = rad/100
okc.execute('TransformMultiPrimsSRTCpp',
count=1,
paths=[primpath],
new_scales=[sz, sz, sz],
new_translations=[cenpt[0], cenpt[1], cenpt[2]])
stage = omni.usd.get_context().get_stage()
prim: Usd.Prim = stage.GetPrimAtPath(primpath)
mtl = self._matman.GetMaterial(matname)
UsdShade.MaterialBindingAPI(prim).Bind(mtl)
def MakeArrays(self):
nlat = self.p_nlat
nlong = self.p_nlng
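        # Build the quad face-count and vertex-index buffers; the else branch wraps the last
        # longitude column back around to column zero so the sphere closes at the seam.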
for i in range(nlat):
offset = i * nlong
for j in range(nlong):
if j < nlong - 1:
i1 = offset+j
i2 = offset+j+1
i3 = offset+j+nlong+1
i4 = offset+j+nlong
else:
i1 = offset+j
i2 = offset
i3 = offset+nlong
i4 = offset+j+nlong
vidx = i*nlong+j
self._facebuf[vidx] = 4
self._vidxbuf[vidx] = [i1, i2, i3, i4]
polegap = 0.01 # prevents the vertices from being exactly on the poles
for i in range(nlat+1):
theta = polegap + (i * (math.pi-2*polegap) / float(nlat))
st = math.sin(theta)
ct = math.cos(theta)
for j in range(nlong):
phi = j * 2 * math.pi / float(nlong)
sp = math.sin(phi)
cp = math.cos(phi)
nx = st*cp
ny = ct
nz = st*sp
nrmvek = Gf.Vec3f(nx, ny, nz)
vidx = i*nlong+j
self._normbuf[vidx] = nrmvek
self._txtrbuf[vidx] = (nx, ny)
# print("MakeArrays done")
def ShowNormals(self, vertbuf):
nlat = self.p_nlat
nlong = self.p_nlng
for i in range(nlat+1):
for j in range(nlong):
vidx = i*nlong+j
ptname = f"ppt_{i}_{j}"
(x, y, z) = vertbuf[vidx]
                (nx, ny, nz) = self._normbuf[vidx]
pt = Gf.Vec3f(x, y, z)
npt = Gf.Vec3f(x+nx, y+ny, z+nz)
nmname = f"npt_{i}_{j}"
self.MakeMarker(ptname, "red", pt, 1)
self.MakeMarker(nmname, "blue", npt, 1)
def CreateMesh(self, name: str, matname: str, cenpt: Gf.Vec3f, radius: float):
# This will create nlat*nlog quads or twice that many triangles
# it will need nlat+1 vertices in the latitude direction and nlong vertices in the longitude direction
# so a total of (nlat+1)*(nlong) vertices
stage = omni.usd.get_context().get_stage()
spheremesh = UsdGeom.Mesh.Define(stage, name)
# note that vertbuf is local to this function allowing it to be changed in a multithreaded environment
vertbuf = self._normbuf*radius + cenpt
if self._show_normals:
self.ShowNormals(vertbuf)
if self._dotexcoords:
texCoords = UsdGeom.PrimvarsAPI(spheremesh).CreatePrimvar("st",
Sdf.ValueTypeNames.TexCoord2fArray,
UsdGeom.Tokens.varying)
texCoords.Set(Vt.Vec2fArray.FromNumpy(self._txtrbuf))
spheremesh.CreatePointsAttr(Vt.Vec3dArray.FromNumpy(vertbuf))
spheremesh.CreateNormalsAttr(Vt.Vec3dArray.FromNumpy(self._normbuf))
spheremesh.CreateFaceVertexCountsAttr(Vt.IntArrayFromBuffer(self._facebuf))
spheremesh.CreateFaceVertexIndicesAttr(Vt.IntArrayFromBuffer(self._vidxbuf))
mtl = self._matman.GetMaterial(matname)
UsdShade.MaterialBindingAPI(spheremesh).Bind(mtl)
self._total_quads += self._nquads # face vertex counts
return None
async def CreateVertBuf(self, radius, cenpt):
vertbuf = self._normbuf*radius + cenpt
return vertbuf
async def CreateStuff(self, spheremesh, vertbuf, normbuf, facebuf, vidxbuf):
spheremesh.CreatePointsAttr(Vt.Vec3dArray.FromNumpy(vertbuf))
spheremesh.CreateNormalsAttr(Vt.Vec3dArray.FromNumpy(normbuf))
spheremesh.CreateFaceVertexCountsAttr(Vt.IntArrayFromBuffer(facebuf))
spheremesh.CreateFaceVertexIndicesAttr(Vt.IntArrayFromBuffer(vidxbuf))
return
async def CreateMeshAsync(self, name: str, matname: str, cenpt: Gf.Vec3f, radius: float):
# This will create nlat*nlog quads or twice that many triangles
# it will need nlat+1 vertices in the latitude direction and nlong vertices in the longitude direction
# so a total of (nlat+1)*(nlong) vertices
stage = omni.usd.get_context().get_stage()
spheremesh = UsdGeom.Mesh.Define(stage, name)
# note that vertbuf is local to this function allowing it to be changed in a multithreaded environment
vertbuf = await self.CreateVertBuf(radius, cenpt)
if self._show_normals:
self.ShowNormals(vertbuf)
if self._dotexcoords:
texCoords = UsdGeom.PrimvarsAPI(spheremesh).CreatePrimvar("st",
Sdf.ValueTypeNames.TexCoord2fArray,
UsdGeom.Tokens.varying)
texCoords.Set(Vt.Vec2fArray.FromNumpy(self._txtrbuf))
await self.CreateStuff(spheremesh, vertbuf, self._normbuf, self._facebuf, self._vidxbuf)
mtl = self._matman.GetMaterial(matname)
UsdShade.MaterialBindingAPI(spheremesh).Bind(mtl)
self._total_quads += self._nquads # face vertex counts
return None
| 11,256 |
Python
| 38.36014 | 110 | 0.543088 |
mike-wise/kit-exts-spawn-prims/exts/omni.sphereflake/omni/sphereflake/tests/testsyspath.py
|
import sys
def write_out_syspath(fname: str = 'd:/nv/ov/syspath.txt', indent=False) -> None:
# Write out the python syspath to a file
    # Indent should be True if the output is to be used for the settings.json python.analysis.extraPaths setting
pplist = sys.path
with open(fname, 'w') as f:
for line in pplist:
nline = line.replace("\\", "/")
if indent:
nnline = f" \"{nline}\",\n"
else:
nnline = f"\"{nline}\",\n"
f.write(nnline)
def read_in_syspath(fname: str = 'd:/nv/ov/syspath.txt') -> None:
# Read in the python path from a file
with open(fname, 'r') as f:
for line in f:
nline = line.replace(',', '')
nline = nline.replace('"', '')
nline = nline.replace('"', '')
nline = nline.replace('\n', '')
nline = nline.replace(' ', '')
sys.path.append(nline)
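# Round-trip check: dump the current sys.path, read it back in (which appends duplicate entries), then dump it again for comparison.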
write_out_syspath("syspath-before.txt")
read_in_syspath("syspath-before.txt")
write_out_syspath("syspath-after.txt")
| 1,084 |
Python
| 30.911764 | 97 | 0.52952 |
mike-wise/kit-exts-spawn-prims/exts/omni.sphereflake/docs/README.md
|
# Python SphereFlake Benchmark
This creates an adjustable number of SphereFlakes with a variety of methods, for the purpose of benchmarking Omniverse.
| 153 |
Markdown
| 29.799994 | 119 | 0.836601 |
sajith0481/kit-extension-template/exts/omni.hello.world/omni/hello/world/extension.py
|
import omni.ext
import omni.ui as ui
import omni.kit.commands
# Functions and vars are available to other extensions as usual in python: `example.python_ext.some_public_function(x)`
def some_public_function(x: int):
print(f"[omni.hello.world] some_public_function was called with {x}")
return x ** x
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class MyExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
print("[omni.hello.world] MyExtension startup")
self._count = 0
self._window = ui.Window("Spawn a cube", width=300, height=300)
with self._window.frame:
with ui.VStack():
ui.Label("Some label")
def on_click():
print("made a cube!")
omni.kit.commands.execute('CreatePrimWithDefaultXform',
prim_type='Cube',
attributes={'size': 100.0, 'extent': [(-50.0, -50.0, -50.0), (50.0, 50.0, 50.0)]})
ui.Button("Spawn Cube", clicked_fn=lambda: on_click())
def on_shutdown(self):
print("[omni.hello.world] MyExtension shutdown")
| 1,543 |
Python
| 36.658536 | 119 | 0.625405 |
parkerjgit/omniverse-sandbox/README.md
|
# Omniverse Sandbox
This repository is a sandbox for minimal examples and proofs of concept. See readme files in subdirectories for details:
* [Omniverse KIT Extensions](./poc.extensions/readme.md)
* [Omniverse Farm on AKS](./poc.farmOnAks/readme.md)
* [Omniverse Farm on Windows](./poc.farmOnWindows/readme.md)
* [Omniverse Farm on Linux](./poc.farmOnLinux/readme.md)
## References
* [A Deep Dive into Building Microservices with Omniverse](https://www.nvidia.com/en-us/on-demand/session/gtcfall21-a31204/)
* [Companion Code to A Deep Dive into Building Microservices with Omniverse](https://github.com/NVIDIA-Omniverse/deep-dive-into-microservices)
* [Developer Breakout: Build Your Own Microservices on Omniverse](https://www.nvidia.com/en-us/on-demand/session/gtcspring22-se2306/)
* [Omniverse Kit: Microservices](https://www.youtube.com/watch?v=oSOhtxPJ5CU)
* [Omniverse microservice tutorials, 1](http://localhost:8211/tutorials/docs/source/extensions/omni.services.tutorials.one/docs/README.html)
* [Microservice Tutorial 1 Video](https://drive.google.com/file/d/1zzX4BJ9MLdfo2VMhFDsAISFGBKBhko2P/view)
* [Omniverse microservice tutorials, 2](http://localhost:8211/tutorials/docs/source/extensions/omni.services.tutorials.two/docs/README.html)
* [Microservice Tutorial 2 Video](https://drive.google.com/file/d/1QBu6zfl2CWMFMfWf_Ui0MUiNK4VHgZHZ/view)
* [Omniverse Kit Extensions Project Template](https://github.com/NVIDIA-Omniverse/kit-extension-template)
* [Omniverse Services](https://docs.omniverse.nvidia.com/prod_services/prod_services/overview.html)
| 1,566 |
Markdown
| 73.619044 | 142 | 0.794381 |
parkerjgit/omniverse-sandbox/poc.farmOnWindows/readme.md
|
# Omniverse Farm on Windows (local)
Proof of concept for installing and running Farm as a standalone service on a local Windows machine.
## Prerequisites
* [Omniverse Launcher](https://docs.omniverse.nvidia.com/prod_launcher/prod_launcher/installing_launcher.html) installed.
## Setup
1. Setup Queue
1. Install/launch Farm Queue (service) from Omniverse Launcher
    1. Configure the Queue URL (this is the address agents connect to, and also the base URL for the API endpoints)
1. Navigate to Queue Management dashboard(s):
```sh
# http://<queue_url>/queue/management/ui/
# http://<queue_url>/queue/management/dashboard
http://localhost:8222/queue/management/ui/
http://localhost:8222/queue/management/dashboard
```
1. Find Queue Management API docs
```sh
# http://<queue_url>/docs
http://localhost:8222/docs
```
1. Perform health check
```
curl -X GET 'http://localhost:8222/status' \
-H 'accept: application/json'
```
1. Setup Agent
1. Install/launch Farm Agent from Omniverse Launcher
1. Connect Agent to Queue by configuring the Queue address (and clicking "Connect"):
```sh
# http://<queue_url>
http://localhost:8222
```
1. Configure Job directories:
```
C:\Users\nycjyp\Code\sandbox\omniverse\poc.farmOnWindows\agent\jobs\*
```
1. Navigate to Job Management Dashboard:
```sh
# http://<agent_url>/agent/management/ui/
http://localhost:8223/agent/management/ui/
```
1. Find Agent Management API docs:
```sh
# http://<agent_url>/docs
http://localhost:8223/docs
```
1. Perform health check
```
curl -X GET 'http://localhost:8223/status' \
-H 'accept: application/json'
```
## first job (simple command)
1. Create `hello-omniverse.kit` file in `hello-omniverse` folder within the configured `jobs` directory and copy and paste contents of [hello world example](https://docs.omniverse.nvidia.com/app_farm/app_farm/guides/creating_job_definitions.html#job-definition-schema-system-executables) (see [schema](https://docs.omniverse.nvidia.com/app_farm/app_farm/guides/creating_job_definitions.html#schema-reference))
1. Verify the job has been added by getting the list of jobs:
```
curl -X GET 'http://localhost:8223/agent/operator/available' \
-H 'accept: application/json'
```
1. Submit task using `queue/management/tasks/submit` endpoint:
```
curl -X POST "http://localhost:8222/queue/management/tasks/submit" \
--header "Accept: application/json" \
--header "Content-Type: application/json" \
--data '{"user":"my-user-id","task_type":"hello-omniverse","task_args":{},"task_comment":"My first job!"}'
```
1. Get Status of task:
```sh
# of task with task id (returned when you submitted task)
curl -X GET 'http://localhost:8222/queue/management/tasks/info/848973c4-5864-416b-976f-56a94cfc8258' \
-H 'accept: application/json'
# of all tasks matching type:
curl -X GET 'http://localhost:8222/queue/management/tasks/list?task_type=hello-omniverse' \
-H 'accept: application/json'
```
* Note, "echo" command was not found, so replaced with "systeminfo" and removed args.
## second job (simple python script)
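A simple Python script can be wrapped the same way as the first job: create a folder (e.g. `hello-python`) under the configured `jobs` directory containing the script and a `.kit` job definition. The sketch below is hypothetical (the folder name, script name and paths are placeholders, and the fields are modeled on the hello-omniverse example), so verify the field names against the [schema reference](https://docs.omniverse.nvidia.com/app_farm/app_farm/guides/creating_job_definitions.html#schema-reference) before using it.
```toml
# Hypothetical jobs/hello-python/hello-python.kit, modeled on the hello-omniverse example
[job.hello-python]
job_type = "base"
name = "hello-python"
# Assumes a python interpreter is on the agent's PATH; otherwise point command at an absolute path.
command = "python"
args = ["C:/Users/nycjyp/Code/sandbox/omniverse/poc.farmOnWindows/agent/jobs/hello-python/hello.py"]
log_to_stdout = true
```
Submitting it works the same as the first job, with `"task_type": "hello-python"` in the request body.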
## ISSUES
* getting echo not found error back from agent.
* when I add job directories
## Questions:
* Where are the API docs for the queue? https://forums.developer.nvidia.com/t/farm-queue-management-api-documentation/237548
* How do we package a script and its dependencies?
* How do we verify that agents are registered for a job?
* How do we distribute jobs to agents?
| 3,746 |
Markdown
| 37.628866 | 409 | 0.658836 |
parkerjgit/omniverse-sandbox/poc.extensions/readme.md
|
# Creating microservice extensions
## Minimal Service
1. Prerequisites:
* Kit installed and added to path.
1. Register request handler with an endpoint:
```py
from omni.services.core import main
def hello_world():
return "hello world"
main.register_endpoint("get", "/hello-world", hello_world)
```
1. Start service with `kit.exe` by passing config.
```
kit \
--exec hello_world.py \
--enable omni.services.core \
--enable omni.services.transport.server.http \
--/exts/omni.kit.registry.nucleus/registries/0/name=kit/services \
--/exts/omni.kit.registry.nucleus/registries/0/url=https://dw290v42wisod.cloudfront.net/exts/kit/services
```
    > Note, you should see "app ready" logged after all dependencies are resolved and started up.
1. Navigate to OpenAPI docs at `http://localhost:8011/docs` which now includes `/hello-world` endpoint.
1. Test endpoint:
```
curl -X 'GET' \
'http://localhost:8011/hello-world' \
-H 'accept: application/json'
```
## Simple Service (linked to app)
1. Generate "New Extension Template Project" scaffolding, from Omniverse Code Extensions tab, by clicking "+" icon.
* **name** - repo root directory, can be anything, e.g. "simple-service"
* **id** - python module (namespace), e.g. "poc.services.simple"
1. [If nec.] [Link with Omniverse app](https://github.com/NVIDIA-Omniverse/kit-extension-template#linking-with-an-omniverse-app)
```
link_app.sh --app kit
```
1. Verify that python module is correctly specified in [config/extension.toml](./simple-service/exts/poc.services.simple/config/extension.toml) file:
```toml
# available as "import poc.services.simple"
[[python.module]]
name = "poc.services.simple"
```
1. Add `omni.services.core` to the dependencies in the [config/extension.toml](./simple-service/exts/poc.services.simple/config/extension.toml) file:
```toml
[dependencies]
"omni.services.core" = {}
```
1. Implement `on_startup` and `on_shutdown` methods to register/deregister handler with endpoint (in [extension.py](./simple-service/exts/poc.services.simple/poc/services/simple/extension.py) file):
```py
import omni.ext
from omni.services.core import main
def hello_world():
return "hello world"
class PocServicesSimpleExtension(omni.ext.IExt):
def on_startup(self, ext_id):
main.register_endpoint("get", "/hello-world", hello_world)
def on_shutdown(self):
main.deregister_endpoint("get", "/hello-world")
```
1. Launch Omniverse Code with `poc.services.simple` extension enabled
```
app\omni.code.bat ^
--ext-folder ./exts ^
--enable poc.services.simple
```
1. Navigate to OpenAPI docs at `http://localhost:8211/docs` which include `/hello-world` endpoint. (Note, port will depend on host application. See below)
1. Test endpoint:
```
curl -X 'GET' \
'http://localhost:8211/hello-world' \
-H 'accept: application/json'
```
Notes:
* Omniverse applications run a webserver and expose an API by default, so if you are running the service from a UI app (e.g. Code, Create), you do not need to manually run a webserver with `omni.services.transport.server.http`. Note that, by default, single instances of Omniverse applications use the following ports: Kit - 8011, Create - 8111, Code - 8211.
* Get Port:
```py
import carb
http_server_port = carb.settings.get_settings().get_as_int("exts/omni.services.transport.server.http/port")
carb.log_info(f"The OpenAPI specifications can be accessed at: http://localhost:{http_server_port}/docs")
```
* Configure Port:
```toml
"omni.services.transport.server.http".port = 8311
```
## Headless service
1. Prerequisites
* Kit installed and added to path
1. Clone [Omniverse Kit Extensions Project Template](https://github.com/NVIDIA-Omniverse/kit-extension-template) into project root
```
git clone [email protected]:NVIDIA-Omniverse/kit-extension-template.git .
```
1. Link with omniverse app (creates app sym link)
```sh
# windows
link_app.bat --app code
# linux
    link_app.sh --app code
```
1. Start service by passing configuration to `kit.exe`
```
app/kit.exe \
--ext-folder ./exts \
--enable poc.services.headless \
--enable omni.services.transport.server.http \
--/exts/omni.kit.registry.nucleus/registries/0/name=kit/services \
--/exts/omni.kit.registry.nucleus/registries/0/url=https://dw290v42wisod.cloudfront.net/exts/kit/services
```
    > Note: Omniverse services extensions do not ship with Kit, which is why we are passing in the registry name and address where they can be downloaded from.
    > While we can launch the extension headlessly this way, if we *really* want to run headlessly we should [create an app](https://docs.omniverse.nvidia.com/kit/docs/kit-manual/104.0/guide/creating_kit_apps.html#building-an-app) (i.e., an entry point). Recall that an app is just a `.kit` file.
1. Create App, ie., a `.kit` file:
```toml
[settings.app.exts.folders]
'++' = ["./exts"]
[settings.exts]
"omni.services.transport.server.http".port = 8311
"omni.kit.registry.nucleus".registries = [
{ name = "kit/services", url = "https://dw290v42wisod.cloudfront.net/exts/kit/services"},
]
[dependencies]
"omni.services.transport.server.http" = {}
"poc.services.headless" = {}
```
1. Start service
```
kit headless-service.kit
```
## Advanced Service
1. Prerequisites
* Kit installed and added to path
1. Clone [Omniverse Kit Extensions Project Template](https://github.com/NVIDIA-Omniverse/kit-extension-template) into project root
```
git clone [email protected]:NVIDIA-Omniverse/kit-extension-template.git .
```
1. Create app sym link (ie. [Linking with an Omniverse app](https://github.com/NVIDIA-Omniverse/kit-extension-template#linking-with-an-omniverse-app))
```sh
link_app.bat --app code # windows
link_app.sh --app code # linux
```
1. Start Code w/ service enabled (ie. [Add a new extension to your Omniverse App](https://github.com/NVIDIA-Omniverse/kit-extension-template#add-a-new-extension-to-your-omniverse-app))
```
./app/kit/kit.exe \
./app/apps/omni.code.kit \
--ext-folder ./exts \
--enable omni.hello.world \
--enable poc.services.adv \
--enable omni.services.assets.validate \
--enable omni.services.assets.convert \
--/exts/omni.kit.registry.nucleus/registries/0/name=kit/services \
--/exts/omni.kit.registry.nucleus/registries/0/url=https://dw290v42wisod.cloudfront.net/exts/kit/services
```
1. Start service headlessly
```
kit adv-service.kit
```
* get extension name error expected.
1. Test endpoint
```
curl -X 'POST' \
'http://localhost:8311/viewport-capture/capture' \
-H 'accept: application/json' \
-H 'Content-Type: application/json' \
-d '{
"usd_stage_path": "omniverse://a6f20fa6-28fe-4e4d-8b5f-ca35bc7f5c90.cne.ngc.nvidia.com/NVIDIA/Samples/Astronaut/Astronaut.usd"
}'
```
Notes:
* `carb.log_warn()` logs to stdout.
* `carb.settings.get_settings().get_as_string()` reads arbitrary settings from the config; see the sketch below.
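For example, a minimal sketch of reading a setting and logging it (the key path assumes the `url_prefix` setting that `poc.services.adv` declares in its `extension.toml`; substitute your own extension's setting):
```py
import carb
import carb.settings
# Assumed setting key; it matches the url_prefix declared in poc.services.adv's extension.toml.
settings = carb.settings.get_settings()
url_prefix = settings.get_as_string("exts/poc.services.adv/url_prefix")
carb.log_warn(f"Service will be mounted under: {url_prefix}")
```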
Questions:
* How to install launcher on wsl2?
## Containerized Service
1. [If nec.] Generate API Key [here](https://ngc.nvidia.com/setup/api-key)
1. Login to nvcr.io using API Key and username '$oauthtoken'
```
docker login nvcr.io
```
1. Create a [dockerfile](./containerized-service/dockerfile) that adds [hello_world.py](./containerized-service/hello_world.py) to [KIT SDK base image](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/omniverse/containers/kit)
```dockerfile
# Start with Kit Base image
FROM nvcr.io/nvidia/omniverse/kit:103.5.1
# Install services dependencies on image (omni.services does not ship with Kit)
    # This code is pulled from an extension registry and the --ext-precache-mode flag will pull down the extensions and exit.
RUN /opt/nvidia/omniverse/kit-sdk-launcher/kit \
--ext-precache-mode \
--enable omni.services.core \
--enable omni.services.transport.server.http \
--/exts/omni.kit.registry.nucleus/registries/0/name=kit/services \
--/exts/omni.kit.registry.nucleus/registries/0/url=https://dw290v42wisod.cloudfront.net/exts/kit/services \
--allow-root
# Add script to image
COPY hello_world.py /hello_world.py
# Declare *intention* for container to use port 8011 at runtime
EXPOSE 8011/tcp
# Configure container as an executable
ENTRYPOINT [ \
"/opt/nvidia/omniverse/kit-sdk-launcher/kit", \
"--exec", "hello_world.py", \
"--enable omni.services.core", \
"--enable", "omni.services.transport.server.http", \
"--allow-root"]
```
1. Build a new image from dockerfile named "hello-world"
```sh
docker build -t hello-world .
docker images hello-world
```
1. Create a new executable container from latest hello-world image and run it locally on port 8011.
```
docker run -it -p 8011:8011 hello-world:latest
```
1. Navigate to OpenAPI docs at `http://localhost:8011/docs` which now include `/hello-world` endpoint.
1. Test endpoint:
```
curl -X 'GET' \
'http://localhost:8011/hello-world' \
-H 'accept: application/json'
```
## Deploy Containerized App to ACI
1. Host in container registry
1. [if nec] Create an Azure Container Registry (ACR)
```sh
# az acr create --resource-group <resource-group> --name <acrName> --sku Basic
az acr create --resource-group dt-sandbox-resources --name ovfarmacr --sku Basic --admin-enabled true
```
1. Log in to container registry
```sh
# az acr login --name <acrName>
az acr login --name ovfarmacr
```
1. Tag image
```sh
# get full name of ACR instance (e.g., ovfarmacr.azurecr.io)
az acr show --name ovfarmacr --query loginServer --output table
# docker tag <source_image>:<tag> <acr_name>.azurecr.io/<target_image>:<tag>
docker tag hello-world:latest ovfarmacr.azurecr.io/hello-world:latest
```
1. Push image to container registry
```sh
docker push ovfarmacr.azurecr.io/hello-world:latest
# verify image is now stored in registry
az acr repository show --name ovfarmacr --repository hello-world
az acr repository list --name ovfarmacr --output table
az acr repository show-tags --name ovfarmacr --repository hello-world --output table
```
1. Deploy App (using az container create)
1. Get ACR credentials
```
az acr credential show -g dt-sandbox-resources -n ovfarmacr
```
1. Create Container Group
```
ACR_PASSWORD=<acr_password>
az container create \
--resource-group dt-sandbox-resources \
--name ov-demo-microservice \
--image ovfarmacr.azurecr.io/hello-world:latest \
--registry-login-server ovfarmacr.azurecr.io \
--registry-username ovfarmacr \
--registry-password $ACR_PASSWORD \
--ip-address Public \
--dns-name-label ov-demo-microservice \
--ports 8011
```
1. test endpoints
```
http://ov-demo-microservice.eastus.azurecontainer.io:8011/docs
http://ov-demo-microservice.eastus.azurecontainer.io:8011/hello-world
http://ov-demo-microservice.eastus.azurecontainer.io:8011/status
```
## Azure storage service
1. Start service headlessly
```
kit app.kit
```
1. Start service in container
```
docker compose up
```
1. Clear container cache and restart service
```
docker compose down --rmi local --volumes --remove-orphans
docker compose build --no-cache
docker compose up
```
## Ref
* [Omniverse Services Getting Started](https://docs.omniverse.nvidia.com/prod_services/prod_services/design/getting_started.html)
* [Omniverse microservice tutorials, 1](http://localhost:8211/tutorials/docs/source/extensions/omni.services.tutorials.one/docs/README.html)
* [Omniverse microservice tutorials, 2](http://localhost:8211/tutorials/docs/source/extensions/omni.services.tutorials.two/docs/README.html)
* [Omniverse Kit Extensions Project Template](https://github.com/NVIDIA-Omniverse/kit-extension-template)
* [Companion Code to A Deep Dive into Building Microservices with Omniverse](https://github.com/NVIDIA-Omniverse/deep-dive-into-microservices)
* [Tutorial: Implementing a Viewport Capture Service](https://docs.omniverse.nvidia.com/prod_services/prod_services/tutorials/viewport-capture/index.html)
| 12,923 |
Markdown
| 38.888889 | 365 | 0.668266 |
parkerjgit/omniverse-sandbox/poc.extensions/minimal-service/hello_world.py
|
from omni.services.core import main
def hello_world():
return "hello world"
main.register_endpoint("get", "/hello-world", hello_world)
| 140 |
Python
| 22.499996 | 58 | 0.728571 |
parkerjgit/omniverse-sandbox/poc.extensions/adv-service/exts/poc.services.adv/config/extension.toml
|
[package]
# Semantic Versionning is used: https://semver.org/
version = "1.0.0"
# The title and description fields are primarily for displaying extension info in UI
title = "Viewport capture service"
description = "Sample service example demonstrating the creation of microservices using Omniverse."
# Path (relative to the root) or content of readme markdown file for UI.
readme = "docs/README.md"
# Path (relative to the root) of changelog
changelog = "docs/CHANGELOG.md"
# URL of the extension source repository.
repository = "https://github.com/parkerjgit/omniverse-sandbox/tree/main/poc.extensions/adv-service"
# One of categories for UI.
category = "services"
# Keywords for the extension
keywords = ["kit", "service"]
# Icon to show in the extension manager
icon = "data/icon.png"
# Preview to show in the extension manager
preview_image = "data/preview.png"
# Extension dependencies
[dependencies]
"omni.kit.menu.edit" = {}
"omni.kit.actions.core" = {}
"omni.services.core" = {}
"omni.services.transport.server.http" = {}
"omni.usd" = {}
# Main python module this extension provides. It will be publicly available as "import poc.services.adv".
[[python.module]]
name = "poc.services.adv"
# Settings of our extension:
[settings.exts."poc.services.adv"]
# URL prefix where the service will be mounted, where our API will be available to handle incoming requests.
#
# Defining this as a setting makes it easy to change or rebrand the endpoint using only command-line or KIT-file
# configuration instructions, should extensions ever feature conflicting endpoint naming conventions.
url_prefix = "/viewport-capture"
# Path from where the captured images will be served from, when exposed to clients.
#
# This path will be mounted as a child of the `url_prefix` setting, and expressed as a formatted join of the
# `{url_prefix}{capture_path}` settings.
capture_path = "/static"
# Name of the directory on the server where captured images will be stored:
capture_directory = "captured_stage_images"
| 2,022 |
TOML
| 33.87931 | 112 | 0.754698 |
parkerjgit/omniverse-sandbox/poc.extensions/adv-service/exts/poc.services.adv/poc/services/adv/extension.py
|
import os
from fastapi.staticfiles import StaticFiles
import carb
import omni.ext
from omni.services.core import main
# As most of the features of our API are implemented by the means of function handlers in the `/services` sub-folder,
# the main duty of our extension entrypoint is to register our Service's `@router` and dictate when its capability
# should be enabled or disabled under the guidance of the User or the dependency system.
from .services.capture import router
# For convenience, let's also reuse the utility methods we already created to handle and format the storage location of
# the captured images so they can be accessed by clients using the server, once API responses are issued from our
# Service:
from .utils import get_captured_image_directory, get_captured_image_path
# Any class derived from `omni.ext.IExt` in the top level module (defined in the `python.module` section of the
# `extension.toml` file) will be instantiated when the extension is enabled, and its `on_startup(ext_id)` method
# will be called. When disabled or when the application is shut down, its `on_shutdown()` will be called.
class ViewportCaptureExtension(omni.ext.IExt):
"""Sample extension illustrating registration of a service."""
# `ext_id` is the unique identifier of the extension, containing its name and semantic version number. This
# identifier can be used in conjunction with the Extension Manager to query for additional information, such
# as the extension's location on the filesystem.
def on_startup(self, ext_id: str) -> None:
ext_name = ext_id.split("-")[0]
carb.log_info("ViewportCaptureExtension startup")
# At this point, we register our Service's `router` under the prefix we gave our API using the settings system,
# to facilitate its configuration and to ensure it is unique from all other extensions we may have enabled:
url_prefix = carb.settings.get_settings().get_as_string(f"exts/{ext_name}/url_prefix")
main.register_router(router=router, prefix=url_prefix, tags=["Viewport capture"],)
# Proceed to create a temporary directory in the Omniverse application file hierarchy where captured stage
# images will be stored, until the application is shut down:
captured_stage_images_directory = get_captured_image_directory()
if not os.path.exists(captured_stage_images_directory):
os.makedirs(captured_stage_images_directory)
# Register this location as a mount, so its content is served by the web server bundled with the Omniverse
# application instance, thus making the captured image available on the network:
main.register_mount(
path=get_captured_image_path(),
app=StaticFiles(directory=captured_stage_images_directory, html=True),
name="captured-stage-images",
)
def on_shutdown(self) -> None:
carb.log_info("ViewportCaptureExtension shutdown")
# When disabling the extension or shutting down the instance of the Omniverse application, let's make sure we
# also deregister our Service's `router` in order to avoid our API being erroneously advertised as present as
# part of the OpenAPI specification despite our handler function no longer being available:
main.deregister_router(router=router)
main.deregister_mount(path=get_captured_image_path())
| 3,424 |
Python
| 57.050846 | 119 | 0.735689 |
parkerjgit/omniverse-sandbox/poc.extensions/adv-service/exts/poc.services.adv/poc/services/adv/utils.py
|
import asyncio
import os
import shutil
from typing import Optional, Tuple
import carb.settings
import carb.tokens
import omni.kit.actions.core
import omni.kit.app
import omni.usd
# Let's include a small utility method to facilitate obtaining the name of the extension our code is bundled with.
# While we could certainly store and share the `ext_id` provided to the `on_startup()` method of our Extension, this
# alternative method of obtaining the name of our extension can also make our code more portable across projects, as it
# may allow you to keep your code changes located closer together and not have to spread them up to the main entrypoint
# of your extension.
def get_extension_name() -> str:
"""
Return the name of the Extension where the module is defined.
Args:
None
Returns:
str: The name of the Extension where the module is defined.
"""
extension_manager = omni.kit.app.get_app().get_extension_manager()
extension_id = extension_manager.get_extension_id_by_module(__name__)
extension_name = extension_id.split("-")[0]
return extension_name
# Building on the utility method just above, this helper method helps us retrieve the path where captured images are
# served from the web server, so they can be presented to clients over the network.
def get_captured_image_path() -> str:
"""
Return the path where the captured images can be retrieved from the server, in the `/{url_prefix}/{capture_path}`
format.
Args:
None
Returns:
str: The path where the captured images can be retrieved from the server.
"""
extension_name = get_extension_name()
settings = carb.settings.get_settings()
url_prefix = settings.get_as_string(f"exts/{extension_name}/url_prefix")
capture_path = settings.get_as_string(f"exts/{extension_name}/capture_path")
captured_images_path = f"{url_prefix}{capture_path}"
return captured_images_path
# In a similar fashion to the utility method above, this helper method helps us retrieve the path on disk where the
# captured images are stored on the server. This makes it possible to map this storage location known to the server to a
# publicly-accessible location on the server, from which clients will be able to fetch the captured images once their
# web-friendly names have been communicated to clients through our Service's response.
def get_captured_image_directory() -> str:
"""
Return the location on disk where the captured images will be stored, and from which they will be served by the web
server after being mounted. In order to avoid growing the size of this static folder indefinitely, images will be
stored under the `${temp}` folder of the Omniverse application folder, which is emptied when the application is shut
down.
Args:
None
Returns:
str: The location on disk where the captured images will be stored.
"""
    extension_name = get_extension_name()
capture_directory_name = carb.settings.get_settings().get_as_string(f"exts/{extension_name}/capture_directory")
temp_kit_directory = carb.tokens.get_tokens_interface().resolve("${temp}")
captured_stage_images_directory = os.path.join(temp_kit_directory, capture_directory_name)
return captured_stage_images_directory
# This is the main utility method of our collection so far. This small helper builds on the existing capability of the
# "Edit > Capture Screenshot" feature already available in the menu to capture an image from the Omniverse application
# currently running. Upon completion, the captured image is moved to the storage location that is mapped to a
# web-accessible path so that clients are able to retrieve the screenshot once they are informed of the image's unique
# name when our Service issues its response.
async def capture_viewport(usd_stage_path: str) -> Tuple[bool, Optional[str], Optional[str]]:
"""
Capture the viewport, by executing the action already registered in the "Edit > Capture Screenshot" menu.
Args:
usd_stage_path (str): Path of the USD stage to open in the application's viewport.
Returns:
Tuple[bool, Optional[str], Optional[str]]: A tuple containing a flag indicating the success of the operation,
the path of the captured image on the web server, along with an optional error message in case of error.
"""
success: bool = omni.usd.get_context().open_stage(usd_stage_path)
captured_image_path: Optional[str] = None
error_message: Optional[str] = None
if success:
event = asyncio.Event()
menu_action_success: bool = False
capture_screenshot_filepath: Optional[str] = None
def callback(success: bool, captured_image_path: str) -> None:
nonlocal menu_action_success, capture_screenshot_filepath
menu_action_success = success
capture_screenshot_filepath = captured_image_path
event.set()
omni.kit.actions.core.execute_action("omni.kit.menu.edit", "capture_screenshot", callback)
await event.wait()
await asyncio.sleep(delay=1.0)
if menu_action_success:
# Move the screenshot to the location from where it can be served over the network:
destination_filename = os.path.basename(capture_screenshot_filepath)
destination_filepath = os.path.join(get_captured_image_directory(), destination_filename)
shutil.move(src=capture_screenshot_filepath, dst=destination_filepath)
# Record the final location of the captured image, along with the status of the operation:
captured_image_path = os.path.join(get_captured_image_path(), destination_filename)
success = menu_action_success
else:
error_message = f"Unable to open stage \"{usd_stage_path}\"."
return (success, captured_image_path, error_message)
| 5,924 |
Python
| 43.548872 | 120 | 0.716408 |
parkerjgit/omniverse-sandbox/poc.extensions/adv-service/exts/poc.services.adv/poc/services/adv/services/capture.py
|
from typing import Optional
from pydantic import BaseModel, Field
import carb
from omni.services.core import routers
from ..utils import capture_viewport
router = routers.ServiceAPIRouter()
# Let's define a model to handle the parsing of incoming requests.
#
# Using `pydantic` to handle data-parsing duties makes it less cumbersome for us to do express types, default values,
# minimum/maximum values, etc. while also taking care of documenting input and output properties of our service using
# the OpenAPI specification format.
class ViewportCaptureRequestModel(BaseModel):
"""Model describing the request to capture a viewport as an image."""
usd_stage_path: str = Field(
...,
title="Path of the USD stage for which to capture an image",
description="Location where the USD stage to capture can be found.",
)
# If required, add additional capture response options in subsequent iterations.
# [...]
# We will also define a model to handle the delivery of responses back to clients.
#
# Just like the model used to handle incoming requests, the model to deliver responses will not only help define
# default values of response parameters, but also in documenting the values clients can expect using the OpenAPI
# specification format.
class ViewportCaptureResponseModel(BaseModel):
"""Model describing the response to the request to capture a viewport as an image."""
success: bool = Field(
False,
title="Capture status",
description="Status of the capture of the given USD stage.",
)
captured_image_path: Optional[str] = Field(
None,
title="Captured image path",
description="Path of the captured image, hosted on the current server.",
)
error_message: Optional[str] = Field(
None,
title="Error message",
description="Optional error message in case the operation was not successful.",
)
# If required, add additional capture response options in subsequent iterations.
# [...]
# Using the `@router` annotation, we'll tag our `capture` function handler to document the responses and path of the
# API, once again using the OpenAPI specification format.
@router.post(
path="/capture",
summary="Capture a given USD stage",
description="Capture a given USD stage as an image.",
response_model=ViewportCaptureResponseModel,
)
async def capture(request: ViewportCaptureRequestModel,) -> ViewportCaptureResponseModel:
# For now, let's just print incoming request to the log to confirm all components of our extension are properly
# wired together:
carb.log_warn(f"Received a request to capture an image of \"{request.usd_stage_path}\".")
success, captured_image_path, error_message = await capture_viewport(usd_stage_path=request.usd_stage_path)
    # Return a JSON response carrying the status of the capture operation, the web-accessible path of the captured
    # image, and an optional error message in case of failure:
return ViewportCaptureResponseModel(
success=success,
captured_image_path=captured_image_path,
error_message=error_message,
)
| 3,163 |
Python
| 40.090909 | 118 | 0.726842 |
parkerjgit/omniverse-sandbox/poc.extensions/adv-service/exts/omni.kit.asset_converter-1.2.39+wx64.r.cp37/asset_converter_native_bindings/__init__.py
|
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
"""This module contains bindings and helpers to omniverse_asset_converter interface.
You can use it as follows:
>>> def progress_callback(progress, total_steps):
>>>     pass
>>>
>>> async with OmniAssetConverter(in_filename, out_filename, progress_callback) as converter:
>>>     status = converter.get_status()
>>>     if status == OmniConverterStatus.OK:
>>>         pass  # Handle success
>>>     else:
>>>         error_message = converter.get_detailed_error()
>>>         pass  # Handle failure
"""
import asyncio
import os, sys, ctypes
import traceback
from pxr import Plug
# preload dep libs into the process
if sys.platform == "win32":
ctypes.WinDLL(os.path.join(os.path.dirname(__file__), "libs/draco.dll"))
ctypes.WinDLL(os.path.join(os.path.dirname(__file__), "libs/assimp-vc141-mt.dll"))
ctypes.WinDLL(os.path.join(os.path.dirname(__file__), "libs/libfbxsdk.dll"))
ctypes.WinDLL(os.path.join(os.path.dirname(__file__), "libs/omniverse_asset_converter.dll"))
elif sys.platform == "linux":
ctypes.CDLL(os.path.join(os.path.dirname(__file__), "libs/libassimp.so"))
ctypes.CDLL(os.path.join(os.path.dirname(__file__), "libs/libxml2.so"), mode=ctypes.RTLD_GLOBAL)
# ctypes.CDLL(os.path.join(os.path.dirname(__file__), 'libs/libfbxsdk.so'))
ctypes.CDLL(os.path.join(os.path.dirname(__file__), "libs/libomniverse_asset_converter.so"))
from ._assetconverter import *
# Register usd plugin to read asset directly into USD.
pluginsRoot = os.path.join(os.path.dirname(__file__), "libs/resources")
Plug.Registry().RegisterPlugins(pluginsRoot)
class OmniAssetConverter:
__future_progress_callbacks = {}
__future_material_loaders = {}
__read_callback = None
__binary_write_callback = None
__progress_callback_is_set = False
__material_loader_is_set = False
__fallback_material_loader = None
def __init__(
self,
in_file,
out_file,
progress_callback=None,
ignore_material=False,
ignore_animation=False,
single_mesh=False,
smooth_normals=False,
ignore_cameras=False,
preview_surface=False,
support_point_instancer=False,
as_shapenet=False,
embed_mdl_in_usd=True,
use_meter_as_world_unit=False,
create_world_as_default_root_prim=True,
ignore_lights=False,
embed_textures=False,
material_loader=None,
convert_fbx_to_y_up=False,
convert_fbx_to_z_up=False,
keep_all_materials=False,
merge_all_meshes=False,
use_double_precision_to_usd_transform_op=False,
ignore_pivots=False,
disable_instancing=False,
export_hidden_props=False,
baking_scales=False
):
"""
Constructor.
Args:
in_file (str): Asset file path to be converted.
out_file (str): Output usd file.
progress_callback: (Callable[int, int]): Progress callback of this task.
The first param is the progress, and second one is the total steps.
ignore_animation (bool): Whether to export animation.
ignore_material (bool): Whether to export materials.
single_mesh (bool): Export Single props USD even there are native instancing in the imported assets.
By default, it will export separate USD files for instancing assets.
smooth_normals (bool): Generate smooth normals for every mesh.
ignore_cameras (bool): Whether to export camera.
preview_surface (bool): Whether to export preview surface of USD.
support_point_instancer (bool): DEPRECATED: Whether to use point instancer for mesh instances (deprecated).
as_shapenet (bool): DEPRECATED: Input is expected to be a shapenet obj file.
embed_mdl_in_usd (bool): DEPRECATED: Embeds mdl into usd without generate on-disk files.
use_meter_as_world_unit (bool): Uses meter as world unit. By default, it's centimeter for usd.
create_world_as_default_root_prim (bool): Whether to create /World as default root prim.
            ignore_lights (bool): Whether to export lights.
embed_textures (bool): Whether to embed textures for export.
material_loader (Callable[OmniConveterFuture, OmniConverterMaterialDescription): Material loader for this task.
convert_fbx_to_y_up (bool): Whether to convert imported fbx stage to Maya Y-Up.
convert_fbx_to_z_up (bool): Whether to convert imported fbx stage to Maya Z-Up.
keep_all_materials (bool): Whether to keep all materials including those ones that are not referenced by any meshes.
merge_all_meshes (bool): Whether to merge all meshes as a single one.
use_double_precision_to_usd_transform_op (bool): Whether to use double precision for all USD transform op.
It's double3 for translate op, float3 for pivot, scale and rotation by default.
ignore_pivots (bool): Don't import pivots from assets.
            disable_instancing (bool): Disables scene instancing for USD export, so that the instanceable flag for all prims will always be false even if the native assets have instancing.
export_hidden_props (bool): Export props that are hidden or not.
baking_scales (bool): Baking scales into mesh for fbx import.
"""
self._in_file = in_file
self._out_file = out_file
self._status = OmniConverterStatus.IN_PROGRESS
self._detailed_error = ""
self._progress_callback = progress_callback
self._material_loader = material_loader
self._ignore_animation = ignore_animation
self._ignore_material = ignore_material
self._single_mesh = single_mesh
self._smooth_normals = smooth_normals
self._ignore_cameras = ignore_cameras
self._preview_surface = preview_surface
self._support_point_instancer = support_point_instancer
self._as_shapenet = as_shapenet
self._embed_mdl_in_usd = embed_mdl_in_usd
self._use_meter_as_world_unit = use_meter_as_world_unit
self._create_world_as_default_root_prim = create_world_as_default_root_prim
self._ignore_lights = ignore_lights
self._embed_textures = embed_textures
self._convert_fbx_to_y_up = convert_fbx_to_y_up
self._convert_fbx_to_z_up = convert_fbx_to_z_up
self._keep_all_materials = keep_all_materials
self._merge_all_meshes = merge_all_meshes
self._use_double_precision_to_usd_transform_op = use_double_precision_to_usd_transform_op
self._ignore_pivots = ignore_pivots
self._disable_instancing = disable_instancing
self._export_hidden_props = export_hidden_props
self._baking_scales = baking_scales
self._future = None
if not OmniAssetConverter.__progress_callback_is_set:
omniConverterSetProgressCallback(OmniAssetConverter._importer_progress_callback)
OmniAssetConverter.__progress_callback_is_set = True
if not OmniAssetConverter.__material_loader_is_set:
omniConverterSetMaterialCallback(OmniAssetConverter._importer_material_loader)
OmniAssetConverter.__material_loader_is_set = True
@staticmethod
def major_version() -> int:
return OMNI_CONVERTER_MAJOR_VERSION
@staticmethod
def minor_version() -> int:
return OMNI_CONVERTER_MINOR_VERSION
@classmethod
def set_cache_folder(cls, cache_folder):
"""Sets the cache store for USD conversion with USD plugin.
Args:
cache_folder (str): Location of cache folder on your system.
"""
omniConverterSetCacheFolder(cache_folder)
@classmethod
def set_log_callback(cls, callback):
"""Sets log callback globally.
Args:
callback (Callable[str]): Log callback.
"""
omniConverterSetLogCallback(callback)
@classmethod
def set_progress_callback(cls, callback):
"""Sets progress callback globally.
This is used to monitor the asset convert progress.
Args:
callback (Callable[OmniConverterFuture, int, int]): Callback to be called with
converting future, current progress, and total steps.
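        Example:
            A sketch of a global progress handler; its signature follows the
            description above::

                def on_progress(future, progress, total):
                    print(f"converting: {progress}/{total}")

                OmniAssetConverter.set_progress_callback(on_progress)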
"""
omniConverterSetProgressCallback(callback)
@classmethod
def set_file_callback(
cls,
mkdir_callback,
binary_write_callback,
file_exists_callback,
read_callback,
layer_write_callback=None,
file_copy_callback=None,
):
"""Sets calbacks for file operations.
This is used to override the file operations so it could
be used to read asset from remote repository. By default,
it will use fallback functions that support only to read
from local disk.
Args:
mkdir_callback (Callable[str]): Function to create dir with path.
binary_write_callback (Callable[str, Buffer]): Function to write binary with path and content.
file_exists_callback (Callable[str]): Function to check file existence with path.
read_callback (Callable[str] -> bytes): Function to read bytes from path.
layer_write_callback (Callable[str, str]): Function to write layer content with target path and layer identifier.
file_copy_callback (Callable[str, str]): Function to copy file to target path with target path and source path.
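        Example:
            A local-disk sketch; the boolean return conventions are assumed
            from the fallback behaviour described above, and the read
            callback returns raw bytes as expected by the internal reader::

                import os

                def my_mkdir(path):
                    os.makedirs(path, exist_ok=True)
                    return True

                def my_write(path, blob):
                    with open(path, "wb") as f:
                        f.write(blob)
                    return True

                def my_exists(path):
                    return os.path.exists(path)

                def my_read(path):
                    with open(path, "rb") as f:
                        return f.read()

                OmniAssetConverter.set_file_callback(
                    my_mkdir, my_write, my_exists, my_read
                )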
"""
cls.__read_callback = read_callback
cls.__binary_write_callback = binary_write_callback
if read_callback:
_internal_read_callback = cls._importer_read_callback
else:
_internal_read_callback = None
if binary_write_callback:
_internal_write_callback = cls._importer_write_callback
else:
_internal_write_callback = None
omniConverterSetFileCallbacks(
mkdir_callback,
_internal_write_callback,
file_exists_callback,
_internal_read_callback,
layer_write_callback,
file_copy_callback,
)
@classmethod
def set_material_loader(cls, material_loader):
"""Sets material loader to intercept material loading.
        This function is deprecated since the material loader has been
        moved to the constructor so it can be customized per task.
        You can still set a material loader with this function; it
        will work as a global fallback if no material loader
        is provided to the constructor.
Args:
material_loader (Callable[OmniConverterMaterialDescription]): Function that takes
material description as param.
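        Example:
            A sketch of a fallback loader; returning None is assumed to keep
            the converter's default material handling, as in the internal
            fallback logic below::

                def my_loader(material_description):
                    # Inspect or log the description, then fall back to defaults.
                    return None

                OmniAssetConverter.set_material_loader(my_loader)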
"""
OmniAssetConverter.__fallback_material_loader = material_loader
@classmethod
def populate_all_materials(cls, asset_path):
"""Populates all material descriptions from assets.
Args:
asset_path (str): Asset path. Only FBX is supported currently.
Returns:
([OmniConverterMaterialDescription]): Array of material descriptions.
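        Example:
            A sketch; the attributes of each returned description are not
            enumerated here and may vary::

                descriptions = OmniAssetConverter.populate_all_materials("model.fbx")
                print(f"{len(descriptions)} materials found")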
"""
return omniConverterPopulateMaterials(asset_path)
@classmethod
def _importer_write_callback(cls, path, blob):
if not cls.__binary_write_callback:
return False
return cls.__binary_write_callback(path, memoryview(blob))
@classmethod
def _importer_read_callback(cls, path, blob):
if not cls.__read_callback:
return False
file_bytes = bytes(cls.__read_callback(path))
if file_bytes:
blob.assign(file_bytes)
return True
else:
return False
@classmethod
def _importer_progress_callback(cls, future, progress, total):
callback = cls.__future_progress_callbacks.get(future, None)
if callback:
callback(progress, total)
@classmethod
def _importer_material_loader(cls, future, material_description):
callback = cls.__future_material_loaders.get(future, None)
if callback:
return callback(material_description)
elif OmniAssetConverter.__fallback_material_loader:
return OmniAssetConverter.__fallback_material_loader(material_description)
else:
return None
@classmethod
def shutdown(cls):
"""Cleans up all setups. After this, all callbacks will be reset to fallback ones."""
cls.__read_callback = None
cls.set_file_callback(None, None, None, None, None, None)
cls.set_log_callback(None)
cls.set_progress_callback(None)
cls.set_material_loader(None)
cls.__material_loader_is_set = False
cls.__progress_callback_is_set = False
cls.__future_progress_callbacks = {}
cls.__future_material_loaders = {}
cls.__fallback_material_loader = None
def get_status(self):
"""Gets the status of this task. See `OmniConverterStatus`."""
return self._status
def get_detailed_error(self):
"""Gets the detailed error of this task if status is not OmniConverterStatus.OK"""
return self._detailed_error
def cancel(self):
"""Cancels this task."""
if self._future:
omniConverterCancelFuture(self._future)
self._status = omniConverterCheckFutureStatus(self._future)
self._detailed_error = omniConverterGetFutureDetailedError(self._future)
async def __aenter__(self):
flags = 0
if self._ignore_animation:
flags |= OMNI_CONVERTER_FLAGS_IGNORE_ANIMATION
if self._ignore_material:
flags |= OMNI_CONVERTER_FLAGS_IGNORE_MATERIALS
if self._single_mesh:
flags |= OMNI_CONVERTER_FLAGS_SINGLE_MESH_FILE
if self._smooth_normals:
flags |= OMNI_CONVERTER_FLAGS_GEN_SMOOTH_NORMALS
if self._ignore_cameras:
flags |= OMNI_CONVERTER_FLAGS_IGNORE_CAMERAS
if self._preview_surface:
flags |= OMNI_CONVERTER_FLAGS_EXPORT_PREVIEW_SURFACE
if self._support_point_instancer:
flags |= OMNI_CONVERTER_FLAGS_SUPPORT_POINTER_INSTANCER
if self._as_shapenet:
flags |= OMNI_CONVERTER_FLAGS_EXPORT_AS_SHAPENET
if self._embed_mdl_in_usd:
flags |= OMNI_CONVERTER_FLAGS_EMBED_MDL
if self._use_meter_as_world_unit:
flags |= OMNI_CONVERTER_FLAGS_USE_METER_PER_UNIT
if self._create_world_as_default_root_prim:
flags |= OMNI_CONVERTER_FLAGS_CREATE_WORLD_AS_DEFAULT_PRIM
if self._ignore_lights:
flags |= OMNI_CONVERTER_FLAGS_IGNORE_LIGHTS
if self._embed_textures:
flags |= OMNI_CONVERTER_FLAGS_EMBED_TEXTURES
if self._convert_fbx_to_y_up:
flags |= OMNI_CONVERTER_FLAGS_FBX_CONVERT_TO_Y_UP
if self._convert_fbx_to_z_up:
flags |= OMNI_CONVERTER_FLAGS_FBX_CONVERT_TO_Z_UP
if self._keep_all_materials:
flags |= OMNI_CONVERTER_FLAGS_KEEP_ALL_MATERIALS
if self._merge_all_meshes:
flags |= OMNI_CONVERTER_FLAGS_MERGE_ALL_MESHES
if self._use_double_precision_to_usd_transform_op:
flags |= OMNI_CONVERTER_FLAGS_USE_DOUBLE_PRECISION_FOR_USD_TRANSFORM_OP
if self._ignore_pivots:
flags |= OMNI_CONVERTER_FLAGS_IGNORE_PIVOTS
if self._disable_instancing:
flags |= OMNI_CONVERTER_FLAGS_DISABLE_INSTANCING
if self._export_hidden_props:
flags |= OMNI_CONVERTER_FLAGS_EXPORT_HIDDEN_PROPS
if self._baking_scales:
flags |= OMNI_CONVERTER_FLAGS_FBX_BAKING_SCALES_INTO_MESH
try:
self._future = omniConverterCreateAsset(self._in_file, self._out_file, flags)
if self._progress_callback:
OmniAssetConverter.__future_progress_callbacks[self._future] = self._progress_callback
if self._material_loader:
OmniAssetConverter.__future_material_loaders[self._future] = self._material_loader
status = OmniConverterStatus.IN_PROGRESS
while True:
status = omniConverterCheckFutureStatus(self._future)
if status == OmniConverterStatus.IN_PROGRESS:
await asyncio.sleep(0.1)
else:
break
self._status = status
self._detailed_error = omniConverterGetFutureDetailedError(self._future)
except Exception as e:
traceback.print_exc()
self._status = OmniConverterStatus.UNKNOWN
self._detailed_error = f"Failed to convert {self._in_file} with error: str(e)."
return self
async def __aexit__(self, exc_type, exc, tb):
OmniAssetConverter.__future_progress_callbacks.pop(self._future, None)
OmniAssetConverter.__future_material_loaders.pop(self._future, None)
omniConverterReleaseFuture(self._future)
self._future = None
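# --- Usage sketch (not part of the original module) ---------------------------
# A minimal, hedged example of driving a conversion with the async context
# manager above. The asset paths are placeholders; the coroutine is only
# defined here and never executed at import time. Schedule it on an event
# loop (e.g. with asyncio.ensure_future) from extension code.
async def _example_convert_asset():
    def on_progress(progress, total):
        # Per-task progress callback: receives (progress, total) as documented above.
        print(f"converting: {progress}/{total}")

    async with OmniAssetConverter(
        "model.fbx",                      # placeholder input asset
        "model.usd",                      # placeholder output USD
        progress_callback=on_progress,
        use_meter_as_world_unit=True,
    ) as task:
        if task.get_status() != OmniConverterStatus.OK:
            print(task.get_detailed_error())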
| 17,735 |
Python
| 38.677852 | 178 | 0.639075 |
parkerjgit/omniverse-sandbox/poc.extensions/adv-service/exts/omni.kit.asset_converter-1.2.39+wx64.r.cp37/PACKAGE-LICENSES/sha1-LICENSE.md
|
============
SHA-1 in C++
============
100% Public Domain.
Original C Code
-- Steve Reid <[email protected]>
Small changes to fit into bglibs
-- Bruce Guenter <[email protected]>
Translation to simpler C++ Code
-- Volker Diels-Grabsch <[email protected]>
Safety fixes
-- Eugene Hopkinson <slowriot at voxelstorm dot com>
Header-only library
-- Zlatko Michailov <[email protected]>
| 383 |
Markdown
| 22.999999 | 53 | 0.694517 |
parkerjgit/omniverse-sandbox/poc.extensions/adv-service/exts/omni.kit.asset_converter-1.2.39+wx64.r.cp37/PACKAGE-LICENSES/pybind11-LICENSE.md
|
Copyright (c) 2016 Wenzel Jakob <[email protected]>, All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Please also refer to the file CONTRIBUTING.md, which clarifies licensing of
external contributions to this project including patches, pull requests, etc.
| 1,676 |
Markdown
| 54.899998 | 79 | 0.809666 |
parkerjgit/omniverse-sandbox/poc.extensions/adv-service/exts/omni.kit.asset_converter-1.2.39+wx64.r.cp37/PACKAGE-LICENSES/tinyobjloader-LICENSE.md
|
The MIT License (MIT)
Copyright (c) 2012-2019 Syoyo Fujita and many contributors.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
----------------------------------
mapbox/earcut.hpp
ISC License
Copyright (c) 2015, Mapbox
Permission to use, copy, modify, and/or distribute this software for any purpose
with or without fee is hereby granted, provided that the above copyright notice
and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
THIS SOFTWARE.
| 1,902 |
Markdown
| 43.255813 | 80 | 0.790221 |
parkerjgit/omniverse-sandbox/poc.extensions/adv-service/exts/omni.kit.asset_converter-1.2.39+wx64.r.cp37/PACKAGE-LICENSES/assimp-LICENSE.md
|
Open Asset Import Library (assimp)
Copyright (c) 2006-2016, assimp team
All rights reserved.
Redistribution and use of this software in source and binary forms,
with or without modification, are permitted provided that the
following conditions are met:
* Redistributions of source code must retain the above
copyright notice, this list of conditions and the
following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the
following disclaimer in the documentation and/or other
materials provided with the distribution.
* Neither the name of the assimp team, nor the names of its
contributors may be used to endorse or promote products
derived from this software without specific prior
written permission of the assimp team.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************************************************************
AN EXCEPTION applies to all files in the ./test/models-nonbsd folder.
These are 3d models for testing purposes, from various free sources
on the internet. They are - unless otherwise stated - copyright of
their respective creators, which may impose additional requirements
on the use of their work. For any of these models, see
<model-name>.source.txt for more legal information. Contact us if you
are a copyright holder and believe that we credited you inproperly or
if you don't want your files to appear in the repository.
******************************************************************************
Poly2Tri Copyright (c) 2009-2010, Poly2Tri Contributors
http://code.google.com/p/poly2tri/
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Poly2Tri nor the names of its contributors may be
used to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| 3,791 |
Markdown
| 46.999999 | 81 | 0.772619 |