# Multi-GraspSet/utils/hand_model.py
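"""Differentiable hand model loaded from a URDF.

Builds a pytorch_kinematics chain for a robot hand, samples surface points
(with normals) on every link mesh, stores revolute-joint limits, and exposes
batched forward kinematics plus trimesh/plotly export for visualization.
"""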
import json
import os
import matplotlib.pyplot as plt
import numpy as np
import pytorch_kinematics as pk
import torch
import torch.nn
import transforms3d
import trimesh as tm
import urdf_parser_py.urdf as URDF_PARSER
from plotly import graph_objects as go
from pytorch_kinematics.urdf_parser_py.urdf import (URDF, Box, Cylinder, Mesh, Sphere)
from .rot6d import *
from .utils_math import *
import trimesh.sample
# from kaolin.metrics.trianglemesh import point_to_mesh_distance
# from kaolin.ops.mesh import check_sign, index_vertices_by_faces, face_normals
# from utils.visualize_plotly import plot_mesh
class HandModel:
def __init__(self, robot_name, urdf_filename, mesh_path,
batch_size=1,
device=torch.device('cuda' if torch.cuda.is_available() else 'cpu'),
mesh_nsp=128,
hand_scale=2.
):
self.device = device
self.robot_name = robot_name
self.batch_size = batch_size
# prepare model
        with open(urdf_filename) as f:
            urdf_xml = f.read()
        self.robot = pk.build_chain_from_urdf(urdf_xml).to(dtype=torch.float, device=self.device)
self.robot_full = URDF_PARSER.URDF.from_xml_file(urdf_filename)
# prepare contact point basis and surface point samples
# self.no_contact_dict = json.load(open(os.path.join('data', 'urdf', 'intersection_%s.json'%robot_name)))
# prepare geometries for visualization
self.global_translation = None
self.global_rotation = None
self.softmax = torch.nn.Softmax(dim=-1)
# prepare contact point basis and surface point samples
#self.contact_point_dict = json.load(open(os.path.join("./hand_model", 'contact_%s.json' % robot_name)))
self.contact_point_basis = {}
self.contact_normals = {}
self.surface_points = {}
self.surface_points_normal = {}
        visual = URDF.from_xml_string(urdf_xml)
self.mesh_verts = {}
self.mesh_faces = {}
self.canon_verts = []
self.canon_faces = []
self.idx_vert_faces = []
self.face_normals = []
for i_link, link in enumerate(visual.links):
print(f"Processing link #{i_link}: {link.name}")
# load mesh
if len(link.visuals) == 0:
continue
            if type(link.visuals[0].geometry) == Mesh:
                # print(link.visuals[0])
                if robot_name in ('shadowhand', 'barrett'):
                    # these URDFs reference meshes by basename only
                    filename = link.visuals[0].geometry.filename.split('/')[-1]
                elif robot_name == 'allegro':
                    # keep the parent directory along with the file name
                    filename = '/'.join(link.visuals[0].geometry.filename.split('/')[-2:])
                else:
                    filename = link.visuals[0].geometry.filename
mesh = tm.load(os.path.join(mesh_path, filename), force='mesh', process=False)
elif type(link.visuals[0].geometry) == Cylinder:
mesh = tm.primitives.Cylinder(
radius=link.visuals[0].geometry.radius, height=link.visuals[0].geometry.length)
elif type(link.visuals[0].geometry) == Box:
mesh = tm.primitives.Box(extents=link.visuals[0].geometry.size)
elif type(link.visuals[0].geometry) == Sphere:
mesh = tm.primitives.Sphere(
radius=link.visuals[0].geometry.radius)
else:
print(type(link.visuals[0].geometry))
raise NotImplementedError
            # only Mesh geometries carry a scale; fall back to unit scale
            scale = getattr(link.visuals[0].geometry, 'scale', None)
            scale = (np.array(scale, dtype=float).reshape([1, 3])
                     if scale is not None else np.ones([1, 3]))
try:
rotation = transforms3d.euler.euler2mat(*link.visuals[0].origin.rpy)
translation = np.reshape(link.visuals[0].origin.xyz, [1, 3])
# print('---')
# print(link.visuals[0].origin.rpy, rotation)
# print('---')
except AttributeError:
rotation = transforms3d.euler.euler2mat(0, 0, 0)
translation = np.array([[0, 0, 0]])
            # surface point samples (sample_surface_even may return fewer
            # points than requested)
            count = 64 if self.robot_name == 'shadowhand' else mesh_nsp
            pts, pts_face_index = trimesh.sample.sample_surface_even(mesh=mesh, count=count)
            pts_normal = np.array(mesh.face_normals[pts_face_index], dtype=float)
            # palm links sample poorly on the surface, so draw volume samples
            # and assign a fixed outward normal instead
            palm_links = {
                'barrett': ('bh_base_link', [0., 0., 1.]),
                'ezgripper': ('left_ezgripper_palm_link', [1., 0., 0.]),
                'robotiq_3finger': ('gripper_palm', [0., 0., 1.]),
            }
            if self.robot_name in palm_links and link.name == palm_links[self.robot_name][0]:
                palm_normal = palm_links[self.robot_name][1]
                pts = trimesh.sample.volume_mesh(mesh=mesh, count=1024)
                pts_normal = np.tile(np.array(palm_normal, dtype=float), (pts.shape[0], 1))
pts *= scale
# pts = mesh.sample(128) * scale
# print(link.name, len(pts))
            if robot_name == 'shadowhand':
                # shadowhand meshes use a swapped Y/Z axis convention
                pts = pts[:, [0, 2, 1]]
                pts_normal = pts_normal[:, [0, 2, 1]]
                pts[:, 1] *= -1
                pts_normal[:, 1] *= -1
pts = np.matmul(rotation, pts.T).T + translation
# pts_normal = np.matmul(rotation, pts_normal.T).T
pts = np.concatenate([pts, np.ones([len(pts), 1])], axis=-1)
pts_normal = np.concatenate([pts_normal, np.ones([len(pts_normal), 1])], axis=-1)
self.surface_points[link.name] = torch.from_numpy(pts).to(
device).float().unsqueeze(0).repeat(batch_size, 1, 1)
self.surface_points_normal[link.name] = torch.from_numpy(pts_normal).to(
device).float().unsqueeze(0).repeat(batch_size, 1, 1)
# visualization mesh
self.mesh_verts[link.name] = np.array(mesh.vertices) * scale
if robot_name == 'shadowhand':
self.mesh_verts[link.name] = self.mesh_verts[link.name][:, [0, 2, 1]]
self.mesh_verts[link.name][:, 1] *= -1
self.mesh_verts[link.name] = np.matmul(rotation, self.mesh_verts[link.name].T).T + translation
self.mesh_faces[link.name] = np.array(mesh.faces)
# point and normal of palm center
# contact point
# if link.name in self.contact_point_dict:
# # if link.name != 'index_1': continue
# # new 1.11
# cpb = np.array(self.contact_point_dict[link.name])
# # print("cpb shape: ", cpb.shape, len(cpb.shape))
# if len(cpb.shape) > 1:
# cpb = cpb[np.random.randint(cpb.shape[0], size=1)][0]
# # print(link.name, cpb)
# # go.Figure(data = [
# # go.Mesh3d(x=mesh.vertices[:,0], y=mesh.vertices[:,1], z=mesh.vertices[:,2], i=mesh.faces[:,0], j=mesh.faces[:,1], k=mesh.faces[:,2]),
# # go.Scatter3d(x=mesh.vertices[cpb,0], y=mesh.vertices[cpb, 1], z=mesh.vertices[cpb,2])]).show()
# # input()
# cp_basis = mesh.vertices[cpb] * scale
# # print(cpb, "cp_basis: ", cp_basis)
# if robot_name == 'shadowhand':
# cp_basis = cp_basis[:, [0, 2, 1]]
# cp_basis[:, 1] *= -1
# cp_basis = np.matmul(rotation, cp_basis.T).T + translation
# cp_basis = torch.cat([torch.from_numpy(cp_basis).to(device).float(), torch.ones([4, 1]).to(device).float()], dim=-1)
# self.contact_point_basis[link.name] = cp_basis.unsqueeze( 0).repeat(batch_size, 1, 1)
# v1 = cp_basis[1, :3] - cp_basis[0, :3]
# v2 = cp_basis[2, :3] - cp_basis[0, :3]
# v1 = v1 / torch.norm(v1)
# v2 = v2 / torch.norm(v2)
# self.contact_normals[link.name] = torch.cross(v1, v2).view([1, 3])
# self.contact_normals[link.name] = self.contact_normals[link.name].unsqueeze(0).repeat(batch_size, 1, 1)
        # collect the revolute joints and their limits, in the order used by
        # the kinematic chain
        self.revolute_joints = [
            joint for joint in self.robot_full.joints if joint.joint_type == 'revolute']
        joints_by_name = {joint.name: joint for joint in self.revolute_joints}
        self.revolute_joints_q_mid = []
        self.revolute_joints_q_var = []
        self.revolute_joints_q_upper = []
        self.revolute_joints_q_lower = []
        for name in self.robot.get_joint_parameter_names():
            if name not in joints_by_name:
                continue
            joint = joints_by_name[name]
            self.revolute_joints_q_mid.append((joint.limit.lower + joint.limit.upper) / 2)
            self.revolute_joints_q_var.append(((joint.limit.upper - joint.limit.lower) / 2) ** 2)
            self.revolute_joints_q_lower.append(joint.limit.lower)
            self.revolute_joints_q_upper.append(joint.limit.upper)
        self.revolute_joints_q_lower = torch.Tensor(
            self.revolute_joints_q_lower).repeat([self.batch_size, 1]).to(device)
        self.revolute_joints_q_upper = torch.Tensor(
            self.revolute_joints_q_upper).repeat([self.batch_size, 1]).to(device)
        self.current_status = None
        self.scale = hand_scale
    def update_kinematics(self, q):
        """Update the hand pose from a batched configuration vector.

        q layout: [:3] global translation, [3:9] global rotation in
        ortho-6D form, [9:] joint angles.
        """
        self.global_translation = q[:, :3]
        self.global_rotation = robust_compute_rotation_matrix_from_ortho6d(q[:, 3:9])
        self.current_status = self.robot.forward_kinematics(q[:, 9:])
    def _transformed_link_verts(self, link_name, i):
        # apply forward kinematics, then the global pose and scale, to the
        # vertices of one link mesh
        trans_matrix = self.current_status[link_name].get_matrix()
        trans_matrix = trans_matrix[min(len(trans_matrix) - 1, i)].detach().cpu().numpy()
        v = self.mesh_verts[link_name]
        transformed_v = np.concatenate([v, np.ones([len(v), 1])], axis=-1)
        transformed_v = np.matmul(trans_matrix, transformed_v.T).T[..., :3]
        transformed_v = np.matmul(self.global_rotation[i].detach().cpu().numpy(),
                                  transformed_v.T).T + np.expand_dims(
            self.global_translation[i].detach().cpu().numpy(), 0)
        return transformed_v * self.scale

    def get_meshes_from_q(self, q=None, i=0):
        data = []
        if q is not None:
            self.update_kinematics(q)
        for link_name in self.mesh_verts:
            transformed_v = self._transformed_link_verts(link_name, i)
            data.append(tm.Trimesh(vertices=transformed_v, faces=self.mesh_faces[link_name]))
        return data

    def get_plotly_data(self, q=None, i=0, color='lightblue', opacity=1.):
        data = []
        if q is not None:
            self.update_kinematics(q)
        for link_name in self.mesh_verts:
            transformed_v = self._transformed_link_verts(link_name, i)
            f = self.mesh_faces[link_name]
            data.append(
                go.Mesh3d(x=transformed_v[:, 0], y=transformed_v[:, 1], z=transformed_v[:, 2],
                          i=f[:, 0], j=f[:, 1], k=f[:, 2], color=color, opacity=opacity))
        return data
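
# Minimal usage sketch, not part of the library: assumes a ShadowHand URDF and
# mesh directory at the (hypothetical) paths below; adjust robot_name, paths,
# and joint count to match your own data layout.
if __name__ == '__main__':
    hand = HandModel(robot_name='shadowhand',
                     urdf_filename='data/urdf/shadowhand.urdf',  # assumed path
                     mesh_path='data/urdf/meshes',  # assumed path
                     batch_size=1)
    n_dof = len(hand.robot.get_joint_parameter_names())
    # q = [translation (3) | ortho-6D rotation (6) | joint angles (n_dof)]
    q = torch.zeros([1, 9 + n_dof], device=hand.device)
    q[:, 3:9] = torch.tensor([1., 0., 0., 0., 1., 0.], device=hand.device)  # identity rotation
    meshes = hand.get_meshes_from_q(q)
    print(f'exported {len(meshes)} link meshes')
    # go.Figure(data=hand.get_plotly_data(q)).show()  # interactive view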