| import json |
| import os |
| from tabnanny import check |
|
|
| import matplotlib.pyplot as plt |
| import numpy as np |
| import pytorch_kinematics as pk |
| import torch |
| import torch.nn |
| import transforms3d |
| import trimesh as tm |
| import urdf_parser_py.urdf as URDF_PARSER |
| from plotly import graph_objects as go |
| from pytorch_kinematics.urdf_parser_py.urdf import (URDF, Box, Cylinder, Mesh, Sphere) |
| from .rot6d import * |
| from .utils_math import * |
| import trimesh.sample |
| |
| |
|
|
| |
|
|
|
|
class HandModel:
    """Differentiable hand/gripper model loaded from a URDF.

    Builds a pytorch_kinematics FK chain, caches per-link visual meshes, and
    samples per-link surface points with (approximate) outward normals.  The
    hand pose is a batch of vectors
    ``q = [global_translation(3) | global_rotation_6d(6) | joint_angles(...)]``
    set via :meth:`update_kinematics`.

    NOTE(review): all geometry returned by ``get_meshes_from_q`` /
    ``get_plotly_data`` is already multiplied by ``hand_scale``.
    """

    def __init__(self, robot_name, urdf_filename, mesh_path,
                 batch_size=1,
                 device=torch.device('cuda' if torch.cuda.is_available() else 'cpu'),
                 mesh_nsp=128,
                 hand_scale=2.
                 ):
        """Load the URDF, build the FK chain, and precompute link geometry.

        Args:
            robot_name: robot identifier ('shadowhand', 'allegro', 'barrett',
                'ezgripper', 'robotiq_3finger', ...); selects mesh-filename
                resolution and axis conventions.
            urdf_filename: path to the robot's URDF file.
            mesh_path: directory containing the visual mesh files.
            batch_size: number of hand instances held in parallel.
            device: torch device for all model tensors.
            mesh_nsp: unused — surface sample counts are hard-coded (64 for
                shadowhand, 128 otherwise); kept for interface compatibility.
            hand_scale: uniform scale applied to all output geometry.
        """
        self.device = device
        self.robot_name = robot_name
        self.batch_size = batch_size

        # Differentiable FK chain plus the full URDF model (for joint limits).
        self.robot = pk.build_chain_from_urdf(open(urdf_filename).read()).to(dtype=torch.float, device=self.device)
        self.robot_full = URDF_PARSER.URDF.from_xml_file(urdf_filename)

        # Global pose; populated by update_kinematics().
        self.global_translation = None
        self.global_rotation = None
        self.softmax = torch.nn.Softmax(dim=-1)

        # Per-link caches.
        self.contact_point_basis = {}
        self.contact_normals = {}
        self.surface_points = {}          # link name -> (B, N, 4) homogeneous points
        self.surface_points_normal = {}   # link name -> (B, N, 4) homogeneous normals
        visual = URDF.from_xml_string(open(urdf_filename).read())
        self.mesh_verts = {}              # link name -> (V, 3) link-frame vertices
        self.mesh_faces = {}              # link name -> (F, 3) face indices

        self.canon_verts = []
        self.canon_faces = []
        self.idx_vert_faces = []
        self.face_normals = []
        verts_bias = 0

        for i_link, link in enumerate(visual.links):
            print(f"Processing link #{i_link}: {link.name}")
            if len(link.visuals) == 0:
                continue
            geometry = link.visuals[0].geometry

            # --- load the link's visual geometry -------------------------------
            if type(geometry) == Mesh:
                # FIX: the original condition already matched 'allegro' here and
                # was followed by an unreachable `elif robot_name == 'allegro'`
                # that joined the last TWO path components.  The dead branch is
                # removed; allegro keeps the behavior it actually had
                # (basename-only lookup inside mesh_path).
                if robot_name in ('shadowhand', 'allegro', 'barrett'):
                    filename = geometry.filename.split('/')[-1]
                else:
                    filename = geometry.filename
                mesh = tm.load(os.path.join(mesh_path, filename), force='mesh', process=False)
            elif type(geometry) == Cylinder:
                mesh = tm.primitives.Cylinder(radius=geometry.radius, height=geometry.length)
            elif type(geometry) == Box:
                mesh = tm.primitives.Box(extents=geometry.size)
            elif type(geometry) == Sphere:
                mesh = tm.primitives.Sphere(radius=geometry.radius)
            else:
                print(type(geometry))
                raise NotImplementedError

            # URDF <mesh scale="...">: primitives have no .scale attribute
            # (AttributeError) and mesh scale may be None (ValueError/TypeError
            # on reshape).  FIX: was a bare `except:`.
            try:
                scale = np.array(geometry.scale).reshape([1, 3])
            except (AttributeError, ValueError, TypeError):
                scale = np.array([[1, 1, 1]])

            # Visual-origin offset of the mesh relative to the link frame;
            # links without an explicit origin default to identity.
            try:
                rotation = transforms3d.euler.euler2mat(*link.visuals[0].origin.rpy)
                translation = np.reshape(link.visuals[0].origin.xyz, [1, 3])
            except AttributeError:
                rotation = transforms3d.euler.euler2mat(0, 0, 0)
                translation = np.array([[0, 0, 0]])

            # --- sample surface points + normals -------------------------------
            # shadowhand uses a sparser sampling (64); everything else 128.
            sample_count = 64 if self.robot_name == 'shadowhand' else 128
            pts, pts_face_index = trimesh.sample.sample_surface_even(mesh=mesh, count=sample_count)
            pts_normal = np.array([mesh.face_normals[x] for x in pts_face_index], dtype=float)

            # Palm links get dense volume samples with one fixed normal
            # direction (crude palm-facing approximation).
            palm_normal = None
            if self.robot_name == 'barrett' and link.name in ['bh_base_link']:
                palm_normal = [0., 0., 1.]
            elif self.robot_name == 'ezgripper' and link.name in ['left_ezgripper_palm_link']:
                palm_normal = [1., 0., 0.]
            elif self.robot_name == 'robotiq_3finger' and link.name in ['gripper_palm']:
                palm_normal = [0., 0., 1.]
            if palm_normal is not None:
                pts = trimesh.sample.volume_mesh(mesh=mesh, count=1024)
                pts_normal = np.array([palm_normal for _ in range(pts.shape[0])], dtype=float)

            pts *= scale

            if robot_name == 'shadowhand':
                # Shadowhand meshes are authored with y/z swapped relative to
                # the URDF frame: swap the axes and flip y for points & normals.
                pts = pts[:, [0, 2, 1]]
                pts_normal = pts_normal[:, [0, 2, 1]]
                pts[:, 1] *= -1
                pts_normal[:, 1] *= -1

            # Mesh frame -> link frame, then store as homogeneous coords,
            # replicated across the batch.
            pts = np.matmul(rotation, pts.T).T + translation
            pts = np.concatenate([pts, np.ones([len(pts), 1])], axis=-1)
            pts_normal = np.concatenate([pts_normal, np.ones([len(pts_normal), 1])], axis=-1)
            self.surface_points[link.name] = torch.from_numpy(pts).to(
                device).float().unsqueeze(0).repeat(batch_size, 1, 1)
            self.surface_points_normal[link.name] = torch.from_numpy(pts_normal).to(
                device).float().unsqueeze(0).repeat(batch_size, 1, 1)

            # Keep the full visual mesh (link frame) for visualization.
            self.mesh_verts[link.name] = np.array(mesh.vertices) * scale
            if robot_name == 'shadowhand':
                self.mesh_verts[link.name] = self.mesh_verts[link.name][:, [0, 2, 1]]
                self.mesh_verts[link.name][:, 1] *= -1
            self.mesh_verts[link.name] = np.matmul(rotation, self.mesh_verts[link.name].T).T + translation
            self.mesh_faces[link.name] = np.array(mesh.faces)

        # --- joint limits, ordered as the FK chain reports its parameters -----
        # FIX: was an O(n*m) double loop re-calling get_joint_parameter_names()
        # inside both loops; a name->joint dict preserves the same order/content.
        self.revolute_joints = [j for j in self.robot_full.joints
                                if j.joint_type == 'revolute']
        revolute_by_name = {j.name: j for j in self.revolute_joints}
        self.revolute_joints_q_mid = []
        self.revolute_joints_q_var = []
        self.revolute_joints_q_upper = []
        self.revolute_joints_q_lower = []
        for joint_name in self.robot.get_joint_parameter_names():
            joint = revolute_by_name.get(joint_name)
            if joint is None:
                continue
            self.revolute_joints_q_mid.append(
                (joint.limit.lower + joint.limit.upper) / 2)
            self.revolute_joints_q_var.append(
                ((joint.limit.upper - joint.limit.lower) / 2) ** 2)
            self.revolute_joints_q_lower.append(joint.limit.lower)
            self.revolute_joints_q_upper.append(joint.limit.upper)

        # (num_joints,) lists -> (B, num_joints) tensors.
        self.revolute_joints_q_lower = torch.Tensor(
            self.revolute_joints_q_lower).repeat([self.batch_size, 1]).to(device)
        self.revolute_joints_q_upper = torch.Tensor(
            self.revolute_joints_q_upper).repeat([self.batch_size, 1]).to(device)

        # Per-link FK transforms; populated by update_kinematics().
        self.current_status = None

        self.scale = hand_scale

    def update_kinematics(self, q):
        """Set the hand pose from a batch of pose vectors.

        Args:
            q: (B, 9 + num_joints) tensor — [:3] global translation, [3:9] 6D
                rotation representation, [9:] joint angles.
        """
        self.global_translation = q[:, :3]
        # 6D ortho representation -> (B, 3, 3) rotation matrices.
        self.global_rotation = robust_compute_rotation_matrix_from_ortho6d(q[:, 3:9])
        self.current_status = self.robot.forward_kinematics(q[:, 9:])

    def _posed_link_vertices(self, link_name, i):
        """Return link vertices for batch item `i`, posed and scaled, (V, 3) numpy.

        Applies link FK, then global rotation/translation, then `self.scale`.
        """
        trans_matrix = self.current_status[link_name].get_matrix()
        # Some links share a single FK matrix across the batch; clamp the index.
        trans_matrix = trans_matrix[min(len(trans_matrix) - 1, i)].detach().cpu().numpy()
        v = self.mesh_verts[link_name]
        v = np.concatenate([v, np.ones([len(v), 1])], axis=-1)
        v = np.matmul(trans_matrix, v.T).T[..., :3]
        v = np.matmul(self.global_rotation[i].detach().cpu().numpy(),
                      v.T).T + np.expand_dims(
                          self.global_translation[i].detach().cpu().numpy(), 0)
        return v * self.scale

    def get_meshes_from_q(self, q=None, i=0):
        """Return a list of posed trimesh.Trimesh objects, one per link.

        Args:
            q: optional pose batch; if given, update_kinematics(q) is run first.
            i: batch index to extract.
        """
        if q is not None:
            self.update_kinematics(q)
        return [tm.Trimesh(vertices=self._posed_link_vertices(link_name, i),
                           faces=self.mesh_faces[link_name])
                for link_name in self.mesh_verts]

    def get_plotly_data(self, q=None, i=0, color='lightblue', opacity=1.):
        """Return a list of plotly go.Mesh3d traces, one per link.

        Args:
            q: optional pose batch; if given, update_kinematics(q) is run first.
            i: batch index to extract.
            color: mesh color passed to plotly.
            opacity: mesh opacity passed to plotly.
        """
        if q is not None:
            self.update_kinematics(q)
        data = []
        for link_name in self.mesh_verts:
            v = self._posed_link_vertices(link_name, i)
            f = self.mesh_faces[link_name]
            data.append(
                go.Mesh3d(x=v[:, 0], y=v[:, 1], z=v[:, 2],
                          i=f[:, 0], j=f[:, 1], k=f[:, 2],
                          color=color, opacity=opacity))
        return data
|
|
|
|