Dataset columns:
  code       - string, lengths 66 to 870k
  docstring  - string, lengths 19 to 26.7k
  func_name  - string, lengths 1 to 138
  language   - categorical, 1 distinct value
  repo       - string, lengths 7 to 68
  path       - string, lengths 5 to 324
  url        - string, lengths 46 to 389
  license    - categorical, 7 distinct values
def axis_angle_from_quat(quat: torch.Tensor, eps: float = 1.0e-6) -> torch.Tensor:
    """Convert rotations given as quaternions to axis/angle.

    Args:
        quat: The quaternion orientation in (w, x, y, z). Shape is (..., 4).
        eps: The tolerance for Taylor approximation. Defaults to 1.0e-6.

    Returns:
        Rotations given as a vector in axis angle form. Shape is (..., 3).
        The vector's magnitude is the angle turned anti-clockwise in radians around the vector's direction.

    Reference:
        https://github.com/facebookresearch/pytorch3d/blob/main/pytorch3d/transforms/rotation_conversions.py#L526-L554
    """
    # Modified to take in quat as [q_w, q_x, q_y, q_z]
    # Quaternion is [q_w, q_x, q_y, q_z] = [cos(theta/2), n_x * sin(theta/2), n_y * sin(theta/2), n_z * sin(theta/2)]
    # Axis-angle is [a_x, a_y, a_z] = [theta * n_x, theta * n_y, theta * n_z]
    # Thus, axis-angle is [q_x, q_y, q_z] / (sin(theta/2) / theta)
    # When theta = 0, (sin(theta/2) / theta) is undefined
    # However, as theta --> 0, we can use the Taylor approximation 1/2 - theta^2 / 48
    quat = quat * (1.0 - 2.0 * (quat[..., 0:1] < 0.0))
    mag = torch.linalg.norm(quat[..., 1:], dim=-1)
    half_angle = torch.atan2(mag, quat[..., 0])
    angle = 2.0 * half_angle
    # check whether to apply Taylor approximation
    sin_half_angles_over_angles = torch.where(
        angle.abs() > eps, torch.sin(half_angle) / angle, 0.5 - angle * angle / 48
    )
    return quat[..., 1:4] / sin_half_angles_over_angles.unsqueeze(-1)
axis_angle_from_quat
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/math_utils.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
Apache-2.0
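A minimal usage sketch for axis_angle_from_quat above (assumptions: torch is installed and the function is importable as neural_wbc.core.math_utils, a module path inferred from the record's path field; the numbers are illustrative):

import torch
from neural_wbc.core.math_utils import axis_angle_from_quat

# a 90-degree rotation about the z-axis, given as a (w, x, y, z) quaternion
quat = torch.tensor([[0.7071, 0.0, 0.0, 0.7071]])
axis_angle = axis_angle_from_quat(quat)  # expected: approximately [0.0, 0.0, pi/2]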
def quat_error_magnitude(q1: torch.Tensor, q2: torch.Tensor) -> torch.Tensor:
    """Computes the rotation difference between two quaternions.

    Args:
        q1: The first quaternion in (w, x, y, z). Shape is (..., 4).
        q2: The second quaternion in (w, x, y, z). Shape is (..., 4).

    Returns:
        Angular error between input quaternions in radians.
    """
    quat_diff = quat_mul(q1, quat_conjugate(q2))
    return torch.norm(axis_angle_from_quat(quat_diff), dim=-1)
quat_error_magnitude
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/math_utils.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
Apache-2.0
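A minimal usage sketch for quat_error_magnitude above (same hedged import assumption as the previous sketch; values are illustrative):

import torch
from neural_wbc.core.math_utils import quat_error_magnitude

q1 = torch.tensor([[1.0, 0.0, 0.0, 0.0]])        # identity orientation
q2 = torch.tensor([[0.7071, 0.7071, 0.0, 0.0]])  # 90 degrees about the x-axis
err = quat_error_magnitude(q1, q2)               # expected: approximately pi/2 radians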
def skew_symmetric_matrix(vec: torch.Tensor) -> torch.Tensor:
    """Computes the skew-symmetric matrix of a vector.

    Args:
        vec: The input vector. Shape is (3,) or (N, 3).

    Returns:
        The skew-symmetric matrix. Shape is (1, 3, 3) or (N, 3, 3).

    Raises:
        ValueError: If input tensor is not of shape (..., 3).
    """
    # check input is correct
    if vec.shape[-1] != 3:
        raise ValueError(f"Expected input vector shape mismatch: {vec.shape} != (..., 3).")
    # unsqueeze the last dimension
    if vec.ndim == 1:
        vec = vec.unsqueeze(0)
    # create a skew-symmetric matrix
    skew_sym_mat = torch.zeros(vec.shape[0], 3, 3, device=vec.device, dtype=vec.dtype)
    skew_sym_mat[:, 0, 1] = -vec[:, 2]
    skew_sym_mat[:, 0, 2] = vec[:, 1]
    skew_sym_mat[:, 1, 2] = -vec[:, 0]
    skew_sym_mat[:, 1, 0] = vec[:, 2]
    skew_sym_mat[:, 2, 0] = -vec[:, 1]
    skew_sym_mat[:, 2, 1] = vec[:, 0]
    return skew_sym_mat
skew_symmetric_matrix
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/math_utils.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
Apache-2.0
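A short sketch showing the cross-product property of the matrix returned by skew_symmetric_matrix above (hedged: assumes a recent torch version and the same import path as the earlier sketches):

import torch
from neural_wbc.core.math_utils import skew_symmetric_matrix

v = torch.tensor([[1.0, 2.0, 3.0]])
w = torch.tensor([[0.5, -1.0, 2.0]])
S = skew_symmetric_matrix(v)                       # shape (1, 3, 3)
# multiplying by S reproduces the cross product v x w
cross = torch.bmm(S, w.unsqueeze(-1)).squeeze(-1)  # matches torch.linalg.cross(v, w)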
def is_identity_pose(pos: torch.Tensor, rot: torch.Tensor) -> bool:
    """Checks if input poses are identity transforms.

    The function checks if the input position and orientation are close to zero and identity respectively
    using L2-norm. It does NOT check the error in the orientation.

    Args:
        pos: The cartesian position. Shape is (N, 3).
        rot: The quaternion in (w, x, y, z). Shape is (N, 4).

    Returns:
        True if all the input poses result in identity transform. Otherwise, False.
    """
    # create identity transformations
    pos_identity = torch.zeros_like(pos)
    rot_identity = torch.zeros_like(rot)
    rot_identity[..., 0] = 1
    # compare input to identity
    return torch.allclose(pos, pos_identity) and torch.allclose(rot, rot_identity)
is_identity_pose
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/math_utils.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
Apache-2.0
def combine_frame_transforms(
    t01: torch.Tensor, q01: torch.Tensor, t12: torch.Tensor | None = None, q12: torch.Tensor | None = None
) -> tuple[torch.Tensor, torch.Tensor]:
    r"""Combine transformations between two reference frames into a stationary frame.

    It performs the following transformation operation: :math:`T_{02} = T_{01} \times T_{12}`,
    where :math:`T_{AB}` is the homogeneous transformation matrix from frame A to B.

    Args:
        t01: Position of frame 1 w.r.t. frame 0. Shape is (N, 3).
        q01: Quaternion orientation of frame 1 w.r.t. frame 0 in (w, x, y, z). Shape is (N, 4).
        t12: Position of frame 2 w.r.t. frame 1. Shape is (N, 3).
            Defaults to None, in which case the position is assumed to be zero.
        q12: Quaternion orientation of frame 2 w.r.t. frame 1 in (w, x, y, z). Shape is (N, 4).
            Defaults to None, in which case the orientation is assumed to be identity.

    Returns:
        A tuple containing the position and orientation of frame 2 w.r.t. frame 0.
        Shape of the tensors are (N, 3) and (N, 4) respectively.
    """
    # compute orientation
    if q12 is not None:
        q02 = quat_mul(q01, q12)
    else:
        q02 = q01
    # compute translation
    if t12 is not None:
        t02 = t01 + quat_apply(q01, t12)
    else:
        t02 = t01
    return t02, q02
combine_frame_transforms
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/math_utils.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
Apache-2.0
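A minimal sketch chaining two frames with combine_frame_transforms above (hedged: quat_mul and quat_apply are assumed to live in the same module, as the function body implies; the numbers are illustrative):

import torch
from neural_wbc.core.math_utils import combine_frame_transforms

t01 = torch.tensor([[1.0, 0.0, 0.0]])              # frame 1 is 1 m ahead of frame 0
q01 = torch.tensor([[0.7071, 0.0, 0.0, 0.7071]])   # frame 1 is yawed 90 degrees
t12 = torch.tensor([[0.0, 1.0, 0.0]])              # frame 2 is 1 m to the left of frame 1
t02, q02 = combine_frame_transforms(t01, q01, t12)
# the offset (0, 1, 0) rotated by the 90-degree yaw becomes (-1, 0, 0), so t02 is close to (0, 0, 0)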
def subtract_frame_transforms(
    t01: torch.Tensor, q01: torch.Tensor, t02: torch.Tensor | None = None, q02: torch.Tensor | None = None
) -> tuple[torch.Tensor, torch.Tensor]:
    r"""Subtract transformations between two reference frames into a stationary frame.

    It performs the following transformation operation: :math:`T_{12} = T_{01}^{-1} \times T_{02}`,
    where :math:`T_{AB}` is the homogeneous transformation matrix from frame A to B.

    Args:
        t01: Position of frame 1 w.r.t. frame 0. Shape is (N, 3).
        q01: Quaternion orientation of frame 1 w.r.t. frame 0 in (w, x, y, z). Shape is (N, 4).
        t02: Position of frame 2 w.r.t. frame 0. Shape is (N, 3).
            Defaults to None, in which case the position is assumed to be zero.
        q02: Quaternion orientation of frame 2 w.r.t. frame 0 in (w, x, y, z). Shape is (N, 4).
            Defaults to None, in which case the orientation is assumed to be identity.

    Returns:
        A tuple containing the position and orientation of frame 2 w.r.t. frame 1.
        Shape of the tensors are (N, 3) and (N, 4) respectively.
    """
    # compute orientation
    q10 = quat_inv(q01)
    if q02 is not None:
        q12 = quat_mul(q10, q02)
    else:
        q12 = q10
    # compute translation
    if t02 is not None:
        t12 = quat_apply(q10, t02 - t01)
    else:
        t12 = quat_apply(q10, -t01)
    return t12, q12
subtract_frame_transforms
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/math_utils.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
Apache-2.0
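A round-trip sketch for subtract_frame_transforms above: recover a relative transform and feed it back through combine_frame_transforms (same hedged import assumptions as the earlier sketches):

import torch
from neural_wbc.core.math_utils import combine_frame_transforms, subtract_frame_transforms

t01 = torch.tensor([[0.5, -0.2, 1.0]])
q01 = torch.tensor([[0.7071, 0.0, 0.7071, 0.0]])   # 90 degrees about the y-axis
t02 = torch.tensor([[1.0, 0.3, 0.8]])
q02 = torch.tensor([[1.0, 0.0, 0.0, 0.0]])
t12, q12 = subtract_frame_transforms(t01, q01, t02, q02)   # pose of frame 2 in frame 1
t02_rec, q02_rec = combine_frame_transforms(t01, q01, t12, q12)
# t02_rec and q02_rec should match t02 and q02 up to numerical precision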
def compute_pose_error(
    t01: torch.Tensor,
    q01: torch.Tensor,
    t02: torch.Tensor,
    q02: torch.Tensor,
    rot_error_type: Literal["quat", "axis_angle"] = "axis_angle",
) -> tuple[torch.Tensor, torch.Tensor]:
    """Compute the position and orientation error between source and target frames.

    Args:
        t01: Position of source frame. Shape is (N, 3).
        q01: Quaternion orientation of source frame in (w, x, y, z). Shape is (N, 4).
        t02: Position of target frame. Shape is (N, 3).
        q02: Quaternion orientation of target frame in (w, x, y, z). Shape is (N, 4).
        rot_error_type: The rotation error type to return: "quat", "axis_angle". Defaults to "axis_angle".

    Returns:
        A tuple containing position and orientation error. Shape of position error is (N, 3).
        Shape of orientation error depends on the value of :attr:`rot_error_type`:

        - If :attr:`rot_error_type` is "quat", the orientation error is returned as a quaternion. Shape is (N, 4).
        - If :attr:`rot_error_type` is "axis_angle", the orientation error is returned as an axis-angle vector. Shape is (N, 3).

    Raises:
        ValueError: Invalid rotation error type.
    """
    # Compute quaternion error (i.e., difference quaternion)
    # Reference: https://personal.utdallas.edu/~sxb027100/dock/quaternion.html
    # q_current_norm = q_current * q_current_conj
    source_quat_norm = quat_mul(q01, quat_conjugate(q01))[:, 0]
    # q_current_inv = q_current_conj / q_current_norm
    source_quat_inv = quat_conjugate(q01) / source_quat_norm.unsqueeze(-1)
    # q_error = q_target * q_current_inv
    quat_error = quat_mul(q02, source_quat_inv)

    # Compute position error
    pos_error = t02 - t01

    # return error based on specified type
    if rot_error_type == "quat":
        return pos_error, quat_error
    elif rot_error_type == "axis_angle":
        # Convert to axis-angle error
        axis_angle_error = axis_angle_from_quat(quat_error)
        return pos_error, axis_angle_error
    else:
        raise ValueError(f"Unsupported orientation error type: {rot_error_type}. Valid: 'quat', 'axis_angle'.")
compute_pose_error
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/math_utils.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
Apache-2.0
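A minimal sketch for compute_pose_error above (hedged; the target quaternion below is an illustrative 90-degree rotation about the y-axis):

import torch
from neural_wbc.core.math_utils import compute_pose_error

t01 = torch.zeros(1, 3)
q01 = torch.tensor([[1.0, 0.0, 0.0, 0.0]])
t02 = torch.tensor([[0.1, 0.0, 0.2]])
q02 = torch.tensor([[0.7071, 0.0, 0.7071, 0.0]])
pos_err, rot_err = compute_pose_error(t01, q01, t02, q02, rot_error_type="axis_angle")
# pos_err equals t02 - t01; the norm of rot_err is approximately pi/2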
def apply_delta_pose(
    source_pos: torch.Tensor, source_rot: torch.Tensor, delta_pose: torch.Tensor, eps: float = 1.0e-6
) -> tuple[torch.Tensor, torch.Tensor]:
    """Applies delta pose transformation on source pose.

    The first three elements of `delta_pose` are interpreted as cartesian position displacement.
    The remaining three elements of `delta_pose` are interpreted as orientation displacement
    in the angle-axis format.

    Args:
        source_pos: Position of source frame. Shape is (N, 3).
        source_rot: Quaternion orientation of source frame in (w, x, y, z). Shape is (N, 4).
        delta_pose: Position and orientation displacements. Shape is (N, 6).
        eps: The tolerance to consider orientation displacement as zero. Defaults to 1.0e-6.

    Returns:
        A tuple containing the displaced position and orientation frames.
        Shape of the tensors are (N, 3) and (N, 4) respectively.
    """
    # number of poses given
    num_poses = source_pos.shape[0]
    device = source_pos.device

    # interpret delta_pose[:, 0:3] as target position displacements
    target_pos = source_pos + delta_pose[:, 0:3]
    # interpret delta_pose[:, 3:6] as target rotation displacements
    rot_actions = delta_pose[:, 3:6]
    angle = torch.linalg.vector_norm(rot_actions, dim=1)
    axis = rot_actions / angle.unsqueeze(-1)
    # change from axis-angle to quat convention
    identity_quat = torch.tensor([1.0, 0.0, 0.0, 0.0], device=device).repeat(num_poses, 1)
    rot_delta_quat = torch.where(
        angle.unsqueeze(-1).repeat(1, 4) > eps, quat_from_angle_axis(angle, axis), identity_quat
    )
    # TODO: Check if this is the correct order for this multiplication.
    target_rot = quat_mul(rot_delta_quat, source_rot)
    return target_pos, target_rot
apply_delta_pose
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/math_utils.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
Apache-2.0
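A minimal sketch for apply_delta_pose above (hedged; quat_from_angle_axis is assumed to be available in the same module, as the function body implies):

import torch
from neural_wbc.core.math_utils import apply_delta_pose

src_pos = torch.zeros(1, 3)
src_rot = torch.tensor([[1.0, 0.0, 0.0, 0.0]])
# 5 cm displacement along x and a 0.1 rad rotation about z
delta = torch.tensor([[0.05, 0.0, 0.0, 0.0, 0.0, 0.1]])
new_pos, new_rot = apply_delta_pose(src_pos, src_rot, delta)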
def transform_points(
    points: torch.Tensor, pos: torch.Tensor | None = None, quat: torch.Tensor | None = None
) -> torch.Tensor:
    r"""Transform input points in a given frame to a target frame.

    This function transforms points from a source frame to a target frame. The transformation is defined by the
    position :math:`t` and orientation :math:`R` of the target frame in the source frame.

    .. math::
        p_{target} = R_{target} \times p_{source} + t_{target}

    If the input `points` is a batch of points, the inputs `pos` and `quat` must be either a batch of
    positions and quaternions or a single position and quaternion. If the inputs `pos` and `quat` are
    a single position and quaternion, the same transformation is applied to all points in the batch.

    If either of the inputs :attr:`pos` and :attr:`quat` is None, the corresponding transformation is not applied.

    Args:
        points: Points to transform. Shape is (N, P, 3) or (P, 3).
        pos: Position of the target frame. Shape is (N, 3) or (3,).
            Defaults to None, in which case the position is assumed to be zero.
        quat: Quaternion orientation of the target frame in (w, x, y, z). Shape is (N, 4) or (4,).
            Defaults to None, in which case the orientation is assumed to be identity.

    Returns:
        Transformed points in the target frame. Shape is (N, P, 3) or (P, 3).

    Raises:
        ValueError: If the input `points` is not of shape (N, P, 3) or (P, 3).
        ValueError: If the input `pos` is not of shape (N, 3) or (3,).
        ValueError: If the input `quat` is not of shape (N, 4) or (4,).
    """
    points_batch = points.clone()
    # check if inputs are batched
    is_batched = points_batch.dim() == 3
    # -- check inputs
    if points_batch.dim() == 2:
        points_batch = points_batch[None]  # (P, 3) -> (1, P, 3)
    if points_batch.dim() != 3:
        raise ValueError(f"Expected points to have dim = 2 or dim = 3: got shape {points.shape}")
    if not (pos is None or pos.dim() == 1 or pos.dim() == 2):
        raise ValueError(f"Expected pos to have dim = 1 or dim = 2: got shape {pos.shape}")
    if not (quat is None or quat.dim() == 1 or quat.dim() == 2):
        raise ValueError(f"Expected quat to have dim = 1 or dim = 2: got shape {quat.shape}")
    # -- rotation
    if quat is not None:
        # convert to batched rotation matrix
        rot_mat = matrix_from_quat(quat)
        if rot_mat.dim() == 2:
            rot_mat = rot_mat[None]  # (3, 3) -> (1, 3, 3)
        # convert points to matching batch size (N, P, 3) -> (N, 3, P)
        # and apply rotation
        points_batch = torch.matmul(rot_mat, points_batch.transpose_(1, 2))
        # (N, 3, P) -> (N, P, 3)
        points_batch = points_batch.transpose_(1, 2)
    # -- translation
    if pos is not None:
        # convert to batched translation vector
        if pos.dim() == 1:
            pos = pos[None, None, :]  # (3,) -> (1, 1, 3)
        else:
            pos = pos[:, None, :]  # (N, 3) -> (N, 1, 3)
        # apply translation
        points_batch += pos
    # -- return points in same shape as input
    if not is_batched:
        points_batch = points_batch.squeeze(0)  # (1, P, 3) -> (P, 3)
    return points_batch
transform_points
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/math_utils.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
Apache-2.0
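A minimal sketch for transform_points above (hedged; matrix_from_quat is assumed to be available in the same module, and the pose values are illustrative):

import torch
from neural_wbc.core.math_utils import transform_points

points = torch.tensor([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])   # (P, 3)
pos = torch.tensor([0.0, 0.0, 1.0])
quat = torch.tensor([0.7071, 0.0, 0.0, 0.7071])             # 90-degree yaw
out = transform_points(points, pos=pos, quat=quat)           # still (P, 3)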
def orthogonalize_perspective_depth(depth: torch.Tensor, intrinsics: torch.Tensor) -> torch.Tensor:
    """Converts perspective depth image to orthogonal depth image.

    Perspective depth images contain distances measured from the camera's optical center.
    Meanwhile, orthogonal depth images provide the distance from the camera's image plane.
    This method uses the camera geometry to convert perspective depth to orthogonal depth image.

    The function assumes that the width and height are both greater than 1.

    Args:
        depth: The perspective depth images. Shape is (H, W) or (H, W, 1) or (N, H, W) or (N, H, W, 1).
        intrinsics: The camera's calibration matrix. If a single matrix is provided, the same
            calibration matrix is used across all the depth images in the batch.
            Shape is (3, 3) or (N, 3, 3).

    Returns:
        The orthogonal depth images. Shape matches the input shape of depth images.

    Raises:
        ValueError: When depth is not of shape (H, W) or (H, W, 1) or (N, H, W) or (N, H, W, 1).
        ValueError: When intrinsics is not of shape (3, 3) or (N, 3, 3).
    """
    # Clone inputs to avoid in-place modifications
    perspective_depth_batch = depth.clone()
    intrinsics_batch = intrinsics.clone()

    # Check if inputs are batched
    is_batched = perspective_depth_batch.dim() == 4 or (
        perspective_depth_batch.dim() == 3 and perspective_depth_batch.shape[-1] != 1
    )

    # Track whether the last dimension was singleton
    add_last_dim = False
    if perspective_depth_batch.dim() == 4 and perspective_depth_batch.shape[-1] == 1:
        add_last_dim = True
        perspective_depth_batch = perspective_depth_batch.squeeze(dim=3)  # (N, H, W, 1) -> (N, H, W)
    if perspective_depth_batch.dim() == 3 and perspective_depth_batch.shape[-1] == 1:
        add_last_dim = True
        perspective_depth_batch = perspective_depth_batch.squeeze(dim=2)  # (H, W, 1) -> (H, W)

    if perspective_depth_batch.dim() == 2:
        perspective_depth_batch = perspective_depth_batch[None]  # (H, W) -> (1, H, W)

    if intrinsics_batch.dim() == 2:
        intrinsics_batch = intrinsics_batch[None]  # (3, 3) -> (1, 3, 3)

    if is_batched and intrinsics_batch.shape[0] == 1:
        intrinsics_batch = intrinsics_batch.expand(perspective_depth_batch.shape[0], -1, -1)  # (1, 3, 3) -> (N, 3, 3)

    # Validate input shapes
    if perspective_depth_batch.dim() != 3:
        raise ValueError(f"Expected depth images to have 2, 3, or 4 dimensions; got {depth.shape}.")
    if intrinsics_batch.dim() != 3:
        raise ValueError(f"Expected intrinsics to have shape (3, 3) or (N, 3, 3); got {intrinsics.shape}.")

    # Image dimensions
    im_height, im_width = perspective_depth_batch.shape[1:]

    # Get the intrinsics parameters
    fx = intrinsics_batch[:, 0, 0].view(-1, 1, 1)
    fy = intrinsics_batch[:, 1, 1].view(-1, 1, 1)
    cx = intrinsics_batch[:, 0, 2].view(-1, 1, 1)
    cy = intrinsics_batch[:, 1, 2].view(-1, 1, 1)

    # Create meshgrid of pixel coordinates
    u_grid = torch.arange(im_width, device=depth.device, dtype=depth.dtype)
    v_grid = torch.arange(im_height, device=depth.device, dtype=depth.dtype)
    u_grid, v_grid = torch.meshgrid(u_grid, v_grid, indexing="xy")

    # Expand the grids for batch processing
    u_grid = u_grid.unsqueeze(0).expand(perspective_depth_batch.shape[0], -1, -1)
    v_grid = v_grid.unsqueeze(0).expand(perspective_depth_batch.shape[0], -1, -1)

    # Compute the squared terms for efficiency
    x_term = ((u_grid - cx) / fx) ** 2
    y_term = ((v_grid - cy) / fy) ** 2

    # Calculate the orthogonal (normal) depth
    orthogonal_depth = perspective_depth_batch / torch.sqrt(1 + x_term + y_term)

    # Restore the last dimension if it was present in the input
    if add_last_dim:
        orthogonal_depth = orthogonal_depth.unsqueeze(-1)

    # Return to original shape if input was not batched
    if not is_batched:
        orthogonal_depth = orthogonal_depth.squeeze(0)

    return orthogonal_depth
orthogonalize_perspective_depth
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/math_utils.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
Apache-2.0
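A minimal sketch for orthogonalize_perspective_depth above (hedged; the intrinsics and image size are illustrative):

import torch
from neural_wbc.core.math_utils import orthogonalize_perspective_depth

intrinsics = torch.tensor([[500.0, 0.0, 320.0],
                           [0.0, 500.0, 240.0],
                           [0.0, 0.0, 1.0]])
depth = torch.full((480, 640), 2.0)                           # flat 2 m perspective depth image
ortho = orthogonalize_perspective_depth(depth, intrinsics)    # same (480, 640) shape, values <= 2.0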
def unproject_depth(depth: torch.Tensor, intrinsics: torch.Tensor, is_ortho: bool = True) -> torch.Tensor:
    r"""Un-project depth image into a pointcloud.

    This function converts orthogonal or perspective depth images into points given the calibration matrix
    of the camera. It uses the following transformation based on camera geometry:

    .. math::
        p_{3D} = K^{-1} \times [u, v, 1]^T \times d

    where :math:`p_{3D}` is the 3D point, :math:`d` is the depth value (measured from the image plane),
    :math:`u` and :math:`v` are the pixel coordinates and :math:`K` is the intrinsic matrix.

    The function assumes that the width and height are both greater than 1. This makes the function
    deal with many possible shapes of depth images and intrinsics matrices.

    .. note::
        If :attr:`is_ortho` is False, the input depth images are transformed to orthogonal depth images
        by using the :meth:`orthogonalize_perspective_depth` method.

    Args:
        depth: The depth measurement. Shape is (H, W) or (H, W, 1) or (N, H, W) or (N, H, W, 1).
        intrinsics: The camera's calibration matrix. If a single matrix is provided, the same
            calibration matrix is used across all the depth images in the batch.
            Shape is (3, 3) or (N, 3, 3).
        is_ortho: Whether the input depth image is orthogonal or perspective depth image. If True, the input
            depth image is considered as the *orthogonal* type, where the measurements are from the camera's
            image plane. If False, the depth image is considered as the *perspective* type, where the
            measurements are from the camera's optical center. Defaults to True.

    Returns:
        The 3D coordinates of points. Shape is (P, 3) or (N, P, 3).

    Raises:
        ValueError: When depth is not of shape (H, W) or (H, W, 1) or (N, H, W) or (N, H, W, 1).
        ValueError: When intrinsics is not of shape (3, 3) or (N, 3, 3).
    """
    # clone inputs to avoid in-place modifications
    intrinsics_batch = intrinsics.clone()
    # convert depth image to orthogonal if needed
    if not is_ortho:
        depth_batch = orthogonalize_perspective_depth(depth, intrinsics)
    else:
        depth_batch = depth.clone()

    # check if inputs are batched
    is_batched = depth_batch.dim() == 4 or (depth_batch.dim() == 3 and depth_batch.shape[-1] != 1)
    # make sure inputs are batched
    if depth_batch.dim() == 3 and depth_batch.shape[-1] == 1:
        depth_batch = depth_batch.squeeze(dim=2)  # (H, W, 1) -> (H, W)
    if depth_batch.dim() == 2:
        depth_batch = depth_batch[None]  # (H, W) -> (1, H, W)
    if depth_batch.dim() == 4 and depth_batch.shape[-1] == 1:
        depth_batch = depth_batch.squeeze(dim=3)  # (N, H, W, 1) -> (N, H, W)
    if intrinsics_batch.dim() == 2:
        intrinsics_batch = intrinsics_batch[None]  # (3, 3) -> (1, 3, 3)
    # check shape of inputs
    if depth_batch.dim() != 3:
        raise ValueError(f"Expected depth images to have dim = 2 or 3 or 4: got shape {depth.shape}")
    if intrinsics_batch.dim() != 3:
        raise ValueError(f"Expected intrinsics to have shape (3, 3) or (N, 3, 3): got shape {intrinsics.shape}")

    # get image height and width
    im_height, im_width = depth_batch.shape[1:]
    # create image points in homogeneous coordinates (3, H x W)
    indices_u = torch.arange(im_width, device=depth.device, dtype=depth.dtype)
    indices_v = torch.arange(im_height, device=depth.device, dtype=depth.dtype)
    img_indices = torch.stack(torch.meshgrid([indices_u, indices_v], indexing="ij"), dim=0).reshape(2, -1)
    pixels = torch.nn.functional.pad(img_indices, (0, 0, 0, 1), mode="constant", value=1.0)
    pixels = pixels.unsqueeze(0)  # (3, H x W) -> (1, 3, H x W)

    # unproject points into 3D space
    points = torch.matmul(torch.inverse(intrinsics_batch), pixels)  # (N, 3, H x W)
    points = points / points[:, -1, :].unsqueeze(1)  # normalize by last coordinate
    # flatten depth image (N, H, W) -> (N, H x W)
    depth_batch = depth_batch.transpose_(1, 2).reshape(depth_batch.shape[0], -1).unsqueeze(2)
    depth_batch = depth_batch.expand(-1, -1, 3)
    # scale points by depth
    points_xyz = points.transpose_(1, 2) * depth_batch  # (N, H x W, 3)

    # return points in same shape as input
    if not is_batched:
        points_xyz = points_xyz.squeeze(0)

    return points_xyz
unproject_depth
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/math_utils.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
Apache-2.0
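A minimal sketch for unproject_depth above (hedged; the synthetic depth image and intrinsics are illustrative):

import torch
from neural_wbc.core.math_utils import unproject_depth

intrinsics = torch.tensor([[500.0, 0.0, 320.0],
                           [0.0, 500.0, 240.0],
                           [0.0, 0.0, 1.0]])
depth = torch.full((480, 640), 2.0)                          # orthogonal depth, 2 m everywhere
points = unproject_depth(depth, intrinsics, is_ortho=True)   # (480 * 640, 3) point cloud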
def project_points(points: torch.Tensor, intrinsics: torch.Tensor) -> torch.Tensor:
    r"""Projects 3D points into 2D image plane.

    This projects 3D points into a 2D image plane. The transformation is defined by the intrinsic
    matrix of the camera.

    .. math::

        \begin{align}
            p &= K \times p_{3D} \\
            p_{2D} &= \begin{pmatrix} u \\ v \\ d \end{pmatrix}
                    = \begin{pmatrix} p[0] / p[2] \\ p[1] / p[2] \\ Z \end{pmatrix}
        \end{align}

    where :math:`p_{2D} = (u, v, d)` is the projected 3D point, :math:`p_{3D} = (X, Y, Z)` is the 3D point
    and :math:`K \in \mathbb{R}^{3 \times 3}` is the intrinsic matrix.

    If `points` is a batch of 3D points and `intrinsics` is a single intrinsic matrix, the same
    calibration matrix is applied to all points in the batch.

    Args:
        points: The 3D coordinates of points. Shape is (P, 3) or (N, P, 3).
        intrinsics: Camera's calibration matrix. Shape is (3, 3) or (N, 3, 3).

    Returns:
        Projected 3D coordinates of points. Shape is (P, 3) or (N, P, 3).
    """
    # clone the inputs to avoid in-place operations modifying the original data
    points_batch = points.clone()
    intrinsics_batch = intrinsics.clone()

    # check if inputs are batched
    is_batched = points_batch.dim() == 3
    # make sure inputs are batched
    if points_batch.dim() == 2:
        points_batch = points_batch[None]  # (P, 3) -> (1, P, 3)
    if intrinsics_batch.dim() == 2:
        intrinsics_batch = intrinsics_batch[None]  # (3, 3) -> (1, 3, 3)
    # check shape of inputs
    if points_batch.dim() != 3:
        raise ValueError(f"Expected points to have dim = 3: got shape {points.shape}.")
    if intrinsics_batch.dim() != 3:
        raise ValueError(f"Expected intrinsics to have shape (3, 3) or (N, 3, 3): got shape {intrinsics.shape}.")

    # project points into 2D image plane
    points_2d = torch.matmul(intrinsics_batch, points_batch.transpose(1, 2))
    points_2d = points_2d / points_2d[:, -1, :].unsqueeze(1)  # normalize by last coordinate
    points_2d = points_2d.transpose_(1, 2)  # (N, 3, P) -> (N, P, 3)
    # replace last coordinate with depth
    points_2d[:, :, -1] = points_batch[:, :, -1]

    # return points in same shape as input
    if not is_batched:
        points_2d = points_2d.squeeze(0)  # (1, P, 3) -> (P, 3)

    return points_2d
project_points
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/math_utils.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
Apache-2.0
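A minimal round-trip sketch combining unproject_depth and project_points above (hedged; the synthetic depth image is illustrative, and the round trip simply re-projects the unprojected points back into pixel coordinates):

import torch
from neural_wbc.core.math_utils import project_points, unproject_depth

intrinsics = torch.tensor([[500.0, 0.0, 320.0],
                           [0.0, 500.0, 240.0],
                           [0.0, 0.0, 1.0]])
depth = torch.full((480, 640), 1.5)
points = unproject_depth(depth, intrinsics, is_ortho=True)   # (480 * 640, 3)
pixels = project_points(points, intrinsics)                   # per point: u, v, and the original depth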
def default_orientation(num: int, device: str) -> torch.Tensor:
    """Returns identity rotation transform.

    Args:
        num: The number of rotations to sample.
        device: Device to create tensor on.

    Returns:
        Identity quaternion in (w, x, y, z). Shape is (num, 4).
    """
    quat = torch.zeros((num, 4), dtype=torch.float, device=device)
    quat[..., 0] = 1.0
    return quat
default_orientation
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/math_utils.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
Apache-2.0
def random_orientation(num: int, device: str) -> torch.Tensor:
    """Returns sampled rotation in 3D as quaternion.

    Args:
        num: The number of rotations to sample.
        device: Device to create tensor on.

    Returns:
        Sampled quaternion in (w, x, y, z). Shape is (num, 4).

    Reference:
        https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.random.html
    """
    # sample random orientation from normal distribution
    quat = torch.randn((num, 4), dtype=torch.float, device=device)
    # normalize the quaternion
    return torch.nn.functional.normalize(quat, p=2.0, dim=-1, eps=1e-12)
random_orientation
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/math_utils.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
Apache-2.0
def random_yaw_orientation(num: int, device: str) -> torch.Tensor:
    """Returns sampled rotation around z-axis.

    Args:
        num: The number of rotations to sample.
        device: Device to create tensor on.

    Returns:
        Sampled quaternion in (w, x, y, z). Shape is (num, 4).
    """
    roll = torch.zeros(num, dtype=torch.float, device=device)
    pitch = torch.zeros(num, dtype=torch.float, device=device)
    yaw = 2 * torch.pi * torch.rand(num, dtype=torch.float, device=device)
    return quat_from_euler_xyz(roll, pitch, yaw)
random_yaw_orientation
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/math_utils.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
Apache-2.0
def sample_triangle(lower: float, upper: float, size: int | tuple[int, ...], device: str) -> torch.Tensor:
    """Randomly samples tensor from a triangular distribution.

    Args:
        lower: The lower range of the sampled tensor.
        upper: The upper range of the sampled tensor.
        size: The shape of the tensor.
        device: Device to create tensor on.

    Returns:
        Sampled tensor. Shape is based on :attr:`size`.
    """
    # convert to tuple
    if isinstance(size, int):
        size = (size,)
    # create random tensor in the range [-1, 1]
    r = 2 * torch.rand(*size, device=device) - 1
    # convert to triangular distribution
    r = torch.where(r < 0.0, -torch.sqrt(-r), torch.sqrt(r))
    # rescale back to [0, 1]
    r = (r + 1.0) / 2.0
    # rescale to range [lower, upper]
    return (upper - lower) * r + lower
sample_triangle
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/math_utils.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
Apache-2.0
def sample_uniform(
    lower: torch.Tensor | float, upper: torch.Tensor | float, size: int | tuple[int, ...], device: str
) -> torch.Tensor:
    """Sample uniformly within a range.

    Args:
        lower: Lower bound of uniform range.
        upper: Upper bound of uniform range.
        size: The shape of the tensor.
        device: Device to create tensor on.

    Returns:
        Sampled tensor. Shape is based on :attr:`size`.
    """
    # convert to tuple
    if isinstance(size, int):
        size = (size,)
    # return tensor
    return torch.rand(*size, device=device) * (upper - lower) + lower
sample_uniform
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/math_utils.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
Apache-2.0
def sample_log_uniform(
    lower: torch.Tensor | float, upper: torch.Tensor | float, size: int | tuple[int, ...], device: str
) -> torch.Tensor:
    r"""Sample using log-uniform distribution within a range.

    The log-uniform distribution is defined as a uniform distribution in the log-space. It is useful for
    sampling values that span several orders of magnitude. The sampled values are uniformly distributed
    in the log-space and then exponentiated to get the final values.

    .. math::

        x = \exp(\text{uniform}(\log(\text{lower}), \log(\text{upper})))

    Args:
        lower: Lower bound of uniform range.
        upper: Upper bound of uniform range.
        size: The shape of the tensor.
        device: Device to create tensor on.

    Returns:
        Sampled tensor. Shape is based on :attr:`size`.
    """
    # cast to tensor if not already
    if not isinstance(lower, torch.Tensor):
        lower = torch.tensor(lower, dtype=torch.float, device=device)
    if not isinstance(upper, torch.Tensor):
        upper = torch.tensor(upper, dtype=torch.float, device=device)
    # sample in log-space and exponentiate
    return torch.exp(sample_uniform(torch.log(lower), torch.log(upper), size, device))
sample_log_uniform
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/math_utils.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
Apache-2.0
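A minimal sketch for sample_log_uniform above (hedged; the bounds are illustrative and span three orders of magnitude):

import torch
from neural_wbc.core.math_utils import sample_log_uniform

samples = sample_log_uniform(1.0e-3, 1.0, (4, 2), device="cpu")
# every value lies in [1e-3, 1.0] and is uniformly distributed in log-space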
def sample_gaussian(
    mean: torch.Tensor | float, std: torch.Tensor | float, size: int | tuple[int, ...], device: str
) -> torch.Tensor:
    """Sample using gaussian distribution.

    Args:
        mean: Mean of the gaussian.
        std: Std of the gaussian.
        size: The shape of the tensor.
        device: Device to create tensor on.

    Returns:
        Sampled tensor.
    """
    if isinstance(mean, float):
        if isinstance(size, int):
            size = (size,)
        return torch.normal(mean=mean, std=std, size=size).to(device=device)
    else:
        return torch.normal(mean=mean, std=std).to(device=device)
sample_gaussian
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/math_utils.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
Apache-2.0
def sample_cylinder(
    radius: float, h_range: tuple[float, float], size: int | tuple[int, ...], device: str
) -> torch.Tensor:
    """Sample 3D points uniformly on a cylinder's surface.

    The cylinder is centered at the origin and aligned with the z-axis. The height of the cylinder is
    sampled uniformly from the range :obj:`h_range`, while the radius is fixed to :obj:`radius`.

    The sampled points are returned as a tensor of shape :obj:`(*size, 3)`, i.e. the last dimension
    contains the x, y, and z coordinates of the sampled points.

    Args:
        radius: The radius of the cylinder.
        h_range: The minimum and maximum height of the cylinder.
        size: The shape of the tensor.
        device: Device to create tensor on.

    Returns:
        Sampled tensor. Shape is :obj:`(*size, 3)`.
    """
    # sample angles
    angles = (torch.rand(size, device=device) * 2 - 1) * torch.pi
    h_min, h_max = h_range
    # add shape
    if isinstance(size, int):
        size = (size, 3)
    else:
        size += (3,)
    # allocate a tensor
    xyz = torch.zeros(size, device=device)
    xyz[..., 0] = radius * torch.cos(angles)
    xyz[..., 1] = radius * torch.sin(angles)
    xyz[..., 2].uniform_(h_min, h_max)
    # return positions
    return xyz
sample_cylinder
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/math_utils.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
Apache-2.0
def convert_camera_frame_orientation_convention(
    orientation: torch.Tensor,
    origin: Literal["opengl", "ros", "world"] = "opengl",
    target: Literal["opengl", "ros", "world"] = "ros",
) -> torch.Tensor:
    r"""Converts a quaternion representing a rotation from one convention to another.

    In USD, the camera follows the ``"opengl"`` convention. Thus, it is always in **Y up** convention.
    This means that the camera is looking down the -Z axis with the +Y axis pointing up, and +X axis pointing right.
    However, in ROS, the camera is looking down the +Z axis with the +Y axis pointing down, and +X axis pointing right.
    Thus, the camera needs to be rotated by :math:`180^{\circ}` around the X axis to follow the ROS convention.

    .. math::

        T_{ROS} = \begin{bmatrix} 1 & 0 & 0 & 0 \\ 0 & -1 & 0 & 0 \\ 0 & 0 & -1 & 0 \\ 0 & 0 & 0 & 1 \end{bmatrix} T_{USD}

    On the other hand, the typical world coordinate system is with +X pointing forward, +Y pointing left,
    and +Z pointing up. The camera can also be set in this convention by rotating the camera by :math:`90^{\circ}`
    around the X axis and :math:`-90^{\circ}` around the Y axis.

    .. math::

        T_{WORLD} = \begin{bmatrix} 0 & 0 & -1 & 0 \\ -1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \end{bmatrix} T_{USD}

    Thus, based on their application, cameras follow different conventions for their orientation. This function
    converts a quaternion from one convention to another.

    Possible conventions are:

    - :obj:`"opengl"` - forward axis: -Z - up axis +Y - Offset is applied in the OpenGL (Usd.Camera) convention
    - :obj:`"ros"` - forward axis: +Z - up axis -Y - Offset is applied in the ROS convention
    - :obj:`"world"` - forward axis: +X - up axis +Z - Offset is applied in the World Frame convention

    Args:
        orientation: Quaternion of form `(w, x, y, z)` with shape (..., 4) in source convention.
        origin: Convention to convert from. Defaults to "opengl".
        target: Convention to convert to. Defaults to "ros".

    Returns:
        Quaternion of form `(w, x, y, z)` with shape (..., 4) in target convention.
    """
    if target == origin:
        return orientation.clone()

    # -- unify input type
    if origin == "ros":
        # convert from ros to opengl convention
        rotm = matrix_from_quat(orientation)
        rotm[:, :, 2] = -rotm[:, :, 2]
        rotm[:, :, 1] = -rotm[:, :, 1]
        # convert to opengl convention
        quat_gl = quat_from_matrix(rotm)
    elif origin == "world":
        # convert from world (x forward and z up) to opengl convention
        rotm = matrix_from_quat(orientation)
        rotm = torch.matmul(
            rotm,
            matrix_from_euler(torch.tensor([math.pi / 2, -math.pi / 2, 0], device=orientation.device), "XYZ"),
        )
        # convert to isaac-sim convention
        quat_gl = quat_from_matrix(rotm)
    else:
        quat_gl = orientation

    # -- convert to target convention
    if target == "ros":
        # convert from opengl to ros convention
        rotm = matrix_from_quat(quat_gl)
        rotm[:, :, 2] = -rotm[:, :, 2]
        rotm[:, :, 1] = -rotm[:, :, 1]
        return quat_from_matrix(rotm)
    elif target == "world":
        # convert from opengl to world (x forward and z up) convention
        rotm = matrix_from_quat(quat_gl)
        rotm = torch.matmul(
            rotm,
            matrix_from_euler(torch.tensor([math.pi / 2, -math.pi / 2, 0], device=orientation.device), "XYZ").T,
        )
        return quat_from_matrix(rotm)
    else:
        return quat_gl.clone()
convert_camera_frame_orientation_convention
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/math_utils.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
Apache-2.0
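A minimal sketch for convert_camera_frame_orientation_convention above (hedged; matrix_from_quat, quat_from_matrix, and matrix_from_euler are assumed to be available in the same module, as the function body implies):

import torch
from neural_wbc.core.math_utils import convert_camera_frame_orientation_convention

quat_opengl = torch.tensor([[1.0, 0.0, 0.0, 0.0]])   # identity orientation in the OpenGL convention
quat_ros = convert_camera_frame_orientation_convention(quat_opengl, origin="opengl", target="ros")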
def create_rotation_matrix_from_view(
    eyes: torch.Tensor,
    targets: torch.Tensor,
    up_axis: Literal["Y", "Z"] = "Z",
    device: str = "cpu",
) -> torch.Tensor:
    """Compute the rotation matrix from world to view coordinates.

    This function takes a vector ''eyes'' which specifies the location of the camera in world coordinates and
    the vector ''targets'' which indicate the position of the object. The output is a rotation matrix representing
    the transformation from world coordinates -> view coordinates.

    The inputs eyes and targets can each be a
        - 3 element tuple/list
        - torch tensor of shape (1, 3)
        - torch tensor of shape (N, 3)

    Args:
        eyes: Position of the camera in world coordinates.
        targets: Position of the object in world coordinates.
        up_axis: The up axis of the camera. Defaults to "Z".
        device: The device to create torch tensors on. Defaults to "cpu".

    The vectors are broadcast against each other so they all have shape (N, 3).

    Returns:
        R: (N, 3, 3) batched rotation matrices

    Reference:
        Based on PyTorch3D (https://github.com/facebookresearch/pytorch3d/blob/eaf0709d6af0025fe94d1ee7cec454bc3054826a/pytorch3d/renderer/cameras.py#L1635-L1685)
    """
    if up_axis == "Y":
        up_axis_vec = torch.tensor((0, 1, 0), device=device, dtype=torch.float32).repeat(eyes.shape[0], 1)
    elif up_axis == "Z":
        up_axis_vec = torch.tensor((0, 0, 1), device=device, dtype=torch.float32).repeat(eyes.shape[0], 1)
    else:
        raise ValueError(f"Invalid up axis: {up_axis}. Valid options are 'Y' and 'Z'.")

    # get rotation matrix in opengl format (-Z forward, +Y up)
    z_axis = -torch.nn.functional.normalize(targets - eyes, eps=1e-5)
    x_axis = torch.nn.functional.normalize(torch.cross(up_axis_vec, z_axis, dim=1), eps=1e-5)
    y_axis = torch.nn.functional.normalize(torch.cross(z_axis, x_axis, dim=1), eps=1e-5)
    is_close = torch.isclose(x_axis, torch.tensor(0.0), atol=5e-3).all(dim=1, keepdim=True)
    if is_close.any():
        replacement = torch.nn.functional.normalize(torch.cross(y_axis, z_axis, dim=1), eps=1e-5)
        x_axis = torch.where(is_close, replacement, x_axis)
    R = torch.cat((x_axis[:, None, :], y_axis[:, None, :], z_axis[:, None, :]), dim=1)
    return R.transpose(1, 2)
create_rotation_matrix_from_view
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/math_utils.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
Apache-2.0
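A minimal sketch for create_rotation_matrix_from_view above (hedged; the camera and target positions are illustrative):

import torch
from neural_wbc.core.math_utils import create_rotation_matrix_from_view

eyes = torch.tensor([[2.0, 0.0, 1.0]])    # camera position in world coordinates
targets = torch.zeros(1, 3)               # looking at the world origin
R = create_rotation_matrix_from_view(eyes, targets, up_axis="Z", device="cpu")   # (1, 3, 3)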
def __init__(
    self,
    ref_motion: dict,
    body_ids: List[int] | None = None,
    joint_ids: List[int] | None = None,
):
    """Represents the state of a reference motion frame.

    Args:
        ref_motion (dict): Reference motion data at a specific point in time, loaded from the dataset.
        body_ids (List[int] | None, optional): Desired ordering of the bodies. Defaults to None.
        joint_ids (List[int] | None, optional): Desired ordering of the joints. Defaults to None.
    """
    if body_ids is None:
        num_bodies = ref_motion["rg_pos"].shape[1]
        body_ids = list(range(0, num_bodies))
    if joint_ids is None:
        num_joints = ref_motion["dof_pos"].shape[1]
        joint_ids = list(range(0, num_joints))

    # Root
    self.root_pos = ref_motion["root_pos"]
    self.root_rot = ref_motion["root_rot"]
    self.root_lin_vel = ref_motion["root_vel"]
    self.root_ang_vel = ref_motion["root_ang_vel"]

    # Links
    self.body_pos = ref_motion["rg_pos"][:, body_ids, :]
    self.body_rot = ref_motion["rb_rot"][:, body_ids, :]  # [num_envs, num_markers, 4]
    self.body_lin_vel = ref_motion["body_vel"][:, body_ids, :]  # [num_envs, num_markers, 3]
    self.body_ang_vel = ref_motion["body_ang_vel"][:, body_ids, :]  # [num_envs, num_markers, 3]

    # Extended links
    self.body_pos_extend = ref_motion["rg_pos_t"]
    self.body_rot_extend = ref_motion["rg_rot_t"]
    self.body_lin_vel_extend = ref_motion["body_vel_t"]
    self.body_ang_vel_extend = ref_motion["body_ang_vel_t"]

    self.joint_pos = ref_motion["dof_pos"][:, joint_ids]
    self.joint_vel = ref_motion["dof_vel"][:, joint_ids]
__init__
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/reference_motion.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/reference_motion.py
Apache-2.0
def __init__(
    self,
    cfg: ReferenceMotionManagerCfg,
    device: torch.device,
    num_envs: int,
    random_sample: bool,
    extend_head: bool,
    dt: float,
):
    """Initializes a reference motion manager that loads and queries a motion dataset.

    Args:
        cfg (ReferenceMotionManagerCfg): Configuration that specifies dataset and skeleton paths.
        device (torch.device): Device to host tensors on.
        num_envs (int): Number of environments/instances.
        random_sample (bool): Whether to randomly sample the dataset.
        extend_head (bool): Whether to extend the head of the body for specific robots, e.g. H1.
        dt (float): Length of a policy time step, which is the length of a physics time step
            multiplied by decimation.
    """
    self._device = device
    self._num_envs = num_envs
    self._dt = dt
    self._motion_lib = MotionLibH1(
        motion_file=cfg.motion_path,
        mjcf_file=cfg.skeleton_path,
        device=self._device,
        masterfoot_conifg=None,
        fix_height=False,
        multi_thread=False,
        extend_head=extend_head,
    )
    self._skeleton_trees = [SkeletonTree.from_mjcf(cfg.skeleton_path)] * self._num_envs
    self._motion_ids = torch.arange(self._num_envs).to(self._device)
    self._motion_start_times = torch.zeros(
        self._num_envs, dtype=torch.float32, device=self._device, requires_grad=False
    )
    self.load_motions(random_sample=random_sample, start_idx=0)
__init__
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/reference_motion.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/reference_motion.py
Apache-2.0
def load_motions(self, random_sample: bool, start_idx: int):
    """Loads motions from the motion dataset."""
    self._motion_lib.load_motions(
        skeleton_trees=self._skeleton_trees,
        gender_betas=[torch.zeros(17)] * self._num_envs,
        limb_weights=[np.zeros(10)] * self._num_envs,
        random_sample=random_sample,
        start_idx=start_idx,
    )
    self._motion_len = self._motion_lib.get_motion_length(self._motion_ids)
    self.reset_motion_start_times(env_ids=self._motion_ids, sample=False)
load_motions
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/reference_motion.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/reference_motion.py
Apache-2.0
def reset_motion_start_times(self, env_ids: torch.Tensor, sample: bool):
    """Resets the time at which the reference motions start playing."""
    if sample:
        self._motion_start_times[env_ids] = self._motion_lib.sample_time(self._motion_ids[env_ids])
    else:
        self._motion_start_times[env_ids] = 0
Resets the time at which the reference motions start playing.
reset_motion_start_times
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/reference_motion.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/reference_motion.py
Apache-2.0
def episodes_exceed_motion_length(
    self, episode_times: torch.Tensor, env_ids: torch.Tensor | None = None
) -> torch.Tensor:
    """Checks if the reference motion has reached the end."""
    if env_ids is None:
        return (episode_times + self._motion_start_times) > self._motion_len
    return (episode_times[env_ids] + self._motion_start_times[env_ids]) > self._motion_len[env_ids]
Checks if the reference motion has reached the end.
episodes_exceed_motion_length
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/reference_motion.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/reference_motion.py
Apache-2.0
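The time-budget check above, reproduced with plain tensors so the arithmetic is visible (values are hypothetical):

import torch

motion_start_times = torch.tensor([0.0, 1.5, 0.0])   # per-environment start offsets into the clips
motion_len = torch.tensor([3.0, 3.0, 3.0])           # clip lengths in seconds
episode_times = torch.tensor([2.0, 2.0, 3.5])        # time elapsed in each episode

exceeded = (episode_times + motion_start_times) > motion_len
print(exceeded)   # tensor([False,  True,  True])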
def get_state_from_motion_lib_cache( self, episode_length_buf: torch.Tensor, terrain_heights: torch.Tensor | None = None, offset: torch.Tensor | None = None, quaternion_is_xyzw=True, ) -> ReferenceMotionState: """Query a reference motion frame from motion lib.""" motion_times = episode_length_buf * self._dt + self._motion_start_times motion_res = self._motion_lib.get_motion_state(self._motion_ids, motion_times, offset=offset) if terrain_heights is not None: delta_height = terrain_heights.clone() if offset is not None: delta_height -= offset[:, 2].unsqueeze(1) motion_res["root_pos"][:, 2] += delta_height.flatten() if "rg_pos" in motion_res: motion_res["rg_pos"][:, :, 2] += delta_height if "rg_pos_t" in motion_res: motion_res["rg_pos_t"][:, :, 2] += delta_height # Update quaternion convention if quaternion_is_xyzw: for key, value in motion_res.items(): if value.shape[-1] == 4: motion_res[key] = value[..., [3, 0, 1, 2]] return ReferenceMotionState(motion_res)
Query a reference motion frame from motion lib.
get_state_from_motion_lib_cache
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/reference_motion.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/reference_motion.py
Apache-2.0
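The quaternion convention swap applied above is a plain reindex: value[..., [3, 0, 1, 2]] moves the last component to the front, turning (x, y, z, w) into (w, x, y, z). Reading the flag as "the motion library data is x-y-z-w" is inferred from this indexing and from the w-x-y-z convention used downstream; a small stand-alone check:

import torch

quat_xyzw = torch.tensor([[0.7071, 0.0, 0.0, 0.7071]])   # (x, y, z, w)
quat_wxyz = quat_xyzw[..., [3, 0, 1, 2]]                  # last component moved to the front
print(quat_wxyz)   # tensor([[0.7071, 0.7071, 0.0000, 0.0000]])  -> (w, x, y, z)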
def check_termination_conditions( training_mode: bool, projected_gravity: torch.Tensor, gravity_x_threshold: float, gravity_y_threshold: float, ref_motion_mgr: ReferenceMotionManager, episode_times: torch.Tensor, body_state: BodyState, ref_motion_state: ReferenceMotionState, max_ref_motion_dist: float, in_recovery: torch.Tensor | None, mask: torch.Tensor, net_contact_forces: torch.Tensor, undesired_contact_body_ids: torch.Tensor, ) -> tuple[torch.Tensor, dict]: """ Evaluates termination conditions. This function checks various termination conditions and returns a boolean tensor of shape (num_env,) indicating whether any condition has been met. Additionally, it provides a dictionary mapping each condition's name to its activation state, with each state represented as a boolean tensor of shape (num_env,). Returns: torch.Tensor: A boolean tensor of shape (num_env,) where a value of True indicates that at least one termination condition is active, and False indicates none are active. dict: A dictionary where keys are the names of the termination conditions and values are boolean tensors of shape (num_env,) indicating whether each condition is active. """ conditions = { "gravity": terminate_by_gravity( projected_gravity=projected_gravity, gravity_x_threshold=gravity_x_threshold, gravity_y_threshold=gravity_y_threshold, ), "undesired_contact": terminate_by_undesired_contact( net_contact_forces=net_contact_forces, undesired_contact_body_ids=undesired_contact_body_ids, ), "reference_motion_length": terminate_by_reference_motion_length( ref_motion_mgr=ref_motion_mgr, episode_times=episode_times ), "reference_motion_distance": terminate_by_reference_motion_distance( training_mode=training_mode, body_state=body_state, ref_motion_state=ref_motion_state, max_ref_motion_dist=max_ref_motion_dist, in_recovery=in_recovery, mask=mask, ), } should_terminate = torch.any(torch.cat(list(conditions.values()), dim=1), dim=1) return should_terminate, conditions
Evaluates termination conditions. This function checks various termination conditions and returns a boolean tensor of shape (num_env,) indicating whether any condition has been met. Additionally, it provides a dictionary mapping each condition's name to its activation state, with each state represented as a boolean tensor of shape (num_env,). Returns: torch.Tensor: A boolean tensor of shape (num_env,) where a value of True indicates that at least one termination condition is active, and False indicates none are active. dict: A dictionary where keys are the names of the termination conditions and values are boolean tensors of shape (num_env,) indicating whether each condition is active.
check_termination_conditions
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/termination.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/termination.py
Apache-2.0
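How the per-condition flags are combined: each condition returns a (num_env, 1) boolean tensor, and an environment terminates as soon as any of them is True. A self-contained illustration with made-up flags:

import torch

conditions = {
    "gravity": torch.tensor([[False], [True], [False]]),
    "undesired_contact": torch.tensor([[False], [False], [False]]),
    "reference_motion_length": torch.tensor([[False], [False], [True]]),
}
should_terminate = torch.any(torch.cat(list(conditions.values()), dim=1), dim=1)
print(should_terminate)   # tensor([False,  True,  True])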
def terminate_by_gravity( projected_gravity: torch.Tensor, gravity_x_threshold: float, gravity_y_threshold: float, ) -> torch.Tensor: """ Checks termination condition based on robot balance. This function evaluates whether the robot is unbalanced due to gravity and returns a boolean tensor of shape (num_env, 1). Each element in the tensor indicates whether the unbalanced condition is active for the corresponding environment. Returns: torch.Tensor: A boolean tensor of shape (num_env, 1) where each value indicates whether the unbalanced termination condition is active (True) or not (False) for each environment. """ # Apply threshold to the robot's projection of the gravity direction on base frame. abs_projected_gravity = torch.abs(projected_gravity) return torch.any( torch.logical_or( abs_projected_gravity[:, 0].unsqueeze(1) > gravity_x_threshold, abs_projected_gravity[:, 1].unsqueeze(1) > gravity_y_threshold, ), dim=1, keepdim=True, )
Checks termination condition based on robot balance. This function evaluates whether the robot is unbalanced due to gravity and returns a boolean tensor of shape (num_env, 1). Each element in the tensor indicates whether the unbalanced condition is active for the corresponding environment. Returns: torch.Tensor: A boolean tensor of shape (num_env, 1) where each value indicates whether the unbalanced termination condition is active (True) or not (False) for each environment.
terminate_by_gravity
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/termination.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/termination.py
Apache-2.0
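The balance check in isolation, with hypothetical threshold values; the tilt is read off the gravity vector projected into the base frame:

import torch

projected_gravity = torch.tensor([
    [0.05, 0.02, -0.99],   # nearly upright
    [0.80, 0.10, -0.59],   # pitched far forward
])
gravity_x_threshold, gravity_y_threshold = 0.7, 0.7   # hypothetical thresholds

abs_g = torch.abs(projected_gravity)
terminate = torch.any(
    torch.logical_or(
        abs_g[:, 0].unsqueeze(1) > gravity_x_threshold,
        abs_g[:, 1].unsqueeze(1) > gravity_y_threshold,
    ),
    dim=1,
    keepdim=True,
)
print(terminate)   # tensor([[False], [ True]])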
def terminate_by_undesired_contact(
    net_contact_forces: torch.Tensor, undesired_contact_body_ids: torch.Tensor
) -> torch.Tensor:
    """
    Checks termination condition based on contact forces.

    This function evaluates whether undesired bodies of the robot are in contact and returns a boolean
    tensor of shape (num_env, 1). Each element in the tensor indicates whether the contact termination
    condition is active for the corresponding environment.

    Args:
        net_contact_forces (torch.Tensor): A tensor of shape (num_env, num_bodies, 3).
        undesired_contact_body_ids (torch.Tensor): A tensor of shape (num_undesired_bodies,).

    Returns:
        torch.Tensor: A boolean tensor of shape (num_env, 1) where each value indicates whether the contact
            termination condition is active (True) or not (False) for each environment.
    """
    # Select the contact forces acting on the undesired bodies.
    undesired_bodies_contact_forces = net_contact_forces[:, undesired_contact_body_ids]
    undesired_bodies_contact_forces = torch.norm(undesired_bodies_contact_forces, dim=-1)
    max_contact_force = torch.max(undesired_bodies_contact_forces, dim=1).values
    contact_force_threshold = 1.0
    return (max_contact_force > contact_force_threshold).view(-1, 1)
Checks termination condition based on contact forces. This function evaluates whether undesired bodies of the robot are in contact and returns a boolean tensor of shape (num_env, 1). Each element in the tensor indicates whether the contact termination condition is active for the corresponding environment. Args: net_contact_forces (torch.Tensor): A tensor of shape (num_env, num_bodies, 3). undesired_contact_body_ids (torch.Tensor): A tensor of shape (num_undesired_bodies,). Returns: torch.Tensor: A boolean tensor of shape (num_env, 1) where each value indicates whether the contact termination condition is active (True) or not (False) for each environment.
terminate_by_undesired_contact
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/termination.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/termination.py
Apache-2.0
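The contact check in isolation, with made-up forces; the 1 N threshold is the constant hard-coded in the function above:

import torch

net_contact_forces = torch.zeros(2, 4, 3)                    # (num_env, num_bodies, xyz)
net_contact_forces[1, 2] = torch.tensor([0.0, 0.0, 5.0])     # body 2 of env 1 is in contact
undesired_contact_body_ids = torch.tensor([1, 2])

forces = torch.norm(net_contact_forces[:, undesired_contact_body_ids], dim=-1)
terminate = (torch.max(forces, dim=1).values > 1.0).view(-1, 1)
print(terminate)   # tensor([[False], [ True]])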
def terminate_by_reference_motion_length( ref_motion_mgr: ReferenceMotionManager, episode_times: torch.Tensor ) -> torch.Tensor: """Checks if the reference motion has ended for each environment. This function returns a boolean tensor of shape (num_env, 1), where each element indicates whether the reference motion has ended for the corresponding environment. Returns: torch.tensor: A boolean tensor of shape (num_env, 1) where `True` indicates that the episode should terminate. """ return ref_motion_mgr.episodes_exceed_motion_length(episode_times=episode_times).unsqueeze(1)
Checks if the reference motion has ended for each environment. This function returns a boolean tensor of shape (num_env, 1), where each element indicates whether the reference motion has ended for the corresponding environment. Returns: torch.tensor: A boolean tensor of shape (num_env, 1) where `True` indicates that the episode should terminate.
terminate_by_reference_motion_length
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/termination.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/termination.py
Apache-2.0
def terminate_by_reference_motion_distance(
    training_mode: bool,
    body_state: BodyState,
    ref_motion_state: ReferenceMotionState,
    max_ref_motion_dist: float,
    in_recovery: torch.Tensor | None,
    mask: torch.Tensor,
) -> torch.Tensor:
    """
    Determines if the distance between current body positions and reference motion positions exceeds the allowed
    threshold.

    Args:
        training_mode (bool): Flag indicating if the system is in training mode.
        body_state (BodyState): Current state of the humanoid bodies.
        ref_motion_state (ReferenceMotionState): Reference motion state for the humanoid to track.
        max_ref_motion_dist (float): Maximum allowable distance between current and reference body positions.
        in_recovery (torch.Tensor | None): Boolean tensor (num_envs, 1) indicating if each environment is in
            recovery mode. If None, the recovery condition is ignored.
        mask (torch.Tensor): Per-body mask; distances of masked-out bodies do not count toward termination.

    Returns:
        torch.Tensor: Boolean tensor (num_envs, 1) indicating if the termination condition is met for each instance.
    """
    body_positions = body_state.body_pos_extend
    reference_positions = ref_motion_state.body_pos_extend
    num_bodies = body_positions.shape[1]
    body_mask = mask[:, :num_bodies]

    # Calculate the distance between current and reference positions
    distance = torch.norm(body_positions - reference_positions, dim=-1) * body_mask

    if training_mode:
        # Check if any distance exceeds the threshold
        exceeds_threshold = torch.any(distance > max_ref_motion_dist, dim=-1, keepdim=True)
    else:
        # Check if the mean distance exceeds the threshold
        mean_distance = distance.mean(dim=-1, keepdim=True)
        exceeds_threshold = torch.any(mean_distance > max_ref_motion_dist, dim=-1, keepdim=True)

    if in_recovery is None:
        return exceeds_threshold

    # If in recovery, ensure we only terminate if not in recovery mode
    return torch.logical_and(exceeds_threshold, ~in_recovery)
Determines if the distance between current body positions and reference motion positions exceeds the allowed
    threshold.

    Args:
        training_mode (bool): Flag indicating if the system is in training mode.
        body_state (BodyState): Current state of the humanoid bodies.
        ref_motion_state (ReferenceMotionState): Reference motion state for the humanoid to track.
        max_ref_motion_dist (float): Maximum allowable distance between current and reference body positions.
        in_recovery (torch.Tensor | None): Boolean tensor (num_envs, 1) indicating if each environment is in
            recovery mode. If None, the recovery condition is ignored.
        mask (torch.Tensor): Per-body mask; distances of masked-out bodies do not count toward termination.

    Returns:
        torch.Tensor: Boolean tensor (num_envs, 1) indicating if the termination condition is met for each instance.
terminate_by_reference_motion_distance
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/termination.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/termination.py
Apache-2.0
def assert_equal(lhs: any, rhs: any, name: str): """Assert that 2 values are equal and provide a useful error if not. Args: lhs: First value to compare rhs: Second value to compare name: Description of what is being compared, used in error messages """ # Handle dictionary comparisons if isinstance(lhs, dict) and isinstance(rhs, dict): _assert_dicts_equal(lhs, rhs, name) # Handle numeric comparisons elif isinstance(lhs, (int, float)) and isinstance(rhs, (int, float)): _assert_numbers_equal(lhs, rhs, name) # Handle all other types else: _assert_values_equal(lhs, rhs, name)
Assert that 2 values are equal and provide a useful error if not. Args: lhs: First value to compare rhs: Second value to compare name: Description of what is being compared, used in error messages
assert_equal
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/util.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/util.py
Apache-2.0
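Example usage, assuming the import path follows the file layout above; numeric comparisons use the 1e-6 tolerance and dictionaries are compared key by key:

from neural_wbc.core.util import assert_equal   # assumed import path

assert_equal(1.0, 1.0 + 1e-9, "gain")                    # passes: difference is below 1e-6
assert_equal({"kp": 100.0}, {"kp": 100.0}, "config")     # passes: dicts compared key by key
try:
    assert_equal({"kp": 100.0}, {"kd": 5.0}, "config")
except AssertionError as exc:
    print(exc)   # reports keys only in one dict and keys with different values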
def _assert_dicts_equal(lhs: dict, rhs: dict, name: str): """Compare two dictionaries and raise assertion error with details if not equal.""" lhs_keys = set(lhs.keys()) rhs_keys = set(rhs.keys()) # Check for missing keys only_in_lhs = lhs_keys - rhs_keys only_in_rhs = rhs_keys - lhs_keys # Check for value differences diff_values = _get_differing_values(lhs, rhs, lhs_keys & rhs_keys) # Build error message if there are any differences error_parts = [] if only_in_lhs: error_parts.append(f"Keys only in first dict: {only_in_lhs}") if only_in_rhs: error_parts.append(f"Keys only in second dict: {only_in_rhs}") if diff_values: error_parts.append(f"Keys with different values: {diff_values}") if error_parts: raise AssertionError(f"{name}: Dictionaries are not equal:\n" + "\n".join(error_parts))
Compare two dictionaries and raise assertion error with details if not equal.
_assert_dicts_equal
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/util.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/util.py
Apache-2.0
def _get_differing_values(lhs: dict, rhs: dict, common_keys: set) -> dict: """Compare values for common keys between two dicts, return dict of differences.""" diff_values = {} for key in common_keys: if isinstance(lhs[key], (int, float)) and isinstance(rhs[key], (int, float)): if abs(lhs[key] - rhs[key]) >= 1e-6: diff_values[key] = (lhs[key], rhs[key]) elif lhs[key] != rhs[key]: diff_values[key] = (lhs[key], rhs[key]) return diff_values
Compare values for common keys between two dicts, return dict of differences.
_get_differing_values
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/util.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/util.py
Apache-2.0
def _assert_numbers_equal(lhs: float, rhs: float, name: str): """Assert that two numbers are equal within a small tolerance.""" if abs(lhs - rhs) >= 1e-6: raise AssertionError(f"{name}: Values are not equal within tolerance: {lhs} != {rhs}")
Assert that two numbers are equal within a small tolerance.
_assert_numbers_equal
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/util.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/util.py
Apache-2.0
def _assert_values_equal(lhs: any, rhs: any, name: str): """Assert that two non-numeric values are exactly equal.""" if lhs != rhs: raise AssertionError(f"{name}: Values are not equal: {lhs} != {rhs}")
Assert that two non-numeric values are exactly equal.
_assert_values_equal
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/util.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/util.py
Apache-2.0
def get_matching_indices(patterns: list[str], values: list[str], allow_empty: bool = False) -> list[int]: """Get indices of all elements in values that match any of the regex patterns.""" all_indices = set() for pattern in patterns: regex = re.compile(pattern) indices = [i for i, v in enumerate(values) if regex.match(v)] if len(indices) == 0 and not allow_empty: raise ValueError(f"No matching indices found for pattern {pattern} in {values}") all_indices.update(indices) return list(all_indices)
Get indices of all elements in values that match any of the regex patterns.
get_matching_indices
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/util.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/util.py
Apache-2.0
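Example usage, assuming the import path follows the file layout above. Patterns are matched with re.match, so they are effectively anchored at the start of each value:

from neural_wbc.core.util import get_matching_indices   # assumed import path

joint_names = ["left_hip_yaw", "left_knee", "right_hip_yaw", "right_knee", "torso"]
indices = get_matching_indices([".*_knee", "torso"], joint_names)
print(sorted(indices))   # [1, 3, 4]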
def create_dummy_warp_module(): """Creates a dummy 'warp' module with necessary attributes and submodules. This function is created because the 'warp' module can only be properly imported when Isaac Sim is also launched, which is not always required when running unit tests or simulations with other simulators. By creating a dummy module, we can avoid import errors and ensure that the dependent modules can be tested without requiring Isaac Sim to be running. """ # Step 1: Create the main dummy module dummy_module = types.ModuleType("warp") # Step 2: Create the submodule 'torch' torch_submodule = types.ModuleType("torch") # Step 3: Define the array class class Array: def __init__(self, value): self.value = value def __repr__(self): return f"Array(value={self.value})" # Define torch.to_torch and torch.from_torch functions def to_torch(value): return f"Converted {value} to torch" def from_torch(value): return f"Converted {value} from torch" # Step 4: Add the class to the main dummy module dummy_module.array = Array # Add the functions to the torch submodule torch_submodule.to_torch = to_torch torch_submodule.from_torch = from_torch # Add the torch submodule to the main dummy module dummy_module.torch = torch_submodule # Step 5: Insert the main dummy module into sys.modules sys.modules["warp"] = dummy_module
Creates a dummy 'warp' module with necessary attributes and submodules. This function is created because the 'warp' module can only be properly imported when Isaac Sim is also launched, which is not always required when running unit tests or simulations with other simulators. By creating a dummy module, we can avoid import errors and ensure that the dependent modules can be tested without requiring Isaac Sim to be running.
create_dummy_warp_module
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/__init__.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/__init__.py
Apache-2.0
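Example usage, assuming the function is importable from neural_wbc.core as the file layout suggests; the stub must be installed before any module that imports warp:

from neural_wbc.core import create_dummy_warp_module   # assumed import path

create_dummy_warp_module()
import warp   # resolves to the stub registered in sys.modules

print(warp.torch.to_torch(3))   # Converted 3 to torch
print(warp.array(3))            # Array(value=3)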
def __init__(self, num_envs: int, device: torch.device, entry_length: int, max_entries: int):
    """
    Args:
        num_envs (int): The number of environments.
        device (torch.device): Device to host the history tensor on.
        entry_length (int): The length of a single entry.
        max_entries (int): The maximum number of entries to keep in the history.
    """
    self._entries = torch.zeros(
        num_envs,
        entry_length * max_entries,
        device=device,
    )
    self._entry_length = entry_length
    self._entry_shape = torch.Size([num_envs, entry_length])
Args:
        num_envs (int): The number of environments.
        device (torch.device): Device to host the history tensor on.
        entry_length (int): The length of a single entry.
        max_entries (int): The maximum number of entries to keep in the history.
__init__
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/observations/student_history.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/observations/student_history.py
Apache-2.0
def update(self, obs_dict: dict[str, torch.Tensor]): """Updates the history with a new entry. Args: obs_dict (dict[str, torch.Tensor]): A dictionary containing the latest student observations. Expected keys are "distilled_robot_state" and "distilled_last_action". Raises: AssertionError: If the new entry does not match the expected shape. KeyError: If the observation does not contain expected keys. """ new_entry = torch.cat( [obs_dict["distilled_robot_state"], obs_dict["distilled_last_action"]], dim=-1, ) assert new_entry.shape == self._entry_shape, "New entry has an unexpected shape." self._entries[:, self._entry_length :] = self._entries[:, : -self._entry_length].clone() self._entries[:, : self._entry_length] = new_entry.clone()
Updates the history with a new entry. Args: obs_dict (dict[str, torch.Tensor]): A dictionary containing the latest student observations. Expected keys are "distilled_robot_state" and "distilled_last_action". Raises: AssertionError: If the new entry does not match the expected shape. KeyError: If the observation does not contain expected keys.
update
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/observations/student_history.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/observations/student_history.py
Apache-2.0
def reset(self, env_ids: torch.Tensor | None):
    """Resets the history for specified environments.

    Args:
        env_ids (torch.Tensor | None): A tensor containing the IDs of the environments to reset.
                                        If None, all environments are reset.

    Example:
        history.reset(None)  # Resets history for all environments
        history.reset(torch.tensor([0, 2]))  # Resets history for environments with IDs 0 and 2
    """
    if env_ids is None:
        # Zero everything and return early; indexing with None below would be redundant.
        self._entries[:] = 0.0
        return
    self._entries[env_ids, ...] = 0.0
Resets the history for specified environments. Args: env_ids (torch.Tensor | None): A tensor containing the IDs of the environments to reset. If None, all environments are reset. Example: history.reset(None) # Resets history for all environments history.reset(torch.tensor([0, 2])) # Resets history for environments with IDs 0 and 2
reset
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/observations/student_history.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/observations/student_history.py
Apache-2.0
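A minimal sketch of the rolling buffer behaviour of StudentHistory, assuming the import path follows the file layout and using tiny dimensions; the observation keys are the ones update() expects, and peeking at the private _entries tensor is only for illustration:

import torch
from neural_wbc.core.observations.student_history import StudentHistory   # assumed import path

num_envs, entry_length, max_entries = 1, 4, 3
history = StudentHistory(
    num_envs=num_envs, device=torch.device("cpu"), entry_length=entry_length, max_entries=max_entries
)

for step in (1.0, 2.0):
    history.update({
        "distilled_robot_state": torch.full((num_envs, 3), step),
        "distilled_last_action": torch.full((num_envs, 1), step),
    })

# Newest entry occupies the first 4 slots; the previous entry has been shifted right.
print(history._entries)   # tensor([[2., 2., 2., 2., 1., 1., 1., 1., 0., 0., 0., 0.]])
history.reset(torch.tensor([0]))   # zero the buffer for environment 0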
def compute_distilled_imitation_observations( ref_motion_state: ReferenceMotionState, body_state: BodyState, mask: torch.Tensor, ref_episodic_offset: torch.Tensor | None, ) -> torch.Tensor: """Computes the reference goal state used in the observation of the student.""" # First we get all reference states. kinematic_command = compute_kinematic_command(ref_motion_state, body_state, ref_episodic_offset) joint_command = compute_joint_command(ref_motion_state, body_state) root_command = compute_root_command(ref_motion_state, body_state) # Apply masking to kinematic references. The mask contains 1 value for every kinematic body, but # in the observation every kinematic body corresponds to 3 values (x,y,z). For this we repeat # the kinematic part of the mask 3 times. num_bodies = kinematic_command.shape[1] // 3 kinematic_mask = mask[:, :num_bodies].repeat_interleave(3, dim=-1) kinematic_command *= kinematic_mask # Apply masking to joint references. num_joints = joint_command.shape[1] joint_mask = mask[:, num_bodies : num_bodies + num_joints] joint_command *= joint_mask # Apply masking to root references. root_mask = mask[:, num_bodies + num_joints :] root_command *= root_mask # Concatenate all references. We also have to add the mask itself, as else the network has no # way to determine the difference between a target state that is enabled but set to 0, or a # target state that is 0 because it is disabled. observations = torch.cat( [ kinematic_command, joint_command, root_command, mask, ], dim=1, ) return observations
Computes the reference goal state used in the observation of the student.
compute_distilled_imitation_observations
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/observations/student_observations.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/observations/student_observations.py
Apache-2.0
def compute_kinematic_command( ref_motion_state: ReferenceMotionState, body_state: BodyState, ref_episodic_offset: torch.Tensor | None, ) -> torch.Tensor: """ Compute the link position command used in the observation of the student. The link position command consists of: - the delta between the current root position and the target link positions """ num_envs, num_bodies, _ = body_state.body_pos_extend.shape root_pos = body_state.body_pos_extend[:, 0, :] root_rot_wxyz = body_state.body_rot_extend[:, 0, :] root_rot_xyzw = math_utils.convert_quat(root_rot_wxyz, to="xyzw") ref_body_pos = ref_motion_state.body_pos_extend heading_inv_rot_xyzw = torch_utils.calc_heading_quat_inv(root_rot_xyzw) heading_inv_rot_expand_xyzw = heading_inv_rot_xyzw.unsqueeze(-2).repeat((1, num_bodies, 1)) # Delta between the current root position and the target link positions. local_ref_body_pos = ref_body_pos - root_pos.unsqueeze(1).expand(-1, num_bodies, -1) local_ref_body_pos = torch_utils.my_quat_rotate( heading_inv_rot_expand_xyzw.view(-1, 4), local_ref_body_pos.view(-1, 3), ).view(num_envs, num_bodies, -1) return torch.cat( [ local_ref_body_pos.view(num_envs, -1), # num_envs * (num_bodies * 3) ], dim=1, )
Compute the link position command used in the observation of the student. The link position command consists of: - the delta between the current root position and the target link positions
compute_kinematic_command
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/observations/student_observations.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/observations/student_observations.py
Apache-2.0
def compute_joint_command(ref_motion_state: ReferenceMotionState, body_state: BodyState) -> torch.Tensor:
    """
    Compute the joint command used in the observation of the student.

    The joint reference is the delta between the current joint position and the target joint position.
    """
    delta_joint_pos = ref_motion_state.joint_pos - body_state.joint_pos
    return torch.cat([delta_joint_pos], dim=-1)
Compute the joint command used in the observation of the student.

    The joint reference is the delta between the current joint position and the target joint position.
compute_joint_command
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/observations/student_observations.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/observations/student_observations.py
Apache-2.0
def compute_root_command(ref_motion_state: ReferenceMotionState, body_state: BodyState) -> torch.Tensor: """ Compute the root command used in the observation of the student. The root command consists of - the target root velocity (in the root frame) - the target root roll and pitch - the delta between the current root yaw and the target root yaw - the root height. """ target_root_linear_velocity = math_utils.quat_rotate_inverse( ref_motion_state.root_rot, ref_motion_state.root_lin_vel ) ref_root_rot_wxyz = ref_motion_state.root_rot ref_root_rot_rpy = math_utils.euler_xyz_from_quat(ref_root_rot_wxyz) root_rot_wxyz = body_state.body_rot[:, 0, :] root_rot_rpy = math_utils.euler_xyz_from_quat(root_rot_wxyz) # We use the yaw_delta since yaw cannot be estimated from just proprioceptive measurements. # Another alternative option could be to use yaw velocity instead. roll = ref_root_rot_rpy[0] pitch = ref_root_rot_rpy[1] delta_yaw = ref_root_rot_rpy[2] - root_rot_rpy[2] target_root_rot_rpy = torch.stack([roll, pitch, delta_yaw], dim=-1) target_root_height = ref_motion_state.body_pos[:, 0, 2] return torch.cat( [ target_root_linear_velocity, # num_envs * 3 target_root_rot_rpy, # num_envs * 3 target_root_height.view(-1, 1), # num_envs * 1 ], dim=1, )
Compute the root command used in the observation of the student. The root command consists of - the target root velocity (in the root frame) - the target root roll and pitch - the delta between the current root yaw and the target root yaw - the root height.
compute_root_command
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/observations/student_observations.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/observations/student_observations.py
Apache-2.0
def compute_distilled_robot_state_observation( body_state: BodyState, base_id: int, projected_gravity: torch.Tensor, local_base_ang_velocity: torch.Tensor | None = None, ) -> torch.Tensor: """Root body state in the robot root frame.""" # for normalization joint_pos = body_state.joint_pos.clone() joint_vel = body_state.joint_vel.clone() local_base_ang_vel = local_base_ang_velocity if local_base_ang_velocity is None: local_base_ang_vel = math_utils.quat_rotate_inverse( body_state.body_rot_extend[:, base_id, :], body_state.body_ang_vel_extend[:, base_id, :] ) return torch.cat([joint_pos, joint_vel, local_base_ang_vel, projected_gravity], dim=-1)
Root body state in the robot root frame.
compute_distilled_robot_state_observation
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/observations/student_observations.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/observations/student_observations.py
Apache-2.0
def compute_teacher_observations( body_state: BodyState, ref_motion_state: ReferenceMotionState, tracked_body_ids: list[int], last_actions: torch.Tensor, ref_episodic_offset: torch.Tensor | None = None, ) -> tuple[torch.Tensor, dict[str, torch.Tensor]]: """Computes the observations for the teacher model based on the current body state, reference motion state, and other relevant parameters. Args: body_state (BodyState): The current state of the humanoid bodies. ref_motion_state (ReferenceMotionState): The reference motion state for the humanoid to track. tracked_body_ids (list[int]): List of body IDs to be tracked in observations. last_actions (torch.Tensor): The last actions taken. ref_episodic_offset (torch.Tensor | None, optional): Episodic offset for the reference motion. Defaults to None. Returns: tuple[torch.Tensor, dict[str, torch.Tensor]]: A tuple containing the concatenated observations tensor and a dictionary of individual observations. """ obs_dict = { "robot_state": compute_robot_state_observation(body_state), "imitation": compute_imitation_observations( body_state=body_state, ref_motion_state=ref_motion_state, tracked_body_ids=tracked_body_ids, ref_episodic_offset=ref_episodic_offset, ), "last_action": last_actions, } obs = torch.cat( [tensor for tensor in obs_dict.values()], dim=-1, ) return obs, obs_dict
Computes the observations for the teacher model based on the current body state, reference motion state, and other relevant parameters. Args: body_state (BodyState): The current state of the humanoid bodies. ref_motion_state (ReferenceMotionState): The reference motion state for the humanoid to track. tracked_body_ids (list[int]): List of body IDs to be tracked in observations. last_actions (torch.Tensor): The last actions taken. ref_episodic_offset (torch.Tensor | None, optional): Episodic offset for the reference motion. Defaults to None. Returns: tuple[torch.Tensor, dict[str, torch.Tensor]]: A tuple containing the concatenated observations tensor and a dictionary of individual observations.
compute_teacher_observations
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/observations/teacher_observations.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/observations/teacher_observations.py
Apache-2.0
def compute_robot_state_observation( body_state: BodyState, ) -> torch.Tensor: """Computes the robot state observation in the robot root frame. Args: body_state (BodyState): The current state of the humanoid bodies. """ # for normalization root_pos = body_state.body_pos_extend[:, 0, :].clone() root_rot = body_state.body_rot_extend[:, 0, :].clone() body_pos = body_state.body_pos_extend body_rot = body_state.body_rot_extend body_vel = body_state.body_lin_vel_extend body_ang_vel = body_state.body_ang_vel_extend num_envs, num_bodies, _ = body_pos.shape root_rot = math_utils.convert_quat(root_rot, to="xyzw") heading_rot_inv = torch_utils.calc_heading_quat_inv(root_rot) # xyzw heading_rot_inv_expand = heading_rot_inv.unsqueeze(-2) heading_rot_inv_expand = heading_rot_inv_expand.repeat((1, body_pos.shape[1], 1)) flat_heading_rot_inv = heading_rot_inv_expand.reshape(num_envs * num_bodies, 4) # body pos and normalize to egocentric (for angle only yaw) root_pos_extend = root_pos.unsqueeze(-2) local_body_pos = body_pos - root_pos_extend flat_local_body_pos = local_body_pos.reshape(num_envs * num_bodies, 3) flat_local_body_pos = torch_utils.my_quat_rotate(flat_heading_rot_inv, flat_local_body_pos) # input xyzw local_body_pos = flat_local_body_pos.reshape(num_envs, num_bodies * 3) local_body_pos_obs = local_body_pos[..., 3:] # remove root pos # body quat and normalize to egocentric (for angle only yaw) flat_body_rot = body_rot.reshape(num_envs * num_bodies, 4) flat_local_body_rot = math_utils.quat_mul( math_utils.convert_quat(flat_heading_rot_inv, to="wxyz"), flat_body_rot ) # input wxyz, output wxyz flat_local_body_rot = math_utils.convert_quat(flat_local_body_rot, to="xyzw") flat_local_body_rot_obs = torch_utils.quat_to_tan_norm( flat_local_body_rot ) # Shape becomes (num_envs, num_bodies * 6) local_body_rot_obs = flat_local_body_rot_obs.reshape(num_envs, num_bodies * flat_local_body_rot_obs.shape[1]) # body vel and normalize to egocentric (for angle only yaw) flat_body_vel = body_vel.reshape(num_envs * num_bodies, 3) flat_local_body_vel = torch_utils.my_quat_rotate(flat_heading_rot_inv, flat_body_vel) local_body_vel = flat_local_body_vel.reshape(num_envs, num_bodies * 3) # body ang vel and normalize to egocentric (for angle only yaw) flat_body_ang_vel = body_ang_vel.reshape(num_envs * num_bodies, 3) flat_local_body_ang_vel = torch_utils.my_quat_rotate(flat_heading_rot_inv, flat_body_ang_vel) local_body_ang_vel = flat_local_body_ang_vel.reshape(num_envs, num_bodies * 3) return torch.cat([local_body_pos_obs, local_body_rot_obs, local_body_vel, local_body_ang_vel], dim=-1)
Computes the robot state observation in the robot root frame. Args: body_state (BodyState): The current state of the humanoid bodies.
compute_robot_state_observation
python
NVlabs/HOVER
neural_wbc/core/neural_wbc/core/observations/teacher_observations.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/observations/teacher_observations.py
Apache-2.0
def test_zero_metrics(self): """ Test the metrics when all bodies have zero error. """ ( success_ids, episode, episode_gt, ) = self._create_data() self.metrics.update( success_ids=success_ids, episode=episode, episode_gt=episode_gt, ) self.metrics.conclude() for name, value in self.metrics.all_metrics.items(): self.assertAlmostEqual(value, 0.0, places=5, msg=f"Metric {name} is not close to 0.0") for name, value in self.metrics.success_metrics.items(): self.assertAlmostEqual(value, 0.0, places=5, msg=f"Metric {name} is not close to 0.0") for name, value in self.metrics.all_metrics_masked.items(): self.assertAlmostEqual(value, 0.0, places=5, msg=f"Metric {name} is not close to 0.0") for name, value in self.metrics.success_metrics_masked.items(): self.assertAlmostEqual(value, 0.0, places=5, msg=f"Metric {name} is not close to 0.0")
Test the metrics when all bodies have zero error.
test_zero_metrics
python
NVlabs/HOVER
neural_wbc/core/tests/test_evaluator.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/tests/test_evaluator.py
Apache-2.0
def test_offset_metrics(self):
    """
    Test the metrics when all bodies have only a translation error to GT. Then we expect the global error to be
    the translation and the local error to be 0.
    """
    (
        success_ids,
        episode,
        episode_gt,
    ) = self._create_data()

    offset = torch.tensor([0.1, 0.0, 0.0])
    for i in range(episode.num_envs):
        episode.body_pos[i] += offset
        episode.body_pos_masked[i] += offset

    self.metrics.update(
        success_ids=success_ids,
        episode=episode,
        episode_gt=episode_gt,
    )
    self.metrics.conclude()

    self.assertAlmostEqual(self.metrics.all_metrics["mpjpe_g"], np.linalg.norm(offset) * 1000)
    self.assertAlmostEqual(self.metrics.all_metrics["mpjpe_l"], 0.0)
    self.assertAlmostEqual(self.metrics.all_metrics_masked["mpjpe_g"], np.linalg.norm(offset) * 1000)
    self.assertAlmostEqual(self.metrics.all_metrics_masked["mpjpe_l"], 0.0)
Test the metrics when all bodies have only a translation error to GT. Then we expect the global error to be
    the translation and the local error to be 0.
test_offset_metrics
python
NVlabs/HOVER
neural_wbc/core/tests/test_evaluator.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/tests/test_evaluator.py
Apache-2.0
def test_masked_metrics(self): """ Test the metrics when only the masked bodies have an offset. We expect the masked error to be greater than the unmasked error. """ ( success_ids, episode, episode_gt, ) = self._create_data() offset = torch.tensor([0.1, 0.0, 0.0]) for i in range(episode.num_envs): episode.body_pos[i][2, :] += offset episode.body_pos_masked[i] += offset self.metrics.update( success_ids=success_ids, episode=episode, episode_gt=episode_gt, ) self.metrics.conclude() self.assertAlmostEqual(self.metrics.all_metrics["mpjpe_g"], np.linalg.norm(offset) * 3 / 9 * 1000) self.assertAlmostEqual(self.metrics.all_metrics["mpjpe_l"], 0.0) self.assertAlmostEqual(self.metrics.all_metrics_masked["mpjpe_g"], np.linalg.norm(offset) * 1000) self.assertAlmostEqual(self.metrics.all_metrics_masked["mpjpe_l"], 0.0)
Test the metrics when only the masked bodies have an offset. We expect the masked error to be greater than the unmasked error.
test_masked_metrics
python
NVlabs/HOVER
neural_wbc/core/tests/test_evaluator.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/tests/test_evaluator.py
Apache-2.0
def get_data_path(rel_path: str) -> str: """ Get the absolute path to a data file located in the 'neural_wbc/data/data' directory. Args: rel_path (str): The relative path to the data file from the data directory. Returns: str: The absolute path to the data file. Raises: FileNotFoundError: If the specified file does not exist. """ file_dir = os.path.dirname(os.path.abspath(__file__)) data_path = os.path.join(file_dir, "../../data", rel_path) if not os.path.exists(data_path): raise FileNotFoundError(f"The file at '{rel_path}' does not exist in neural_wbc_data.") return data_path
Get the absolute path to a data file located in the 'neural_wbc/data/data' directory. Args: rel_path (str): The relative path to the data file from the data directory. Returns: str: The absolute path to the data file. Raises: FileNotFoundError: If the specified file does not exist.
get_data_path
python
NVlabs/HOVER
neural_wbc/data/neural_wbc/data/__init__.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/data/neural_wbc/data/__init__.py
Apache-2.0
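Example usage; the relative path shown is hypothetical and only illustrates how lookups are rooted at the neural_wbc/data/data directory:

from neural_wbc.data import get_data_path

# Hypothetical relative path; raises FileNotFoundError if the file does not exist.
mjcf_path = get_data_path("mujoco/models/h1.xml")
print(mjcf_path)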
def __init__( self, cfg: Any, ) -> None: """Initializes a new instance of the H1SDKWrapper class. Args: cfg (Any): The configuration object. """ self.cfg = cfg self._low_cmd = unitree_go_msg_dds__LowCmd_() self._low_cmd_lock = RLock() self._cmd_publish_dt = self.cfg.cmd_publish_dt self._init_cmd() self._low_state = None self.crc = CRC() self._joint_positions = np.zeros(self.cfg.num_joints) self._joint_velocities = np.zeros(self.cfg.num_joints) self._torso_orientation_quat = np.array([1, 0, 0, 0]) self._torso_angular_velocity = np.zeros(3) self._init_sdk() self._cmd_received = False self._cmd_publisher_thread_ptr = RecurrentThread( interval=self._cmd_publish_dt, target=self._cmd_publisher, name="control_loop" ) self._cmd_publisher_thread_ptr.Start()
Initializes a new instance of the H1SDKWrapper class. Args: cfg (Any): The configuration object.
__init__
python
NVlabs/HOVER
neural_wbc/hw_wrappers/hw_wrappers/h1_sdk_wrapper.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/hw_wrappers/hw_wrappers/h1_sdk_wrapper.py
Apache-2.0
def _cmd_publisher(self): """Publishes the low-level command to the SDK.""" with self._low_cmd_lock: if not self._cmd_received: return self._lowcmd_publisher.Write(self._low_cmd)
Publishes the low-level command to the SDK.
_cmd_publisher
python
NVlabs/HOVER
neural_wbc/hw_wrappers/hw_wrappers/h1_sdk_wrapper.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/hw_wrappers/hw_wrappers/h1_sdk_wrapper.py
Apache-2.0
def _init_sdk(self): """Initializes the SDK for the H1 robot. This function initializes the SDK using the required configuration. Args: None """ ChannelFactoryInitialize(0, self.cfg.network_interface) # Create publisher self._lowcmd_publisher = ChannelPublisher(self.cfg.command_channel, LowCmd_) self._lowcmd_publisher.Init() # Create subscriber self.lowstate_subscriber = ChannelSubscriber(self.cfg.state_channel, LowState_) self.lowstate_subscriber.Init(self.state_handler, self.cfg.subscriber_freq)
Initializes the SDK for the H1 robot. This function initializes the SDK using the required configuration. Args: None
_init_sdk
python
NVlabs/HOVER
neural_wbc/hw_wrappers/hw_wrappers/h1_sdk_wrapper.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/hw_wrappers/hw_wrappers/h1_sdk_wrapper.py
Apache-2.0
def publish_joint_position_cmd(self, cmd_joint_positions: np.ndarray): """Publishes joint position commands to the low-level command publisher. Args: cmd_joint_positions (np.ndarray): An array of joint positions to be published. """ with self._low_cmd_lock: for joint_idx in range(self.cfg.num_joints): motor_idx = self.cfg.JointSeq2MotorID[joint_idx] self._low_cmd.motor_cmd[motor_idx].q = cmd_joint_positions[joint_idx] self._low_cmd.motor_cmd[motor_idx].dq = 0.0 self._low_cmd.motor_cmd[motor_idx].tau = 0.0 self._low_cmd.crc = self.crc.Crc(self._low_cmd) self._cmd_received = True
Publishes joint position commands to the low-level command publisher. Args: cmd_joint_positions (np.ndarray): An array of joint positions to be published.
publish_joint_position_cmd
python
NVlabs/HOVER
neural_wbc/hw_wrappers/hw_wrappers/h1_sdk_wrapper.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/hw_wrappers/hw_wrappers/h1_sdk_wrapper.py
Apache-2.0
def publish_joint_torque_cmd(self, cmd_joint_torques: np.ndarray): """Publishes joint torque commands to the low-level command publisher. Args: cmd_joint_torques (np.ndarray): An array of joint torques to be published. """ with self._low_cmd_lock: for joint_idx in range(self.cfg.num_joints): motor_idx = self.cfg.JointSeq2MotorID[joint_idx] self._low_cmd.motor_cmd[motor_idx].q = 0.0 self._low_cmd.motor_cmd[motor_idx].dq = 0.0 self._low_cmd.motor_cmd[motor_idx].tau = cmd_joint_torques[joint_idx] self._low_cmd.motor_cmd[motor_idx].kp = 0.0 self._low_cmd.motor_cmd[motor_idx].kd = 0.0 self._low_cmd.crc = self.crc.Crc(self._low_cmd) self._cmd_received = True
Publishes joint torque commands to the low-level command publisher. Args: cmd_joint_torques (np.ndarray): An array of joint torques to be published.
publish_joint_torque_cmd
python
NVlabs/HOVER
neural_wbc/hw_wrappers/hw_wrappers/h1_sdk_wrapper.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/hw_wrappers/hw_wrappers/h1_sdk_wrapper.py
Apache-2.0
def reset(self, desired_joint_positions: np.ndarray | None = None) -> None:
    """Resets the robot to the given joint positions.

    Args:
        desired_joint_positions (np.ndarray | None, optional): An array of desired joint positions.
            Defaults to None: The robot will be reset to the 0 initial pose.
    """
    self.time_ = 0.0
    self.control_dt_ = self.cfg.reset_step_dt
    self.duration_ = self.cfg.reset_duration
    if desired_joint_positions is None:
        desired_joint_positions = np.zeros(self.cfg.num_joints)
    else:
        # Flatten only when a pose is provided; calling .flatten() on None would raise.
        desired_joint_positions = desired_joint_positions.flatten()
    print("Resetting H1 to given pose.")
    while self.time_ < self.duration_:
        self.time_ += self.control_dt_
        ratio = self.time_ / self.duration_
        print(f"\rResetting: {int(self.duration_ - self.time_)}s remaining...", end="", flush=True)
        current_joint_positions = self.joint_positions
        target_joint_positions = (
            current_joint_positions + (desired_joint_positions - current_joint_positions) * ratio
        )
        self.publish_joint_position_cmd(target_joint_positions)
        time.sleep(self.control_dt_)
    print("\nReset complete.")
Resets the robot to the given joint positions. Args: desired_joint_positions (np.ndarray | None, optional): An array of desired joint positions. Defaults to None: The robot will be reset to the 0 initial pose.
reset
python
NVlabs/HOVER
neural_wbc/hw_wrappers/hw_wrappers/h1_sdk_wrapper.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/hw_wrappers/hw_wrappers/h1_sdk_wrapper.py
Apache-2.0
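The reset ramp above blends the measured pose toward the target linearly over the reset duration. A hardware-free sketch with hypothetical values, assuming the previous command is tracked perfectly (on the robot, the measured joint positions are re-read each step):

import numpy as np

current = np.array([0.3, -0.2])   # hypothetical measured joint positions
target = np.zeros(2)              # desired joint positions
duration, dt = 2.0, 0.5

t = 0.0
while t < duration:
    t += dt
    ratio = t / duration
    command = current + (target - current) * ratio
    current = command             # assume the previous command is tracked perfectly
    print(f"t={t:.1f}s -> {command}")
# The final command coincides with the target once t == duration.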
def _init_cmd(self): """Initializes the low-level command. This function sets the values of the low-level command based on the configuration. """ self._low_cmd.head[0] = self.cfg.head0 self._low_cmd.head[1] = self.cfg.head1 self._low_cmd.level_flag = self.cfg.level_flag self._low_cmd.gpio = self.cfg.gpio for i in range(len(self.cfg.motor_id_to_name)): if self._is_weak_motor(i): self._low_cmd.motor_cmd[i].mode = self.cfg.weak_motor_mode self._low_cmd.motor_cmd[i].kp = self.cfg.kp_low self._low_cmd.motor_cmd[i].kd = self.cfg.kd_low else: self._low_cmd.motor_cmd[i].mode = self.cfg.strong_motor_mode self._low_cmd.motor_cmd[i].kp = self.cfg.kp_high self._low_cmd.motor_cmd[i].kd = self.cfg.kd_high self._low_cmd.motor_cmd[i].q = 0 self._low_cmd.motor_cmd[i].dq = 0 self._low_cmd.motor_cmd[i].tau = 0
Initializes the low-level command. This function sets the values of the low-level command based on the configuration.
_init_cmd
python
NVlabs/HOVER
neural_wbc/hw_wrappers/hw_wrappers/h1_sdk_wrapper.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/hw_wrappers/hw_wrappers/h1_sdk_wrapper.py
Apache-2.0
def state_handler(self, msg: LowState_): """ Update the joint positions and velocities based on the low state message. Saves them as per the joint sequence of isaac lab and mujoco. Args: msg (LowState_): The low state message containing the motor states. """ self._low_state = msg # The orientation is in the world frame, which depends on the initial pose of the robot. self._torso_orientation_quat = self._low_state.imu_state.quaternion # The angular velocity is in robot frame, which does not depend on the initial pose of the robot. self._torso_angular_velocity = np.array(self._low_state.imu_state.gyroscope) for joint_idx in range(self.cfg.num_joints): motor_idx = self.cfg.JointSeq2MotorID[joint_idx] self._joint_positions[joint_idx] = msg.motor_state[motor_idx].q self._joint_velocities[joint_idx] = msg.motor_state[motor_idx].dq
Update the joint positions and velocities based on the low state message. Saves them as per the joint sequence of isaac lab and mujoco. Args: msg (LowState_): The low state message containing the motor states.
state_handler
python
NVlabs/HOVER
neural_wbc/hw_wrappers/hw_wrappers/h1_sdk_wrapper.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/hw_wrappers/hw_wrappers/h1_sdk_wrapper.py
Apache-2.0
def update(self, obs_dict: dict[str, torch.Tensor]) -> None: """Update the underlying model based on the observations from the environment/real robot. Args: obs_dict (dict[str, torch.Tensor]): A dictionary containing the latest robot observations. """ # TODO (pulkitg): Change this to get data from vicon later on # For root use values from init state in config if "root_pos" in obs_dict: self._root_position = obs_dict["root_pos"] if "root_orientation" in obs_dict: self._root_rotation = obs_dict["root_orientation"] # We only need root position for the policy self._root_lin_vel = torch.zeros(1, 3).to(dtype=torch.float32, device=self.device) self._root_ang_vel = torch.zeros(1, 3).to(dtype=torch.float32, device=self.device) # Get state from the robot self._joint_positions = ( torch.from_numpy(self._h1_sdk.joint_positions).unsqueeze(0).to(dtype=torch.float32, device=self.device) ) self._joint_velocities = ( torch.from_numpy(self._h1_sdk.joint_velocities).unsqueeze(0).to(dtype=torch.float32, device=self.device) ) qpos = torch.hstack((self._root_position, self._root_rotation, self._joint_positions)) qvel = torch.hstack((self._root_ang_vel, self._root_lin_vel, self._joint_velocities)) self._kinematic_model.reset(qpos, qvel) self._joint_positions = self._kinematic_model.joint_positions self._joint_velocities = self._kinematic_model.joint_velocities self._body_positions = self._kinematic_model.body_positions self._body_rotations = self._kinematic_model.body_rotations self._body_lin_vels, self._body_ang_vels = self._kinematic_model.body_velocities
Update the underlying model based on the observations from the environment/real robot. Args: obs_dict (dict[str, torch.Tensor]): A dictionary containing the latest robot observations.
update
python
NVlabs/HOVER
neural_wbc/hw_wrappers/hw_wrappers/unitree_h1.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/hw_wrappers/hw_wrappers/unitree_h1.py
Apache-2.0
def reset(self, **kwargs) -> None: """Resets the wrapper Args: kwargs (dict[str, Any], optional): key-word arguments to pass to underlying models. Defaults to None. """ qpos = kwargs.get("qpos") qvel = kwargs.get("qvel") self._kinematic_model.reset(qpos=qpos, qvel=qvel) joint_positions = qpos[..., self._kinematic_model.joint_pos_offset :] joint_positions_np = joint_positions.numpy() # Reset robot pose self._h1_sdk.reset(joint_positions_np) self.update({})
Resets the wrapper Args: kwargs (dict[str, Any], optional): key-word arguments to pass to underlying models. Defaults to None.
reset
python
NVlabs/HOVER
neural_wbc/hw_wrappers/hw_wrappers/unitree_h1.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/hw_wrappers/hw_wrappers/unitree_h1.py
Apache-2.0
def get_joint_ids(self, joint_names: list[str] | None = None) -> dict[str, int]: """Get the IDs of all joints in the model, indexed after removing the free joint. Args: joint_names (list[str] | None, optional): Names of the joints. Defaults to None. Returns: dict[str, int]: Mapping from joint name to joint id. """ # TODO(pulkig): Resolve these with the unitree sdk motor IDs return self._kinematic_model.get_joint_ids(joint_names, self._free_joint_offset)
Get the IDs of all joints in the model, indexed after removing the free joint. Args: joint_names (list[str] | None, optional): Names of the joints. Defaults to None. Returns: dict[str, int]: Mapping from joint name to joint id.
get_joint_ids
python
NVlabs/HOVER
neural_wbc/hw_wrappers/hw_wrappers/unitree_h1.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/hw_wrappers/hw_wrappers/unitree_h1.py
Apache-2.0
def get_base_projected_gravity(self, base_name: str = "torso_link") -> torch.Tensor: """Get the projection of the gravity vector to the base frame Args: base_name (str, optional): Name of the base. Defaults to 'torso_link'. Returns: torch.Tensor: Projection of the gravity vector to the base frame """ # Get torso orientation in quaternion [w, x, y, z] torso_orientation_quat = self._h1_sdk.torso_orientation # Create rotation matrix from quaternion torso_rot_mat_np = R.from_quat(torso_orientation_quat, scalar_first=True).as_matrix() torso_rot_mat = torch.tensor(torso_rot_mat_np, device=self.device, dtype=torch.float32) # World gravity vector (normalized) world_gravity = torch.tensor([0.0, 0.0, -1.0], device=self.device, dtype=torch.float32) # Transform gravity to base frame base_gravity = torso_rot_mat.T @ world_gravity return base_gravity.unsqueeze(0)
Get the projection of the gravity vector to the base frame Args: base_name (str, optional): Name of the base. Defaults to 'torso_link'. Returns: torch.Tensor: Projection of the gravity vector to the base frame
get_base_projected_gravity
python
NVlabs/HOVER
neural_wbc/hw_wrappers/hw_wrappers/unitree_h1.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/hw_wrappers/hw_wrappers/unitree_h1.py
Apache-2.0
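A stand-alone check of the projection math used above, matching the 90-degree roll unit test below: gravity is rotated into the base frame with the transpose of the torso rotation matrix. This assumes a SciPy version that supports the scalar_first argument, as the tests below already do:

import numpy as np
from scipy.spatial.transform import Rotation as R

quat_wxyz = R.from_euler("x", 90, degrees=True).as_quat(scalar_first=True)   # torso rolled 90 degrees
torso_rot = R.from_quat(quat_wxyz, scalar_first=True).as_matrix()

world_gravity = np.array([0.0, 0.0, -1.0])
base_gravity = torso_rot.T @ world_gravity
print(np.round(base_gravity, 6))   # [ 0. -1.  0.]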
def test_get_base_projected_gravity_upright(self): """Test gravity projection when robot is upright""" # Set upright orientation (identity quaternion) self.robot._h1_sdk.torso_orientation = np.array([1.0, 0.0, 0.0, 0.0]) # Call the function gravity = self.robot.get_base_projected_gravity("pelvis") # In upright position, gravity should point straight down in base frame expected = torch.tensor([[0.0, 0.0, -1.0]], device=self.device) torch.testing.assert_close(gravity, expected)
Test gravity projection when robot is upright
test_get_base_projected_gravity_upright
python
NVlabs/HOVER
neural_wbc/hw_wrappers/tests/test_unitree_h1.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/hw_wrappers/tests/test_unitree_h1.py
Apache-2.0
def test_get_base_projected_gravity_rotated_90x(self): """Test gravity projection when robot is rotated 90 degrees around x-axis""" # Create quaternion for 90-degree rotation around x-axis quat = R.from_euler("x", 90, degrees=True).as_quat(scalar_first=True) # Convert to scalar-first format [w, x, y, z] self.robot._h1_sdk.torso_orientation = quat # Call the function gravity = self.robot.get_base_projected_gravity() # After 90-degree x rotation, gravity should point along negative y in base frame expected = torch.tensor([[0.0, -1.0, 0.0]], device=self.device) torch.testing.assert_close(gravity, expected, atol=1e-6, rtol=1e-6)
Test gravity projection when robot is rotated 90 degrees around x-axis
test_get_base_projected_gravity_rotated_90x
python
NVlabs/HOVER
neural_wbc/hw_wrappers/tests/test_unitree_h1.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/hw_wrappers/tests/test_unitree_h1.py
Apache-2.0
def test_get_base_projected_gravity_rotated_45y(self): """Test gravity projection when robot is rotated 45 degrees around y-axis""" # Create quaternion for 45-degree rotation around y-axis quat = R.from_euler("y", 45, degrees=True).as_quat(scalar_first=True) # Convert to scalar-first format [w, x, y, z] self.robot._h1_sdk.torso_orientation = quat # Call the function gravity = self.robot.get_base_projected_gravity() # After 45-degree y rotation, gravity should be split between x and z expected = torch.tensor([[0.707107, 0.0, -0.707107]], device=self.device) torch.testing.assert_close(gravity, expected, atol=1e-6, rtol=1e-6)
Test gravity projection when robot is rotated 45 degrees around y-axis
test_get_base_projected_gravity_rotated_45y
python
NVlabs/HOVER
neural_wbc/hw_wrappers/tests/test_unitree_h1.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/hw_wrappers/tests/test_unitree_h1.py
Apache-2.0
def test_get_base_projected_gravity_inverted(self): """Test gravity projection when robot is completely inverted""" # Create quaternion for 180-degree rotation around x-axis quat = R.from_euler("x", 180, degrees=True).as_quat(scalar_first=True) self.robot._h1_sdk.torso_orientation = quat # Call the function gravity = self.robot.get_base_projected_gravity() # When inverted, gravity should point upward in base frame expected = torch.tensor([[0.0, 0.0, 1.0]], device=self.device) torch.testing.assert_close(gravity, expected, atol=1e-6, rtol=1e-6)
Test gravity projection when robot is completely inverted
test_get_base_projected_gravity_inverted
python
NVlabs/HOVER
neural_wbc/hw_wrappers/tests/test_unitree_h1.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/hw_wrappers/tests/test_unitree_h1.py
Apache-2.0
def test_get_base_projected_gravity_rotated_90z(self): """Test gravity projection when robot is rotated 90 degrees around z-axis""" # Create quaternion for 90-degree rotation around z-axis quat = R.from_euler("z", 90, degrees=True).as_quat(scalar_first=True) self.robot._h1_sdk.torso_orientation = quat # Call the function gravity = self.robot.get_base_projected_gravity() # After 90-degree z rotation, gravity should still point straight down # because z-axis rotation doesn't affect gravity direction expected = torch.tensor([[0.0, 0.0, -1.0]], device=self.device) torch.testing.assert_close(gravity, expected, atol=1e-6, rtol=1e-6)
Test gravity projection when robot is rotated 90 degrees around z-axis
test_get_base_projected_gravity_rotated_90z
python
NVlabs/HOVER
neural_wbc/hw_wrappers/tests/test_unitree_h1.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/hw_wrappers/tests/test_unitree_h1.py
Apache-2.0
def __init__( self, args_cli: argparse.Namespace, env_cfg: NeuralWBCEnvCfg, custom_config: dict[str, Any] | None = None, demo_mode: bool = False, ): """ Args: args_cli: command line arguments custom_config: custom configuration for the environment demo_mode (bool): whether to run in demo mode, without need for student policy Note: The *demo_mode* allows setting of joint manually for e.g. debugging purposes. """ env_cfg.robot = args_cli.robot self.demo_mode = demo_mode if not args_cli.headless: env_cfg.enable_viewer = True if args_cli.reference_motion_path is not None: env_cfg.reference_motion_cfg.motion_path = args_cli.reference_motion_path if custom_config is not None: self._update_env_cfg(env_cfg=env_cfg, custom_config=custom_config) # Creates inference environment self.env = NeuralWBCEnv(cfg=env_cfg) assert self.env.cfg.control_type in [ "Pos", "None", ], "Only position control or None is supported for this player." if not self.demo_mode: student_path = args_cli.student_path if student_path: with open(os.path.join(student_path, "config.json")) as fh: config_dict = json.load(fh) config_dict["resume_path"] = student_path config_dict["checkpoint"] = args_cli.student_checkpoint student_cfg = StudentPolicyTrainerCfg(**config_dict) student_trainer = StudentPolicyTrainer(env=self.env, cfg=student_cfg) self.policy = student_trainer.get_inference_policy(device=self.env.device) else: raise ValueError("student_policy.resume_path is needed for play or eval. Please specify a value.")
Args: args_cli: command line arguments env_cfg: environment configuration custom_config: custom configuration for the environment demo_mode (bool): whether to run in demo mode, without needing a student policy Note: The *demo_mode* allows setting joints manually, e.g. for debugging purposes.
__init__
python
NVlabs/HOVER
neural_wbc/inference_env/inference_env/deployment_player.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/inference_env/inference_env/deployment_player.py
Apache-2.0
def play_once(self, ext_actions: torch.Tensor | None = None): """Advances the environment one time step after generating observations""" obs = self.env.get_observations() with torch.inference_mode(): if ext_actions is not None: actions = ext_actions else: actions = self.policy(obs) _, obs, dones, extras = self.env.step(actions) # For HW, this internally just does forward return actions, obs, dones, extras
Advances the environment one time step after generating observations
play_once
python
NVlabs/HOVER
neural_wbc/inference_env/inference_env/deployment_player.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/inference_env/inference_env/deployment_player.py
Apache-2.0
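A minimal usage sketch for the player above, assuming a default-constructible NeuralWBCEnvCfg and placeholder paths (none of the values are taken from the repository):

import argparse
import torch

args = argparse.Namespace(
    robot="unitree_h1",
    headless=True,
    reference_motion_path=None,
    student_path="/path/to/student_run",   # directory containing config.json (placeholder)
    student_checkpoint="model.pt",         # hypothetical checkpoint file name
)
player = DeploymentPlayer(args_cli=args, env_cfg=NeuralWBCEnvCfg())
player.env.reset()
for _ in range(1000):
    actions, obs, dones, extras = player.play_once()
    if torch.any(dones):
        player.env.reset()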
def _update_env_cfg(self, env_cfg, custom_config: dict[str, Any]): """Update the default environment config if user provides a custom config. See readme for detailed usage.""" for key, value in custom_config.items(): obj = env_cfg attrs = key.split(".") try: for a in attrs[:-1]: obj = getattr(obj, a) setattr(obj, attrs[-1], value) except AttributeError as atx: raise AttributeError(f"[ERROR]: {key} is not a valid configuration key.") from atx print("Updated configuration:") pprint.pprint(env_cfg)
Update the default environment config if the user provides a custom config. See the README for detailed usage.
_update_env_cfg
python
NVlabs/HOVER
neural_wbc/inference_env/inference_env/deployment_player.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/inference_env/inference_env/deployment_player.py
Apache-2.0
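The dotted-key format accepted by _update_env_cfg above, sketched with attribute names that appear elsewhere in this environment config (the motion path value is a placeholder):

custom_config = {
    "max_ref_motion_dist": 0.5,
    "ctrl_delay_step_range": (0, 2),
    "reference_motion_cfg.motion_path": "/data/motions/walk.pkl",  # hypothetical path
}
# Each key is split on ".", intermediate attributes are resolved with getattr, and the final
# attribute is set, e.g. env_cfg.reference_motion_cfg.motion_path = "/data/motions/walk.pkl".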
def __init__( self, cfg: NeuralWBCEnvCfg, render_mode: str | None = None, device=torch.device("cuda" if torch.cuda.is_available() else "cpu"), ) -> None: """Initializes the environment. Args: cfg (NeuralWBCEnvCfgH1): Environment configuration. render_mode (str | None, optional): Render mode. Defaults to None. device (torch.device, optional): torch device to use. Defaults to 'cuda'. """ super().__init__(mode=cfg.mode) self.cfg = cfg self.render_mode = render_mode self.num_envs = 1 self._env_ids = torch.arange(0, self.num_envs, device=device) self.device = device self.reference_motion_manager = ReferenceMotionManager( cfg=self.cfg.reference_motion_cfg, device=self.device, num_envs=self.num_envs, random_sample=(self.cfg.mode.is_training_mode()), extend_head=True, dt=self.cfg.decimation * self.cfg.dt, ) # Start positions of each environment self._start_positions_on_terrain = torch.zeros([self.num_envs, 3], device=self.device, dtype=torch.float) if cfg.robot not in get_robot_names(): raise ValueError(f"Unknown robot: {cfg.robot}, options are: {get_robot_names()}") robot_class = get_robot_class(cfg.robot) self._robot = robot_class( cfg, num_instances=1, device=self.device, ) self._joint_ids = self._robot.get_joint_ids() self._body_ids = self._robot.get_body_ids() self._base_name = "torso_link" self._base_id = self._body_ids[self._base_name] self.num_actions = self._robot.num_controls print("[INFO]: Joint ids", self._joint_ids) print("[INFO]: Body ids", self._body_ids) # actions self.actions = torch.zeros(self.num_envs, self.num_actions, device=self.device) self._processed_actions = torch.zeros(self.num_envs, self.num_actions, device=self.device) self.episode_length_buf = torch.zeros(self.num_envs, device=self.device, dtype=torch.long) # Resolve the extended bodies if self.cfg.extend_body_parent_names: self.extend_body_parent_ids = [self._body_ids[name] for name in self.cfg.extend_body_parent_names] self.extend_body_pos = self.cfg.extend_body_pos.repeat(self.num_envs, 1, 1).to(self.device) for name in self.cfg.extend_body_names: self._body_ids[name] = len(self._body_ids) else: self.extend_body_parent_ids = None self.extend_body_pos = None self._tracked_body_ids = [self._body_ids[name] for name in self.cfg.tracked_body_names] # Initialize extras self.extras = {} # Distill if self.cfg.mode.is_distill_mode(): self.history = StudentHistory( num_envs=self.num_envs, device=self.device, entry_length=self.cfg.single_history_dim, max_entries=self.cfg.observation_history_length, ) print("[INFO]: Setting up pd gains") self._p_gains = torch.zeros((self.num_envs, self.num_actions), dtype=torch.float, device=self.device) self._d_gains = torch.zeros((self.num_envs, self.num_actions), dtype=torch.float, device=self.device) for key, value in self.cfg.stiffness.items(): joint_id_dict = self._robot.get_joint_ids([key]) self._p_gains[:, joint_id_dict[key]] = value print("[INFO]: Setting up p gains", joint_id_dict[key], key, value) for key, value in self.cfg.damping.items(): joint_id_dict = self._robot.get_joint_ids([key]) self._d_gains[:, joint_id_dict[key]] = value print("[INFO]: Setting up d gains", joint_id_dict[key], key, value) self._effort_limit = torch.zeros((self.num_envs, self.num_actions), dtype=torch.float, device=self.device) for key, value in self.cfg.effort_limit.items(): joint_id_dict = self._robot.get_joint_ids([key]) self._effort_limit[:, joint_id_dict[key]] = value print("[INFO]: Setting effort limit", joint_id_dict[key], key, value) self._lower_position_limit = torch.zeros( (self.num_envs, 
self.num_actions), dtype=torch.float, device=self.device ) self._upper_position_limit = torch.zeros( (self.num_envs, self.num_actions), dtype=torch.float, device=self.device ) for key, value in self.cfg.position_limit.items(): joint_id_dict = self._robot.get_joint_ids([key]) self._lower_position_limit[:, joint_id_dict[key]] = value[0] self._upper_position_limit[:, joint_id_dict[key]] = value[1] print("[INFO]: Setting position limit", joint_id_dict[key], key, value) # resolve the controller self._control_fn = resolve_control_fn(self.cfg.control_type) # resolve the limit type self._apply_limits = self._resolve_limit_fn(self.cfg.robot_actuation_type) # resolve the control delay self.action_queue = torch.zeros( (self.num_envs, self.cfg.ctrl_delay_step_range[1] + 1, self.num_actions), dtype=torch.float, device=self.device, requires_grad=False, ) self._action_delay = torch.randint( self.cfg.ctrl_delay_step_range[0], self.cfg.ctrl_delay_step_range[1] + 1, (self.num_envs,), device=self.device, requires_grad=False, ) # resolve the control noise: we will add noise to the final torques. the _rfi_lim defines # the sample range of the added noise. It represented by the percentage of the control limits. # noise = uniform(self.rfi_lim*joint_effort_limit, self.rfi_lim_*joint_effort_limit) self.default_rfi_lim = self.cfg.default_rfi_lim * torch.ones( (self.num_envs, self.num_actions), dtype=torch.float, device=self.device ) self.rfi_lim = self.default_rfi_lim.clone() # default state self._default_qpos = self._robot.default_joint_positions self._default_qvel = self._robot.default_joint_velocities self._update_robot_default_state() # The mask is ordered by bodies, joints, root references. Since bodies are first we can # directly reuse the self._tracked_body_ids here too. self._mask = torch.zeros((self.num_envs, self.cfg.mask_length), device=self.device).bool() self._mask[:, self._tracked_body_ids] = True self.reset()
Initializes the environment. Args: cfg (NeuralWBCEnvCfg): Environment configuration. render_mode (str | None, optional): Render mode. Defaults to None. device (torch.device, optional): torch device to use. Defaults to 'cuda' if available, otherwise 'cpu'.
__init__
python
NVlabs/HOVER
neural_wbc/inference_env/inference_env/neural_wbc_env.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/inference_env/inference_env/neural_wbc_env.py
Apache-2.0
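A sketch of the per-joint dictionaries that the constructor above iterates over when building gain, effort and position-limit tensors; the joint names and values are placeholders, not taken from the H1 model:

stiffness = {"left_knee": 200.0, "right_knee": 200.0}      # filled into self._p_gains
damping = {"left_knee": 5.0, "right_knee": 5.0}            # filled into self._d_gains
effort_limit = {"left_knee": 300.0, "right_knee": 300.0}   # filled into self._effort_limit
position_limit = {"left_knee": (-0.26, 2.05)}              # (lower, upper) position bounds
# For each entry, self._robot.get_joint_ids([name]) resolves the joint's column index
# and the scalar is broadcast over all environments for that joint.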
def step(self, actions: torch.Tensor) -> tuple[dict, torch.Tensor, torch.Tensor, dict]: """Performs one step of simulation. Args: actions (torch.Tensor): Actions of shape (num_envs, num_actions) Returns: * Observations as a dict or an object. * Rewards of shape (num_envs,) * "Dones": a boolean tensor representing termination of an episode in each environment. * Extra information captured, as a dict, always empty. """ # process actions self._pre_physics_step(actions) for _ in range(self.cfg.decimation): self._apply_action() # Convert it to numpy and apply it to the simulator. processed_action_np = self._processed_action.detach().cpu().numpy() self.robot.step(processed_action_np) # Forward the current episode step buffer to keep track of current number of steps into the motion reference. self.episode_length_buf += 1 terminated, time_outs = self._get_dones() dones = (terminated | time_outs).to(dtype=terminated.dtype) # This env will be used for testing only. rewards = {} obs_dict = self._compute_observations() if self.cfg.mode.is_distill_mode(): obs = obs_dict["student_policy"] else: obs = obs_dict["teacher_policy"] # Extras are required for evaluation self.extras["observations"] = obs_dict extras = self._compute_extras() self.extras.update(extras) return rewards, obs, dones, self.extras
Performs one step of simulation. Args: actions (torch.Tensor): Actions of shape (num_envs, num_actions) Returns: * Rewards, returned as an empty dict in this environment. * Observations as a dict or an object. * "Dones": a boolean tensor representing termination of an episode in each environment. * Extra information captured as a dict, including observations and termination data.
step
python
NVlabs/HOVER
neural_wbc/inference_env/inference_env/neural_wbc_env.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/inference_env/inference_env/neural_wbc_env.py
Apache-2.0
def _pre_physics_step(self, actions: torch.Tensor): """Prepares the robot for the next physics step. Args: actions (torch.Tensor): Actions of shape (num_envs, num_actions) """ # update history if self.cfg.mode.is_distill_mode(): obs_dic = self._compute_observations() self.history.update(obs_dic) # Action delay process if self.cfg.ctrl_delay_step_range[1] > 0: self.action_queue[:, 1:] = self.action_queue[:, :-1].clone() self.action_queue[:, 0] = actions.clone() self.actions = self.action_queue[torch.arange(self.num_envs), self._action_delay].clone() else: self.actions = actions.clone()
Prepares the robot for the next physics step. Args: actions (torch.Tensor): Actions of shape (num_envs, num_actions)
_pre_physics_step
python
NVlabs/HOVER
neural_wbc/inference_env/inference_env/neural_wbc_env.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/inference_env/inference_env/neural_wbc_env.py
Apache-2.0
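A standalone sketch of the delay queue used in _pre_physics_step above: the newest action is written at index 0 and each environment reads back by its own sampled delay.

import torch

num_envs, max_delay, num_actions = 2, 2, 3
action_queue = torch.zeros(num_envs, max_delay + 1, num_actions)
action_delay = torch.tensor([0, 2])  # env 0 acts immediately, env 1 lags by two steps

for step in range(4):
    new_action = torch.full((num_envs, num_actions), float(step))
    action_queue[:, 1:] = action_queue[:, :-1].clone()
    action_queue[:, 0] = new_action
    applied = action_queue[torch.arange(num_envs), action_delay]
    # at step 3: env 0 applies action 3, env 1 applies action 1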
def reset(self, env_ids: list | torch.Tensor | None = None): """Resets environment specified by env_ids. Args: env_ids (list | torch.Tensor | None, optional): Environment ids. Defaults to None. """ env_ids = self._env_ids if env_ids is None else env_ids self._reset_robot_state_and_motion(env_ids=env_ids) # reset actions self.actions[env_ids] = 0.0 self.action_queue[env_ids] = 0.0 self._action_delay[env_ids] = torch.randint( self.cfg.ctrl_delay_step_range[0], self.cfg.ctrl_delay_step_range[1] + 1, (len(env_ids),), device=self.device, requires_grad=False, ) # reset history if self.cfg.mode.is_distill_mode(): self.history.reset(env_ids=env_ids) obs = self.get_observations() return obs, None
Resets environment specified by env_ids. Args: env_ids (list | torch.Tensor | None, optional): Environment ids. Defaults to None.
reset
python
NVlabs/HOVER
neural_wbc/inference_env/inference_env/neural_wbc_env.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/inference_env/inference_env/neural_wbc_env.py
Apache-2.0
def _reset_robot_state_and_motion(self, env_ids: list | torch.Tensor | None = None): """Resets the robot state and the reference motion. Args: env_ids (list | torch.Tensor | None, optional): Environment ids. Defaults to None. """ env_ids = env_ids if env_ids is not None else self._env_ids self.reference_motion_manager.reset_motion_start_times(env_ids=env_ids, sample=False) self.episode_length_buf[env_ids] = 0 # Record new start locations on terrain self._start_positions_on_terrain[env_ids, ...] = self._default_qpos[env_ids, :3] ref_motion_state = self.reference_motion_manager.get_state_from_motion_lib_cache( episode_length_buf=self.episode_length_buf, offset=self._start_positions_on_terrain, terrain_heights=self.robot.get_terrain_heights(), ) joint_pos = ref_motion_state.joint_pos[env_ids] joint_vel = ref_motion_state.joint_vel[env_ids] # Note: In IsaacLab implementation, the z direction is offset by a constant, which is not necessary for mujoco. root_pos = ref_motion_state.root_pos[env_ids] root_quat = ref_motion_state.root_rot[env_ids] root_lin_vel = ref_motion_state.root_lin_vel[env_ids] root_ang_vel = ref_motion_state.root_ang_vel[env_ids] # Assemble the state of generalized coordinates. qpos = torch.hstack((root_pos, root_quat, joint_pos)) qvel = torch.hstack((root_ang_vel, root_lin_vel, joint_vel)) self.robot.reset(qpos=qpos, qvel=qvel)
Resets the robot state and the reference motion. Args: env_ids (list | torch.Tensor | None, optional): Environment ids. Defaults to None.
_reset_robot_state_and_motion
python
NVlabs/HOVER
neural_wbc/inference_env/inference_env/neural_wbc_env.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/inference_env/inference_env/neural_wbc_env.py
Apache-2.0
def _update_robot_default_state(self): """Loads the default initial state of the robot.""" # Read the default values from the mujoco data first. robot_init_state = self.cfg.robot_init_state state_update_dict = {} root_pos_offset = 3 if "base_pos" in robot_init_state: self._default_qpos[:, :root_pos_offset] = torch.tensor( self.cfg.robot_init_state["base_pos"], device=self.device ) state_update_dict["root_pos"] = self._default_qpos[:, :root_pos_offset] if "base_quat" in robot_init_state: self._default_qpos[:, root_pos_offset : self._robot.joint_pos_offset] = torch.tensor( self.cfg.robot_init_state["base_quat"], device=self.device ) state_update_dict["root_orientation"] = self._default_qpos[ :, root_pos_offset : self._robot.joint_pos_offset ] joint_pos = robot_init_state.get("joint_pos", {}) joint_vel = robot_init_state.get("joint_vel", {}) for key, value in joint_pos.items(): joint_id = self._robot.get_joint_ids([key])[key] self._default_qpos[:, joint_id + self._robot.joint_pos_offset] = value for key, value in joint_vel.items(): joint_id = self._robot.get_joint_ids([key])[key] self._default_qvel[:, joint_id + self._robot.joint_vel_offset] = value state_update_dict["joint_positions"] = self._default_qpos[:, self._robot.joint_pos_offset :] state_update_dict["joint_velocities"] = self._default_qvel[:, self._robot.joint_vel_offset :] self._robot.update(state_update_dict)
Loads the default initial state of the robot.
_update_robot_default_state
python
NVlabs/HOVER
neural_wbc/inference_env/inference_env/neural_wbc_env.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/inference_env/inference_env/neural_wbc_env.py
Apache-2.0
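An illustrative robot_init_state layout for the method above; the joint names and numeric values are placeholders:

robot_init_state = {
    "base_pos": [0.0, 0.0, 1.05],
    "base_quat": [1.0, 0.0, 0.0, 0.0],   # (w, x, y, z)
    "joint_pos": {"left_knee": 0.3, "right_knee": 0.3},
    "joint_vel": {},
}
# base_pos fills qpos[:3], base_quat fills qpos[3:joint_pos_offset], and each joint entry
# is written at its resolved joint id plus joint_pos_offset / joint_vel_offset.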
def _apply_action(self): """Applies the current action to the robot.""" actions_scaled = self.actions * self.cfg.action_scale + self.default_joint_pos self._processed_action = self._control_fn(self, actions_scaled) self._processed_action = self._apply_limits(self._processed_action) # Adding noise to the control signal to enhance robustness joint_ids = [v for v in self._joint_ids.values()] actions_noise = ( (torch.rand_like(self._processed_actions) * 2.0 - 1.0) * self.rfi_lim * self._effort_limit[:, joint_ids] ) self._processed_actions += actions_noise
Applies the current action to the robot.
_apply_action
python
NVlabs/HOVER
neural_wbc/inference_env/inference_env/neural_wbc_env.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/inference_env/inference_env/neural_wbc_env.py
Apache-2.0
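The noise term added in _apply_action above, reproduced standalone: a uniform sample in [-rfi_lim, +rfi_lim] scaled per joint by the effort limit (shapes and values are illustrative):

import torch

rfi_lim = torch.full((1, 19), 0.1)           # e.g. 10% of the effort limit
effort_limit = torch.full((1, 19), 300.0)    # placeholder per-joint limits
noise = (torch.rand_like(rfi_lim) * 2.0 - 1.0) * rfi_lim * effort_limit
# each entry of noise lies in [-30, 30] here and is added to the processed action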
def _compose_body_state( self, extend_body_pos: torch.Tensor | None = None, extend_body_parent_ids: list[int] | None = None ) -> BodyState: """Compose the Body state from the robot/articulation data. Args: extend_body_pos (torch.Tensor | None, optional): Extended body positions. Defaults to None. extend_body_parent_ids (list[int] | None, optional): Extended body parent ids. Defaults to None. Returns: BodyState: Composed body state """ lin_vel, ang_vel = self.robot.body_velocities body_state = BodyState( body_pos=self.robot.body_positions, body_rot=self.robot.body_rotations, body_lin_vel=lin_vel, body_ang_vel=ang_vel, joint_pos=self.robot.joint_positions, joint_vel=self.robot.joint_velocities, root_id=self._base_id, ) if (extend_body_pos is not None) and (extend_body_parent_ids is not None): body_state.extend_body_states( extend_body_pos=extend_body_pos, extend_body_parent_ids=extend_body_parent_ids ) return body_state
Compose the Body state from the robot/articulation data. Args: extend_body_pos (torch.Tensor | None, optional): Extended body positions. Defaults to None. extend_body_parent_ids (list[int] | None, optional): Extended body parent ids. Defaults to None. Returns: BodyState: Composed body state
_compose_body_state
python
NVlabs/HOVER
neural_wbc/inference_env/inference_env/neural_wbc_env.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/inference_env/inference_env/neural_wbc_env.py
Apache-2.0
def _compute_observations(self): """Compute the observations Returns: dict: Observations """ self._robot.update({}) # NOTE: not including the privileged observations so far obs_dict = {} ref_motion_state = self.reference_motion_manager.get_state_from_motion_lib_cache( episode_length_buf=self.episode_length_buf, offset=self._start_positions_on_terrain, terrain_heights=self.robot.get_terrain_heights(), ) self._robot.visualize(ref_motion_state=ref_motion_state) current_body_state = self._compose_body_state( extend_body_pos=self.extend_body_pos, extend_body_parent_ids=self.extend_body_parent_ids ) self.extras["data"] = { "mask": self._mask.detach().clone(), "state": { "body_pos": current_body_state.body_pos_extend.detach().clone(), "joint_pos": current_body_state.joint_pos.detach().clone(), "root_pos": current_body_state.root_pos.detach().clone(), "root_rot": current_body_state.root_rot.detach().clone(), "root_lin_vel": current_body_state.root_lin_vel.detach().clone(), }, "ground_truth": { "body_pos": ref_motion_state.body_pos_extend.detach().clone(), "joint_pos": ref_motion_state.joint_pos.detach().clone(), "root_pos": ref_motion_state.root_pos.detach().clone(), "root_rot": ref_motion_state.root_rot.detach().clone(), "root_lin_vel": ref_motion_state.root_lin_vel.detach().clone(), }, "upper_joint_ids": self.cfg.upper_body_joint_ids, "lower_joint_ids": self.cfg.lower_body_joint_ids, } base_gravity = self.robot.get_base_projected_gravity(self._base_name) # The angular velocity from the Unitree H1 IMU is already provided in the robot's local frame. # However, in MuJoCo, the angular velocity in the local frame must be computed using world frame mjc body data. local_base_ang_velocity = ( self.robot.get_base_angular_velocity(self._base_name) if self.cfg.robot == "unitree_h1" else None ) student_obs, student_obs_dict = compute_student_observations( base_id=self._base_id, body_state=current_body_state, ref_motion_state=ref_motion_state, projected_gravity=base_gravity, last_actions=self.actions, history=self.history.entries, ref_episodic_offset=None, mask=self._mask, local_base_ang_velocity=local_base_ang_velocity, ) obs_dict.update(student_obs_dict) obs_dict["student_policy"] = student_obs return obs_dict
Compute the observations Returns: dict: Observations
_compute_observations
python
NVlabs/HOVER
neural_wbc/inference_env/inference_env/neural_wbc_env.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/inference_env/inference_env/neural_wbc_env.py
Apache-2.0
def _get_dones(self) -> tuple[torch.Tensor, torch.Tensor]: """Get the done flags. Returns: tuple[torch.Tensor, torch.Tensor]: Should terminate flags and time out flags """ time_out = self.episode_length_buf >= self.max_episode_length - 1 ref_motion_state = self.reference_motion_manager.get_state_from_motion_lib_cache( episode_length_buf=self.episode_length_buf, offset=self._start_positions_on_terrain, terrain_heights=self.robot.get_terrain_heights(), ) current_body_state = self._compose_body_state( extend_body_pos=self.extend_body_pos, extend_body_parent_ids=self.extend_body_parent_ids ) # We don't yet use the correct contact forces in mujoco, therefore we just mock the values # to 0, s.t. this condition never triggers. # Dim: (num_envs, history_size, num_bodies, 3) net_contact_forces = torch.zeros(self.num_envs, 1, 3, device=self.device) undesired_contact_body_ids = [0] should_terminate, self._termination_conditions = check_termination_conditions( training_mode=self.cfg.mode.is_training_mode(), body_state=current_body_state, ref_motion_state=ref_motion_state, projected_gravity=self.robot.get_base_projected_gravity(self._base_name), gravity_x_threshold=self.cfg.gravity_x_threshold, gravity_y_threshold=self.cfg.gravity_y_threshold, ref_motion_mgr=self.reference_motion_manager, episode_times=self._get_episode_times(), max_ref_motion_dist=self.cfg.max_ref_motion_dist, in_recovery=None, net_contact_forces=net_contact_forces, undesired_contact_body_ids=undesired_contact_body_ids, mask=self._mask, ) self.extras["termination_conditions"] = self._termination_conditions self.extras["time_outs"] = time_out return should_terminate, time_out
Get the done flags. Returns: tuple[torch.Tensor, torch.Tensor]: Should terminate flags and time out flags
_get_dones
python
NVlabs/HOVER
neural_wbc/inference_env/inference_env/neural_wbc_env.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/inference_env/inference_env/neural_wbc_env.py
Apache-2.0
def build_body_state( data: ArticulationData, root_id: int, body_ids: list[int] | None = None, joint_ids: list[int] | None = None, extend_body_pos: torch.Tensor | None = None, extend_body_parent_ids: list[int] | None = None, ) -> BodyState: """Creates a body state from Isaac Lab articulation data. Args: data (ArticulationData): Articulation data containing robot's body and joint states. body_ids (list[int] | None, optional): The desired order of bodies. If not, the order in body states is preserved. Defaults to None. joint_ids (list[int] | None, optional): The desired order of joint. If not, the order in joint states is preserved. Defaults to None. extend_body_parent_ids (list[int] | None, optional): ID of the bodies to extend. Defaults to None. extend_body_pos (torch.Tensor | None, optional): Position of the extended bodies from their parent bodies. Defaults to None. Returns: BodyState: The constructed BodyState object containing the reordered and extended body and joint states. """ if body_ids is None: num_bodies = data.body_pos_w.shape[1] body_ids = list(range(0, num_bodies)) if joint_ids is None: num_joints = data.joint_pos.shape[1] joint_ids = list(range(0, num_joints)) body_state = BodyState( body_pos=data.body_pos_w[:, body_ids, :], body_rot=data.body_quat_w[:, body_ids, :], body_lin_vel=data.body_lin_vel_w[:, body_ids, :], body_ang_vel=data.body_ang_vel_w[:, body_ids, :], joint_pos=data.joint_pos[:, joint_ids], joint_vel=data.joint_vel[:, joint_ids], root_id=root_id, ) if (extend_body_pos is not None) and (extend_body_parent_ids is not None): body_state.extend_body_states(extend_body_pos=extend_body_pos, extend_body_parent_ids=extend_body_parent_ids) return body_state
Creates a body state from Isaac Lab articulation data. Args: data (ArticulationData): Articulation data containing robot's body and joint states. body_ids (list[int] | None, optional): The desired order of bodies. If not, the order in body states is preserved. Defaults to None. joint_ids (list[int] | None, optional): The desired order of joint. If not, the order in joint states is preserved. Defaults to None. extend_body_parent_ids (list[int] | None, optional): ID of the bodies to extend. Defaults to None. extend_body_pos (torch.Tensor | None, optional): Position of the extended bodies from their parent bodies. Defaults to None. Returns: BodyState: The constructed BodyState object containing the reordered and extended body and joint states.
build_body_state
python
NVlabs/HOVER
neural_wbc/isaac_lab_wrapper/neural_wbc/isaac_lab_wrapper/body_state.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/isaac_lab_wrapper/neural_wbc/isaac_lab_wrapper/body_state.py
Apache-2.0
def _reset_mask(self, env_ids: torch.Tensor | None = None): """ Reset the mask used to select which parts of the reference state should be tracked. Every environment uses a different mask. """ if self.cfg.mode.is_distill_mode() or self.cfg.mode.is_distill_test_mode(): if env_ids is None or len(env_ids) == self.num_envs: env_ids = self._robot._ALL_INDICES # Masks are only needed for distillation and testing. self._mask[env_ids] = mask.create_mask( mask_element_names=self.mask_element_names, mask_modes=self.cfg.distill_mask_modes, enable_sparsity_randomization=self.cfg.distill_mask_sparsity_randomization_enabled, device=self.device, num_envs=len(env_ids), )
Reset the mask used to select which parts of the reference state should be tracked. Every environment uses a different mask.
_reset_mask
python
NVlabs/HOVER
neural_wbc/isaac_lab_wrapper/neural_wbc/isaac_lab_wrapper/neural_wbc_env.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/isaac_lab_wrapper/neural_wbc/isaac_lab_wrapper/neural_wbc_env.py
Apache-2.0
def convert_tensors_and_slices_to_serializable(d): """Recursively convert torch tensors to lists and handle slice objects in a nested dictionary, including lists and tuples.""" if isinstance(d, dict): return {k: convert_tensors_and_slices_to_serializable(v) for k, v in d.items()} elif isinstance(d, torch.Tensor): return {"__type__": "tensor", "data": d.tolist()} elif isinstance(d, list): return [convert_tensors_and_slices_to_serializable(item) for item in d] elif isinstance(d, tuple): return {"__type__": "tuple", "data": tuple(convert_tensors_and_slices_to_serializable(item) for item in d)} elif isinstance(d, slice): return {"__type__": "slice", "start": d.start, "stop": d.stop, "step": d.step} else: return d
Recursively convert torch tensors to lists and handle slice objects in a nested dictionary, including lists and tuples.
convert_tensors_and_slices_to_serializable
python
NVlabs/HOVER
neural_wbc/isaac_lab_wrapper/neural_wbc/isaac_lab_wrapper/utils.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/isaac_lab_wrapper/neural_wbc/isaac_lab_wrapper/utils.py
Apache-2.0
def convert_serializable_to_tensors_and_slices(d): """Recursively convert lists back to torch tensors and dictionaries back to slice objects.""" if isinstance(d, dict): if "__type__" in d: if d["__type__"] == "tensor": return torch.tensor(d["data"]) elif d["__type__"] == "slice": return slice(d["start"], d["stop"], d["step"]) elif d["__type__"] == "tuple": return tuple(convert_serializable_to_tensors_and_slices(item) for item in d["data"]) else: return {k: convert_serializable_to_tensors_and_slices(v) for k, v in d.items()} elif isinstance(d, list): return [convert_serializable_to_tensors_and_slices(item) for item in d] else: return d
Recursively convert lists back to torch tensors and dictionaries back to slice objects.
convert_serializable_to_tensors_and_slices
python
NVlabs/HOVER
neural_wbc/isaac_lab_wrapper/neural_wbc/isaac_lab_wrapper/utils.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/isaac_lab_wrapper/neural_wbc/isaac_lab_wrapper/utils.py
Apache-2.0
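A round-trip example for the two helpers above (assuming both are imported): tensors, tuples and slices survive a pass through JSON.

import json
import torch

payload = {"ids": torch.tensor([1, 2, 3]), "window": slice(0, 10, 2), "shape": (4, 5)}
text = json.dumps(convert_tensors_and_slices_to_serializable(payload))
restored = convert_serializable_to_tensors_and_slices(json.loads(text))
assert torch.equal(restored["ids"], payload["ids"])
assert restored["window"] == slice(0, 10, 2) and restored["shape"] == (4, 5)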
def randomize_body_com( env: NeuralWBCEnv, env_ids: torch.Tensor | None, asset_cfg: SceneEntityCfg, distribution_params: tuple[float, float] | tuple[torch.Tensor, torch.Tensor], operation: Literal["add", "abs", "scale"], distribution: Literal["uniform", "log_uniform", "gaussian"] = "uniform", ): """Randomize the com of the bodies by adding, scaling or setting random values. This function allows randomizing the center of mass of the bodies of the asset. The function samples random values from the given distribution parameters and adds, scales or sets the values into the physics simulation based on the operation. .. tip:: This function uses CPU tensors to assign the body masses. It is recommended to use this function only during the initialization of the environment. """ # extract the used quantities (to enable type-hinting) asset: RigidObject | Articulation = env.scene[asset_cfg.name] # resolve environment ids if env_ids is None: env_ids = torch.arange(env.scene.num_envs, device="cpu") else: env_ids = env_ids.cpu() # resolve body indices if asset_cfg.body_ids == slice(None): body_ids = torch.arange(asset.num_bodies, dtype=torch.int, device="cpu") else: body_ids = torch.tensor(asset_cfg.body_ids, dtype=torch.int, device="cpu") # get the current masses of the bodies (num_assets, num_bodies) coms = asset.root_physx_view.get_coms() if not hasattr(env, "default_coms"): # Randomize robot base com env.default_coms = coms.clone() env.base_com_bias = torch.zeros((env.num_envs, 3), dtype=torch.float, device=coms.device) # apply randomization on default values coms[env_ids[:, None], body_ids] = env.default_coms[env_ids[:, None], body_ids].clone() dist_fn = resolve_dist_fn(distribution) if isinstance(distribution_params[0], torch.Tensor): distribution_params = (distribution_params[0].to(coms.device), distribution_params[1].to(coms.device)) env.base_com_bias[env_ids, :] = dist_fn( *distribution_params, (env_ids.shape[0], env.base_com_bias.shape[1]), device=coms.device ) # sample from the given range if operation == "add": coms[env_ids[:, None], body_ids, :3] += env.base_com_bias[env_ids[:, None], :] elif operation == "abs": coms[env_ids[:, None], body_ids, :3] = env.base_com_bias[env_ids[:, None], :] elif operation == "scale": coms[env_ids[:, None], body_ids, :3] *= env.base_com_bias[env_ids[:, None], :] else: raise ValueError( f"Unknown operation: '{operation}' for property randomization. Please use 'add', 'abs' or 'scale'." ) # set the mass into the physics simulation asset.root_physx_view.set_coms(coms, env_ids)
Randomize the CoM of the bodies by adding, scaling or setting random values. This function allows randomizing the center of mass of the bodies of the asset. The function samples random values from the given distribution parameters and adds, scales or sets the values into the physics simulation based on the operation. .. tip:: This function uses CPU tensors to assign the body centers of mass. It is recommended to use this function only during the initialization of the environment.
randomize_body_com
python
NVlabs/HOVER
neural_wbc/isaac_lab_wrapper/neural_wbc/isaac_lab_wrapper/events/events.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/isaac_lab_wrapper/neural_wbc/isaac_lab_wrapper/events/events.py
Apache-2.0
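resolve_dist_fn is not shown in this section; a plausible sketch matching the call pattern used above, dist_fn(low, high, size, device), might look like this (an assumption, not the repository's implementation):

import torch

def resolve_dist_fn(distribution: str):
    def uniform(low, high, size, device):
        low = torch.as_tensor(low, dtype=torch.float, device=device)
        high = torch.as_tensor(high, dtype=torch.float, device=device)
        return torch.rand(size, device=device) * (high - low) + low

    def log_uniform(low, high, size, device):
        # sample uniformly in log space, then exponentiate
        log_low = torch.log(torch.as_tensor(low, dtype=torch.float))
        log_high = torch.log(torch.as_tensor(high, dtype=torch.float))
        return torch.exp(uniform(log_low, log_high, size, device))

    def gaussian(mean, std, size, device):
        return torch.randn(size, device=device) * std + mean

    return {"uniform": uniform, "log_uniform": log_uniform, "gaussian": gaussian}[distribution]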
def randomize_pd_scale( env: NeuralWBCEnv, env_ids: torch.Tensor | None, distribution_params: tuple[float, float], operation: Literal["add", "abs", "scale"], distribution: Literal["uniform", "log_uniform", "gaussian"] = "uniform", ): """Randomize the scale of the pd gains by adding, scaling or setting random values. This function allows randomizing the scale of the pd gain. The function samples random values from the given distribution parameters and adds, or sets the values into the simulation based on the operation. """ # resolve environment ids if env_ids is None: env_ids = torch.arange(env.scene.num_envs, device="cpu") else: env_ids = env_ids.cpu() # apply randomization on default values kp_scale = env.default_kp_scale.clone() kd_scale = env.default_kd_scale.clone() dist_fn = resolve_dist_fn(distribution) # sample from the given range if operation == "add": kp_scale[env_ids, :] += dist_fn( *distribution_params, (env_ids.shape[0], kp_scale.shape[1]), device=kp_scale.device ) kd_scale[env_ids, :] += dist_fn( *distribution_params, (env_ids.shape[0], kd_scale.shape[1]), device=kd_scale.device ) elif operation == "abs": kp_scale[env_ids, :] = dist_fn( *distribution_params, (env_ids.shape[0], kp_scale.shape[1]), device=kp_scale.device ) kd_scale[env_ids, :] = dist_fn( *distribution_params, (env_ids.shape[0], kd_scale.shape[1]), device=kd_scale.device ) elif operation == "scale": kp_scale[env_ids, :] *= dist_fn( *distribution_params, (env_ids.shape[0], kp_scale.shape[1]), device=kp_scale.device ) kd_scale[env_ids, :] *= dist_fn( *distribution_params, (env_ids.shape[0], kd_scale.shape[1]), device=kd_scale.device ) else: raise ValueError( f"Unknown operation: '{operation}' for property randomization. Please use 'add', 'abs' or 'scale'." ) # set the mass into the physics simulation env.kp_scale[env_ids, :] = kp_scale[env_ids, :] env.kd_scale[env_ids, :] = kd_scale[env_ids, :]
Randomize the scale of the PD gains by adding, scaling or setting random values. This function allows randomizing the scale of the PD gains. The function samples random values from the given distribution parameters and adds, scales, or sets the values into the simulation based on the operation.
randomize_pd_scale
python
NVlabs/HOVER
neural_wbc/isaac_lab_wrapper/neural_wbc/isaac_lab_wrapper/events/events.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/isaac_lab_wrapper/neural_wbc/isaac_lab_wrapper/events/events.py
Apache-2.0
def randomize_action_noise_range( env: NeuralWBCEnv, env_ids: torch.Tensor | None, distribution_params: tuple[float, float], operation: Literal["add", "abs", "scale"], distribution: Literal["uniform", "log_uniform", "gaussian"] = "uniform", ): """Randomize the sample range of the added action noise by adding, scaling or setting random values. This function allows randomizing the scale of the sample range of the added action noise. The function samples random values from the given distribution parameters and adds, scales or sets the values into the simulation based on the operation. """ # resolve environment ids if env_ids is None: env_ids = torch.arange(env.scene.num_envs, device="cpu") else: env_ids = env_ids.cpu() # apply randomization on default values rfi_lim = env.default_rfi_lim.clone() dist_fn = resolve_dist_fn(distribution) # sample from the given range if operation == "add": rfi_lim[env_ids, :] += dist_fn( *distribution_params, (env_ids.shape[0], rfi_lim.shape[1]), device=rfi_lim.device ) elif operation == "abs": rfi_lim[env_ids, :] = dist_fn(*distribution_params, (env_ids.shape[0], rfi_lim.shape[1]), device=rfi_lim.device) elif operation == "scale": rfi_lim[env_ids, :] *= dist_fn( *distribution_params, (env_ids.shape[0], rfi_lim.shape[1]), device=rfi_lim.device ) else: raise ValueError( f"Unknown operation: '{operation}' for property randomization. Please use 'add', 'abs' or 'scale'." ) # set the mass into the physics simulation env.rfi_lim[env_ids, :] = rfi_lim[env_ids, :]
Randomize the sample range of the added action noise by adding, scaling or setting random values. This function allows randomizing the scale of the sample range of the added action noise. The function samples random values from the given distribution parameters and adds, scales or sets the values into the simulation based on the operation.
randomize_action_noise_range
python
NVlabs/HOVER
neural_wbc/isaac_lab_wrapper/neural_wbc/isaac_lab_wrapper/events/events.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/isaac_lab_wrapper/neural_wbc/isaac_lab_wrapper/events/events.py
Apache-2.0
def randomize_motion_ref_xyz( env: NeuralWBCEnv, env_ids: torch.Tensor | None, distribution_params: tuple[float, float] | tuple[torch.Tensor, torch.Tensor], operation: Literal["add", "abs", "scale"], distribution: Literal["uniform", "log_uniform", "gaussian"] = "uniform", ): """Randomize the motion reference x,y,z offset by adding, scaling or setting random values. This function allows randomizing the motion reference x,y,z offset. The function samples random values from the given distribution parameters and adds, scales or sets the values into the simulation based on the operation. """ # resolve environment ids if env_ids is None: env_ids = torch.arange(env.scene.num_envs, device="cpu") else: env_ids = env_ids.cpu() # apply randomization on default values ref_episodic_offset = env.default_ref_episodic_offset.clone() dist_fn = resolve_dist_fn(distribution) if isinstance(distribution_params[0], torch.Tensor): distribution_params = ( distribution_params[0].to(ref_episodic_offset.device), distribution_params[1].to(ref_episodic_offset.device), ) # sample from the given range if operation == "add": ref_episodic_offset[env_ids, :] += dist_fn( *distribution_params, (env_ids.shape[0], ref_episodic_offset.shape[1]), device=ref_episodic_offset.device, ) elif operation == "abs": ref_episodic_offset[env_ids, :] = dist_fn( *distribution_params, (env_ids.shape[0], ref_episodic_offset.shape[1]), device=ref_episodic_offset.device, ) elif operation == "scale": ref_episodic_offset[env_ids, :] *= dist_fn( *distribution_params, (env_ids.shape[0], ref_episodic_offset.shape[1]), device=ref_episodic_offset.device, ) else: raise ValueError( f"Unknown operation: '{operation}' for property randomization. Please use 'add', 'abs' or 'scale'." ) # set the mass into the physics simulation env.ref_episodic_offset[env_ids, :] = ref_episodic_offset[env_ids, :]
Randomize the motion reference x,y,z offset by adding, scaling or setting random values. This function allows randomizing the motion reference x,y,z offset. The function samples random values from the given distribution parameters and adds, scales or sets the values into the simulation based on the operation.
randomize_motion_ref_xyz
python
NVlabs/HOVER
neural_wbc/isaac_lab_wrapper/neural_wbc/isaac_lab_wrapper/events/events.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/isaac_lab_wrapper/neural_wbc/isaac_lab_wrapper/events/events.py
Apache-2.0
def push_by_setting_velocity_with_recovery( env: NeuralWBCEnv, env_ids: torch.Tensor, velocity_range: dict[str, tuple[float, float]], asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), ): """Push the asset by setting the root velocity to a random value within the given ranges. This creates an effect similar to pushing the asset with a random impulse that changes the asset's velocity. It samples the root velocity from the given ranges and sets the velocity into the physics simulation. The function takes a dictionary of velocity ranges for each axis and rotation. The keys of the dictionary are ``x``, ``y``, ``z``, ``roll``, ``pitch``, and ``yaw``. The values are tuples of the form ``(min, max)``. If the dictionary does not contain a key, the velocity is set to zero for that axis. """ # extract the used quantities (to enable type-hinting) asset: RigidObject | Articulation = env.scene[asset_cfg.name] # velocities vel_w = asset.data.root_vel_w[env_ids] # sample random velocities range_list = [velocity_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z", "roll", "pitch", "yaw"]] ranges = torch.tensor(range_list, device=asset.device) vel_w[:] = math_utils.sample_uniform(ranges[:, 0], ranges[:, 1], vel_w.shape, device=asset.device) # set the velocities into the physics simulation asset.write_root_velocity_to_sim(vel_w, env_ids=env_ids) # give pushed robot time to recover env.recovery_counters[env_ids] = env.cfg.recovery_count
Push the asset by setting the root velocity to a random value within the given ranges. This creates an effect similar to pushing the asset with a random impulse that changes the asset's velocity. It samples the root velocity from the given ranges and sets the velocity into the physics simulation. The function takes a dictionary of velocity ranges for each axis and rotation. The keys of the dictionary are ``x``, ``y``, ``z``, ``roll``, ``pitch``, and ``yaw``. The values are tuples of the form ``(min, max)``. If the dictionary does not contain a key, the velocity is set to zero for that axis.
push_by_setting_velocity_with_recovery
python
NVlabs/HOVER
neural_wbc/isaac_lab_wrapper/neural_wbc/isaac_lab_wrapper/events/events.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/isaac_lab_wrapper/neural_wbc/isaac_lab_wrapper/events/events.py
Apache-2.0
def reset_robot_state_and_motion( env: NeuralWBCEnv, env_ids: torch.Tensor, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), ): """Reset the robot and reference motion. The full reset percess is: 1. Reset the robot root state to the origin position of its terrain. 2. Reset motion reference to random time step. 3. Moving the motion reference trajectory to the current robot position. 4. Reset the robot joint to reference motion's joint states 5. Reset the robot root state to the reference motion's root state. """ # extract the used quantities (to enable type-hinting) asset: RigidObject | Articulation = env.scene[asset_cfg.name] # get default root state root_states = asset.data.default_root_state[env_ids].clone() root_states[:, :3] += env._terrain.env_origins[env_ids] state = root_states[:, :7] asset.write_root_pose_to_sim(state, env_ids=env_ids) mdp.reset_joints_by_scale(env, env_ids, (1.0, 1.0), (0.0, 0.0), asset_cfg) # Sample new commands env._ref_motion_mgr.reset_motion_start_times(env_ids=env_ids, sample=env.cfg.mode.is_training_mode()) # Record new start locations on terrain env._start_positions_on_terrain[env_ids, ...] = root_states[:, :3] ref_motion_state: ReferenceMotionState = env._ref_motion_mgr.get_state_from_motion_lib_cache( episode_length_buf=0, offset=env._start_positions_on_terrain, terrain_heights=env.get_terrain_heights(), ) env._ref_motion_visualizer.visualize(ref_motion_state) joint_pos = ref_motion_state.joint_pos[env_ids] joint_vel = ref_motion_state.joint_vel[env_ids] asset.write_joint_state_to_sim(joint_pos, joint_vel, env._joint_ids, env_ids=env_ids) root_states = asset.data.default_root_state[env_ids].clone() root_states[:, :3] = ref_motion_state.root_pos[env_ids] root_states[:, 2] += 0.04 # in case under the terrain root_states[:, 3:7] = ref_motion_state.root_rot[env_ids] root_states[:, 7:10] = ref_motion_state.root_lin_vel[env_ids] root_states[:, 10:13] = ref_motion_state.root_ang_vel[env_ids] state = root_states[:, :7] velocities = root_states[:, 7:13] asset.write_root_pose_to_sim(state, env_ids=env_ids) asset.write_root_velocity_to_sim(velocities, env_ids=env_ids)
Reset the robot and reference motion. The full reset process is: 1. Reset the robot root state to the origin position of its terrain. 2. Reset the motion reference to a random time step. 3. Move the motion reference trajectory to the current robot position. 4. Reset the robot joints to the reference motion's joint states. 5. Reset the robot root state to the reference motion's root state.
reset_robot_state_and_motion
python
NVlabs/HOVER
neural_wbc/isaac_lab_wrapper/neural_wbc/isaac_lab_wrapper/events/events.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/isaac_lab_wrapper/neural_wbc/isaac_lab_wrapper/events/events.py
Apache-2.0
def update_curriculum( env: NeuralWBCEnv, env_ids: torch.Tensor | None, penalty_level_down_threshold: float, penalty_level_up_threshold: float, penalty_level_degree: float, min_penalty_scale: float, max_penalty_scale: float, num_compute_average_epl: float, ): """ Update average episode length and in turn penalty curriculum. This function is rewritten from update_average_episode_length of legged_gym. When the policy is not able to track the motions, we reduce the penalty to help it explore more actions. When the policy is able to track the motions, we increase the penalty to smooth the actions and reduce the maximum action it uses. """ N = env.num_envs if env_ids is None else len(env_ids) current_average_episode_length = torch.mean(env.episode_length_buf[env_ids], dtype=torch.float) env.average_episode_length = env.average_episode_length * ( 1 - N / num_compute_average_epl ) + current_average_episode_length * (N / num_compute_average_epl) if env.average_episode_length < penalty_level_down_threshold: env.penalty_scale *= 1 - penalty_level_degree elif env.average_episode_length > penalty_level_up_threshold: env.penalty_scale *= 1 + penalty_level_degree env.penalty_scale = np.clip(env.penalty_scale, min_penalty_scale, max_penalty_scale)
Update average episode length and in turn penalty curriculum. This function is rewritten from update_average_episode_length of legged_gym. When the policy is not able to track the motions, we reduce the penalty to help it explore more actions. When the policy is able to track the motions, we increase the penalty to smooth the actions and reduce the maximum action it uses.
update_curriculum
python
NVlabs/HOVER
neural_wbc/isaac_lab_wrapper/neural_wbc/isaac_lab_wrapper/events/events.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/isaac_lab_wrapper/neural_wbc/isaac_lab_wrapper/events/events.py
Apache-2.0
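Worked numbers for the curriculum update above (all values illustrative):

avg, penalty, degree = 120.0, 1.0, 0.05
batch_mean, N, num_compute_average_epl = 80.0, 100, 10000.0
avg = avg * (1 - N / num_compute_average_epl) + batch_mean * (N / num_compute_average_epl)
# avg == 119.6: the running average moves 1% of the way toward the batch mean
if avg < 125.0:             # penalty_level_down_threshold
    penalty *= 1 - degree   # tracking is poor -> relax penalties; penalty == 0.95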
def compute_reward( self, articulation_data: ArticulationData, body_state: BodyState, ref_motion_state: ReferenceMotionState, previous_actions: torch.Tensor, actions: torch.Tensor, reset_buf: torch.Tensor, timeout_buf: torch.Tensor, penalty_scale: float, ) -> tuple[torch.Tensor, dict]: """ Computes the total reward for the given environment and reference motion state. This function calculates the weighted sum of individual rewards specified in the environment's reward configuration. Each reward is computed using a corresponding reward function defined within the class. The final reward is a sum of these individual rewards, each scaled by a specified factor. Returns: tuple[torch.Tensor, dict]: A tuple containing: - reward_sum (torch.Tensor): The total computed reward for all environments. - rewards (dict): A dictionary with individual reward names as keys and their computed values as values. Raises: AttributeError: If a reward function corresponding to a reward name is not defined. """ reward_sum = torch.zeros([self._num_envs], device=self._device) rewards = {} for reward_name, scale in self._cfg.scales.items(): try: reward_fn = getattr(self, reward_name) except AttributeError: raise AttributeError(f"No reward or penalty function is defined for {reward_name}") rewards[reward_name] = reward_fn( body_state=body_state, ref_motion_state=ref_motion_state, articulation_data=articulation_data, previous_actions=previous_actions, actions=actions, reset_buf=reset_buf, timeout_buf=timeout_buf, ) if reward_name.startswith("penalize"): rewards[reward_name] *= penalty_scale reward_sum += rewards[reward_name] * scale return reward_sum, rewards
Computes the total reward for the given environment and reference motion state. This function calculates the weighted sum of individual rewards specified in the environment's reward configuration. Each reward is computed using a corresponding reward function defined within the class. The final reward is a sum of these individual rewards, each scaled by a specified factor. Returns: tuple[torch.Tensor, dict]: A tuple containing: - reward_sum (torch.Tensor): The total computed reward for all environments. - rewards (dict): A dictionary with individual reward names as keys and their computed values as values. Raises: AttributeError: If a reward function corresponding to a reward name is not defined.
compute_reward
python
NVlabs/HOVER
neural_wbc/isaac_lab_wrapper/neural_wbc/isaac_lab_wrapper/rewards/rewards.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/isaac_lab_wrapper/neural_wbc/isaac_lab_wrapper/rewards/rewards.py
Apache-2.0
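A hypothetical scales configuration for compute_reward above; each key must match a method name on the rewards class, and keys starting with "penalize" are additionally multiplied by the curriculum's penalty_scale. The weights and the penalty entry are illustrative, not taken from the repository:

scales = {
    "reward_track_joint_positions": 32.0,
    "reward_track_joint_velocities": 16.0,
    "reward_track_body_velocities": 8.0,
    "reward_track_body_angular_velocities": 8.0,
    "penalize_torques": -0.0001,   # hypothetical penalty term
}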
def reward_track_joint_positions( self, body_state: BodyState, ref_motion_state: ReferenceMotionState, **kwargs, ) -> torch.Tensor: """ Computes the reward for tracking the joint positions of the reference motion. This function is rewritten from _reward_teleop_selected_joint_position of legged_gym. Returns: torch.Tensor: A float tensor of shape (num_envs) representing the computed reward for each environment. """ joint_pos = body_state.joint_pos ref_joint_pos = ref_motion_state.joint_pos mean_joint_pos_diff_squared = torch.mean(torch.square(ref_joint_pos - joint_pos), dim=1) return torch.exp(-mean_joint_pos_diff_squared / self._cfg.joint_pos_sigma)
Computes the reward for tracking the joint positions of the reference motion. This function is rewritten from _reward_teleop_selected_joint_position of legged_gym. Returns: torch.Tensor: A float tensor of shape (num_envs) representing the computed reward for each environment.
reward_track_joint_positions
python
NVlabs/HOVER
neural_wbc/isaac_lab_wrapper/neural_wbc/isaac_lab_wrapper/rewards/rewards.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/isaac_lab_wrapper/neural_wbc/isaac_lab_wrapper/rewards/rewards.py
Apache-2.0
def reward_track_joint_velocities( self, body_state: BodyState, ref_motion_state: ReferenceMotionState, **kwargs, ) -> torch.Tensor: """ Computes the reward for tracking the joint velocities of the reference motion. This function is rewritten from _reward_teleop_selected_joint_vel of legged_gym. Returns: torch.Tensor: A float tensor of shape (num_envs) representing the computed reward for each environment. """ joint_vel = body_state.joint_vel ref_joint_vel = ref_motion_state.joint_vel mean_joint_vel_diff_squared = torch.mean(torch.square(ref_joint_vel - joint_vel), dim=1) return torch.exp(-mean_joint_vel_diff_squared / self._cfg.joint_vel_sigma)
Computes the reward for tracking the joint velocities of the reference motion. This function is rewritten from _reward_teleop_selected_joint_vel of legged_gym. Returns: torch.Tensor: A float tensor of shape (num_envs) representing the computed reward for each environment.
reward_track_joint_velocities
python
NVlabs/HOVER
neural_wbc/isaac_lab_wrapper/neural_wbc/isaac_lab_wrapper/rewards/rewards.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/isaac_lab_wrapper/neural_wbc/isaac_lab_wrapper/rewards/rewards.py
Apache-2.0
def reward_track_body_velocities( self, body_state: BodyState, ref_motion_state: ReferenceMotionState, **kwargs, ) -> torch.Tensor: """ Computes a reward based on the difference between the body's velocity and the reference motion's velocity. This function is rewritten from _reward_teleop_body_vel of legged_gym. Returns: torch.Tensor: A float tensor of shape (num_envs) representing the computed reward for each environment. """ body_vel = body_state.body_lin_vel ref_body_vel = ref_motion_state.body_lin_vel diff_vel = ref_body_vel - body_vel mean_diff_vel_squared = (diff_vel**2).mean(dim=-1).mean(dim=-1) return torch.exp(-mean_diff_vel_squared / self._cfg.body_vel_sigma)
Computes a reward based on the difference between the body's velocity and the reference motion's velocity. This function is rewritten from _reward_teleop_body_vel of legged_gym. Returns: torch.Tensor: A float tensor of shape (num_envs) representing the computed reward for each environment.
reward_track_body_velocities
python
NVlabs/HOVER
neural_wbc/isaac_lab_wrapper/neural_wbc/isaac_lab_wrapper/rewards/rewards.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/isaac_lab_wrapper/neural_wbc/isaac_lab_wrapper/rewards/rewards.py
Apache-2.0
def reward_track_body_angular_velocities( self, body_state: BodyState, ref_motion_state: ReferenceMotionState, **kwargs, ) -> torch.Tensor: """ Computes a reward based on the difference between the body's angular velocity and the reference motion's angular velocity. This function is rewritten from _reward_teleop_body_ang_vel of legged_gym. Returns: torch.Tensor: A float tensor of shape (num_envs) representing the computed reward for each environment. """ body_ang_vel = body_state.body_ang_vel ref_body_ang_vel = ref_motion_state.body_ang_vel diff_ang_vel = ref_body_ang_vel - body_ang_vel mean_diff_ang_vel_squared = (diff_ang_vel**2).mean(dim=-1).mean(dim=-1) return torch.exp(-mean_diff_ang_vel_squared / self._cfg.body_ang_vel_sigma)
Computes a reward based on the difference between the body's angular velocity and the reference motion's angular velocity. This function is rewritten from _reward_teleop_body_ang_vel of legged_gym. Returns: torch.Tensor: A float tensor of shape (num_envs) representing the computed reward for each environment.
reward_track_body_angular_velocities
python
NVlabs/HOVER
neural_wbc/isaac_lab_wrapper/neural_wbc/isaac_lab_wrapper/rewards/rewards.py
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/isaac_lab_wrapper/neural_wbc/isaac_lab_wrapper/rewards/rewards.py
Apache-2.0
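All four tracking rewards above share the same kernel: a mean-squared tracking error pushed through exp(-error / sigma). A small standalone illustration:

import torch

def tracking_reward(value: torch.Tensor, reference: torch.Tensor, sigma: float) -> torch.Tensor:
    mse = torch.mean(torch.square(reference - value), dim=-1)
    return torch.exp(-mse / sigma)

joint_pos = torch.zeros(2, 19)  # two envs, placeholder joint dimension
ref_joint_pos = torch.cat([torch.zeros(1, 19), 0.5 * torch.ones(1, 19)])
print(tracking_reward(joint_pos, ref_joint_pos, sigma=0.25))
# ~[1.00, 0.37]: perfect tracking vs. a uniform 0.5 rad error with sigma = 0.25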