Dataset schema: code (string, 66 to 870k chars) | docstring (string, 19 to 26.7k chars) | func_name (string, 1 to 138 chars) | language (1 class) | repo (string, 7 to 68 chars) | path (string, 5 to 324 chars) | url (string, 46 to 389 chars) | license (7 classes)
def filtrate_objects(self, obj_list):
"""
Discard objects which are not in self.classes (or its similar classes)
:param obj_list: list
:return: list
"""
type_whitelist = self.classes
if self.mode == 'TRAIN' and cfg.INCLUDE_SIMILAR_TYPE:
type_whitelist = list(self.classes)
if 'Car' in self.classes:
type_whitelist.append('Van')
if 'Pedestrian' in self.classes: # or 'Cyclist' in self.classes:
type_whitelist.append('Person_sitting')
valid_obj_list = []
for obj in obj_list:
if obj.cls_type not in type_whitelist: # rm Van, 20180928
continue
if self.mode == 'TRAIN' and cfg.PC_REDUCE_BY_RANGE and not self.check_pc_range(obj.pos):
continue
valid_obj_list.append(obj)
return valid_obj_list

filtrate_objects | python | sshaoshuai/PointRCNN | lib/datasets/kitti_rcnn_dataset.py | MIT
https://github.com/sshaoshuai/PointRCNN/blob/master/lib/datasets/kitti_rcnn_dataset.py

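A minimal, self-contained sketch of the whitelist expansion above; `filter_by_class` and the SimpleNamespace objects are stand-ins invented for illustration, not repo APIs:

from types import SimpleNamespace

def filter_by_class(obj_list, classes, include_similar=False):
    whitelist = list(classes)
    if include_similar:
        if 'Car' in classes:
            whitelist.append('Van')              # Vans count as Cars during training
        if 'Pedestrian' in classes:
            whitelist.append('Person_sitting')
    return [obj for obj in obj_list if obj.cls_type in whitelist]

objs = [SimpleNamespace(cls_type=c) for c in ('Car', 'Van', 'Cyclist')]
print([o.cls_type for o in filter_by_class(objs, ['Car'], include_similar=True)])
# -> ['Car', 'Van']
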
def get_valid_flag(pts_rect, pts_img, pts_rect_depth, img_shape):
"""
Valid point should be in the image (and in the PC_AREA_SCOPE)
:param pts_rect: (N, 3), points in rect camera coordinates
:param pts_img: (N, 2), projected (u, v) image coordinates
:param pts_rect_depth: (N), depth of each point
:param img_shape: (H, W) of the image
:return: (N), bool mask of valid points
"""
val_flag_1 = np.logical_and(pts_img[:, 0] >= 0, pts_img[:, 0] < img_shape[1])
val_flag_2 = np.logical_and(pts_img[:, 1] >= 0, pts_img[:, 1] < img_shape[0])
val_flag_merge = np.logical_and(val_flag_1, val_flag_2)
pts_valid_flag = np.logical_and(val_flag_merge, pts_rect_depth >= 0)
if cfg.PC_REDUCE_BY_RANGE:
x_range, y_range, z_range = cfg.PC_AREA_SCOPE
pts_x, pts_y, pts_z = pts_rect[:, 0], pts_rect[:, 1], pts_rect[:, 2]
range_flag = (pts_x >= x_range[0]) & (pts_x <= x_range[1]) \
& (pts_y >= y_range[0]) & (pts_y <= y_range[1]) \
& (pts_z >= z_range[0]) & (pts_z <= z_range[1])
pts_valid_flag = pts_valid_flag & range_flag
return pts_valid_flag

get_valid_flag | python | sshaoshuai/PointRCNN | lib/datasets/kitti_rcnn_dataset.py | MIT
https://github.com/sshaoshuai/PointRCNN/blob/master/lib/datasets/kitti_rcnn_dataset.py

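The masking logic above can be checked with a few synthetic points. The image size and point values below are made up, img_shape is (H, W) as in the function, and the PC_AREA_SCOPE range check is skipped:

import numpy as np

pts_img = np.array([[100., 200.], [-5., 50.], [1300., 10.]])   # (u, v) pairs
pts_rect_depth = np.array([10., 5., 3.])
img_shape = (375, 1242)                                        # (H, W)

in_u = (pts_img[:, 0] >= 0) & (pts_img[:, 0] < img_shape[1])
in_v = (pts_img[:, 1] >= 0) & (pts_img[:, 1] < img_shape[0])
valid = in_u & in_v & (pts_rect_depth >= 0)
print(valid)                                                   # [ True False False]
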
def aug_roi_by_noise(self, roi_info):
"""
add noise to the original roi to get aug_box3d
:param roi_info: dict with keys 'roi_box3d' (7), 'gt_box3d' (7), 'iou3d' (float) and 'type'
:return: aug_box3d: (7)
"""
roi_box3d, gt_box3d = roi_info['roi_box3d'], roi_info['gt_box3d']
original_iou = roi_info['iou3d']
temp_iou = cnt = 0
pos_thresh = min(cfg.RCNN.REG_FG_THRESH, cfg.RCNN.CLS_FG_THRESH)
gt_corners = kitti_utils.boxes3d_to_corners3d(gt_box3d.reshape(-1, 7))
aug_box3d = roi_box3d
while temp_iou < pos_thresh and cnt < 10:
if roi_info['type'] == 'gt':
aug_box3d = self.random_aug_box3d(roi_box3d) # GT, must random
else:
if np.random.rand() < 0.2:
aug_box3d = roi_box3d # p=0.2 to keep the original roi box
else:
aug_box3d = self.random_aug_box3d(roi_box3d)
aug_corners = kitti_utils.boxes3d_to_corners3d(aug_box3d.reshape(-1, 7))
iou3d = kitti_utils.get_iou3d(aug_corners, gt_corners)
temp_iou = iou3d[0][0]
cnt += 1
if original_iou < pos_thresh: # original bg, break
break
return aug_box3d

aug_roi_by_noise | python | sshaoshuai/PointRCNN | lib/datasets/kitti_rcnn_dataset.py | MIT
https://github.com/sshaoshuai/PointRCNN/blob/master/lib/datasets/kitti_rcnn_dataset.py

def random_aug_box3d(box3d):
"""
:param box3d: (7) [x, y, z, h, w, l, ry]
random shift, scale, orientation
"""
if cfg.RCNN.REG_AUG_METHOD == 'single':
pos_shift = (np.random.rand(3) - 0.5) # [-0.5 ~ 0.5]
hwl_scale = (np.random.rand(3) - 0.5) / (0.5 / 0.15) + 1.0  # [0.85 ~ 1.15]
angle_rot = (np.random.rand(1) - 0.5) / (0.5 / (np.pi / 12)) # [-pi/12 ~ pi/12]
aug_box3d = np.concatenate([box3d[0:3] + pos_shift, box3d[3:6] * hwl_scale,
box3d[6:7] + angle_rot])
return aug_box3d
elif cfg.RCNN.REG_AUG_METHOD == 'multiple':
# pos_range, hwl_range, angle_range, mean_iou
range_config = [[0.2, 0.1, np.pi / 12, 0.7],
[0.3, 0.15, np.pi / 12, 0.6],
[0.5, 0.15, np.pi / 9, 0.5],
[0.8, 0.15, np.pi / 6, 0.3],
[1.0, 0.15, np.pi / 3, 0.2]]
idx = np.random.randint(len(range_config))
pos_shift = ((np.random.rand(3) - 0.5) / 0.5) * range_config[idx][0]
hwl_scale = ((np.random.rand(3) - 0.5) / 0.5) * range_config[idx][1] + 1.0
angle_rot = ((np.random.rand(1) - 0.5) / 0.5) * range_config[idx][2]
aug_box3d = np.concatenate([box3d[0:3] + pos_shift, box3d[3:6] * hwl_scale, box3d[6:7] + angle_rot])
return aug_box3d
elif cfg.RCNN.REG_AUG_METHOD == 'normal':
x_shift = np.random.normal(loc=0, scale=0.3)
y_shift = np.random.normal(loc=0, scale=0.2)
z_shift = np.random.normal(loc=0, scale=0.3)
h_shift = np.random.normal(loc=0, scale=0.25)
w_shift = np.random.normal(loc=0, scale=0.15)
l_shift = np.random.normal(loc=0, scale=0.5)
ry_shift = ((np.random.rand() - 0.5) / 0.5) * np.pi / 12
aug_box3d = np.array([box3d[0] + x_shift, box3d[1] + y_shift, box3d[2] + z_shift, box3d[3] + h_shift,
box3d[4] + w_shift, box3d[5] + l_shift, box3d[6] + ry_shift])
return aug_box3d
else:
raise NotImplementedError

random_aug_box3d | python | sshaoshuai/PointRCNN | lib/datasets/kitti_rcnn_dataset.py | MIT
https://github.com/sshaoshuai/PointRCNN/blob/master/lib/datasets/kitti_rcnn_dataset.py

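A standalone sketch of the 'single' branch above, with the cfg-driven ranges passed in as hypothetical arguments whose defaults mirror the constants in the code:

import numpy as np

def aug_box_single(box3d, pos_range=0.5, scale_range=0.15, angle_range=np.pi / 12):
    pos_shift = (np.random.rand(3) - 0.5) / 0.5 * pos_range      # uniform in +/- pos_range
    hwl_scale = (np.random.rand(3) - 0.5) / 0.5 * scale_range + 1.0
    angle_rot = (np.random.rand(1) - 0.5) / 0.5 * angle_range
    return np.concatenate([box3d[0:3] + pos_shift,
                           box3d[3:6] * hwl_scale,
                           box3d[6:7] + angle_rot])

box = np.array([1.0, 1.5, 20.0, 1.5, 1.6, 3.9, 0.1])             # [x, y, z, h, w, l, ry]
print(aug_box_single(box))
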
def distance_based_proposal(self, scores, proposals, order):
"""
propose rois in two depth ranges (near/far) and apply NMS separately within each
:param scores: (N)
:param proposals: (N, 7)
:param order: (N)
"""
nms_range_list = [0, 40.0, 80.0]
pre_tot_top_n = cfg[self.mode].RPN_PRE_NMS_TOP_N
pre_top_n_list = [0, int(pre_tot_top_n * 0.7), pre_tot_top_n - int(pre_tot_top_n * 0.7)]
post_tot_top_n = cfg[self.mode].RPN_POST_NMS_TOP_N
post_top_n_list = [0, int(post_tot_top_n * 0.7), post_tot_top_n - int(post_tot_top_n * 0.7)]
scores_single_list, proposals_single_list = [], []
# sort by score
scores_ordered = scores[order]
proposals_ordered = proposals[order]
dist = proposals_ordered[:, 2]
first_mask = (dist > nms_range_list[0]) & (dist <= nms_range_list[1])
for i in range(1, len(nms_range_list)):
# get proposal distance mask
dist_mask = ((dist > nms_range_list[i - 1]) & (dist <= nms_range_list[i]))
if dist_mask.sum() != 0:
# this area has points
# reduce by mask
cur_scores = scores_ordered[dist_mask]
cur_proposals = proposals_ordered[dist_mask]
# fetch pre nms top K
cur_scores = cur_scores[:pre_top_n_list[i]]
cur_proposals = cur_proposals[:pre_top_n_list[i]]
else:
assert i == 2, '%d' % i
# this area doesn't have any points, so use rois of first area
cur_scores = scores_ordered[first_mask]
cur_proposals = proposals_ordered[first_mask]
# fetch top K of first area
cur_scores = cur_scores[pre_top_n_list[i - 1]:][:pre_top_n_list[i]]
cur_proposals = cur_proposals[pre_top_n_list[i - 1]:][:pre_top_n_list[i]]
# oriented nms
boxes_bev = kitti_utils.boxes3d_to_bev_torch(cur_proposals)
if cfg.RPN.NMS_TYPE == 'rotate':
keep_idx = iou3d_utils.nms_gpu(boxes_bev, cur_scores, cfg[self.mode].RPN_NMS_THRESH)
elif cfg.RPN.NMS_TYPE == 'normal':
keep_idx = iou3d_utils.nms_normal_gpu(boxes_bev, cur_scores, cfg[self.mode].RPN_NMS_THRESH)
else:
raise NotImplementedError
# Fetch post nms top k
keep_idx = keep_idx[:post_top_n_list[i]]
scores_single_list.append(cur_scores[keep_idx])
proposals_single_list.append(cur_proposals[keep_idx])
scores_single = torch.cat(scores_single_list, dim=0)
proposals_single = torch.cat(proposals_single_list, dim=0)
return scores_single, proposals_single

distance_based_proposal | python | sshaoshuai/PointRCNN | lib/rpn/proposal_layer.py | MIT
https://github.com/sshaoshuai/PointRCNN/blob/master/lib/rpn/proposal_layer.py

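The near/far budget split above can be illustrated without the CUDA NMS. This sketch assumes the same 0/40/80 m buckets and the 70/30 split of the pre-NMS budget, and skips the empty-far-bucket fallback:

import numpy as np

def split_by_distance(scores, proposals, pre_top_n=4):
    order = np.argsort(-scores)                                  # sort by score, descending
    scores, proposals = scores[order], proposals[order]
    z = proposals[:, 2]
    budgets = [int(pre_top_n * 0.7), pre_top_n - int(pre_top_n * 0.7)]
    kept = []
    for (lo, hi), budget in zip([(0.0, 40.0), (40.0, 80.0)], budgets):
        in_range = (z > lo) & (z <= hi)
        kept.append(proposals[in_range][:budget])                # top-`budget` by score in this range
    return np.concatenate(kept, axis=0)

props = np.zeros((6, 7))
props[:, 2] = [10, 55, 35, 70, 20, 45]                           # depths
kept = split_by_distance(np.array([0.9, 0.8, 0.7, 0.6, 0.5, 0.4]), props)
print(kept[:, 2])                                                # [10. 35. 55. 70.]
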
def score_based_proposal(self, scores, proposals, order):
"""
propose rois purely by score: keep the pre-NMS top-K, run NMS, then keep the post-NMS top-K
:param scores: (N)
:param proposals: (N, 7)
:param order: (N)
"""
# sort by score
scores_ordered = scores[order]
proposals_ordered = proposals[order]
# pre nms top K
cur_scores = scores_ordered[:cfg[self.mode].RPN_PRE_NMS_TOP_N]
cur_proposals = proposals_ordered[:cfg[self.mode].RPN_PRE_NMS_TOP_N]
boxes_bev = kitti_utils.boxes3d_to_bev_torch(cur_proposals)
keep_idx = iou3d_utils.nms_gpu(boxes_bev, cur_scores, cfg[self.mode].RPN_NMS_THRESH)
# Fetch post nms top k
keep_idx = keep_idx[:cfg[self.mode].RPN_POST_NMS_TOP_N]
return cur_scores[keep_idx], cur_proposals[keep_idx]

score_based_proposal | python | sshaoshuai/PointRCNN | lib/rpn/proposal_layer.py | MIT
https://github.com/sshaoshuai/PointRCNN/blob/master/lib/rpn/proposal_layer.py

def random_aug_box3d(box3d):
"""
:param box3d: (7) [x, y, z, h, w, l, ry]
random shift, scale, orientation
"""
if cfg.RCNN.REG_AUG_METHOD == 'single':
pos_shift = (torch.rand(3, device=box3d.device) - 0.5) # [-0.5 ~ 0.5]
hwl_scale = (torch.rand(3, device=box3d.device) - 0.5) / (0.5 / 0.15) + 1.0  # [0.85 ~ 1.15]
angle_rot = (torch.rand(1, device=box3d.device) - 0.5) / (0.5 / (np.pi / 12)) # [-pi/12 ~ pi/12]
aug_box3d = torch.cat([box3d[0:3] + pos_shift, box3d[3:6] * hwl_scale, box3d[6:7] + angle_rot], dim=0)
return aug_box3d
elif cfg.RCNN.REG_AUG_METHOD == 'multiple':
# pos_range, hwl_range, angle_range, mean_iou
range_config = [[0.2, 0.1, np.pi / 12, 0.7],
[0.3, 0.15, np.pi / 12, 0.6],
[0.5, 0.15, np.pi / 9, 0.5],
[0.8, 0.15, np.pi / 6, 0.3],
[1.0, 0.15, np.pi / 3, 0.2]]
idx = torch.randint(low=0, high=len(range_config), size=(1,))[0].long()
pos_shift = ((torch.rand(3, device=box3d.device) - 0.5) / 0.5) * range_config[idx][0]
hwl_scale = ((torch.rand(3, device=box3d.device) - 0.5) / 0.5) * range_config[idx][1] + 1.0
angle_rot = ((torch.rand(1, device=box3d.device) - 0.5) / 0.5) * range_config[idx][2]
aug_box3d = torch.cat([box3d[0:3] + pos_shift, box3d[3:6] * hwl_scale, box3d[6:7] + angle_rot], dim=0)
return aug_box3d
elif cfg.RCNN.REG_AUG_METHOD == 'normal':
x_shift = np.random.normal(loc=0, scale=0.3)
y_shift = np.random.normal(loc=0, scale=0.2)
z_shift = np.random.normal(loc=0, scale=0.3)
h_shift = np.random.normal(loc=0, scale=0.25)
w_shift = np.random.normal(loc=0, scale=0.15)
l_shift = np.random.normal(loc=0, scale=0.5)
ry_shift = ((np.random.rand() - 0.5) / 0.5) * np.pi / 12  # keep this branch in numpy; converted to torch below
aug_box3d = np.array([box3d[0] + x_shift, box3d[1] + y_shift, box3d[2] + z_shift, box3d[3] + h_shift,
box3d[4] + w_shift, box3d[5] + l_shift, box3d[6] + ry_shift], dtype=np.float32)
aug_box3d = torch.from_numpy(aug_box3d).type_as(box3d)
return aug_box3d
else:
raise NotImplementedError

random_aug_box3d | python | sshaoshuai/PointRCNN | lib/rpn/proposal_target_layer.py | MIT
https://github.com/sshaoshuai/PointRCNN/blob/master/lib/rpn/proposal_target_layer.py

def data_augmentation(self, pts, rois, gt_of_rois):
"""
:param pts: (B, M, 512, 3)
:param rois: (B, M, 7)
:param gt_of_rois: (B, M, 7)
:return: pts, rois, gt_of_rois after consistent rotation, scaling and flip augmentation
"""
batch_size, boxes_num = pts.shape[0], pts.shape[1]
# rotation augmentation
angles = ((torch.rand((batch_size, boxes_num), device=pts.device) - 0.5) / 0.5) * (np.pi / cfg.AUG_ROT_RANGE)  # uniform in [-1, 1], scaled by the rotation range
# calculate gt alpha from gt_of_rois
temp_x, temp_z, temp_ry = gt_of_rois[:, :, 0], gt_of_rois[:, :, 2], gt_of_rois[:, :, 6]
temp_beta = torch.atan2(temp_z, temp_x)
gt_alpha = -torch.sign(temp_beta) * np.pi / 2 + temp_beta + temp_ry # (B, M)
temp_x, temp_z, temp_ry = rois[:, :, 0], rois[:, :, 2], rois[:, :, 6]
temp_beta = torch.atan2(temp_z, temp_x)
roi_alpha = -torch.sign(temp_beta) * np.pi / 2 + temp_beta + temp_ry # (B, M)
for k in range(batch_size):
pts[k] = kitti_utils.rotate_pc_along_y_torch(pts[k], angles[k])
gt_of_rois[k] = kitti_utils.rotate_pc_along_y_torch(gt_of_rois[k].unsqueeze(dim=1), angles[k]).squeeze(dim=1)
rois[k] = kitti_utils.rotate_pc_along_y_torch(rois[k].unsqueeze(dim=1), angles[k]).squeeze(dim=1)
# calculate the ry after rotation
temp_x, temp_z = gt_of_rois[:, :, 0], gt_of_rois[:, :, 2]
temp_beta = torch.atan2(temp_z, temp_x)
gt_of_rois[:, :, 6] = torch.sign(temp_beta) * np.pi / 2 + gt_alpha - temp_beta
temp_x, temp_z = rois[:, :, 0], rois[:, :, 2]
temp_beta = torch.atan2(temp_z, temp_x)
rois[:, :, 6] = torch.sign(temp_beta) * np.pi / 2 + roi_alpha - temp_beta
# scaling augmentation
scales = 1 + ((torch.rand((batch_size, boxes_num), device=pts.device) - 0.5) / 0.5) * 0.05
pts = pts * scales.unsqueeze(dim=2).unsqueeze(dim=3)
gt_of_rois[:, :, 0:6] = gt_of_rois[:, :, 0:6] * scales.unsqueeze(dim=2)
rois[:, :, 0:6] = rois[:, :, 0:6] * scales.unsqueeze(dim=2)
# flip augmentation
flip_flag = torch.sign(torch.rand((batch_size, boxes_num), device=pts.device) - 0.5)
pts[:, :, :, 0] = pts[:, :, :, 0] * flip_flag.unsqueeze(dim=2)
gt_of_rois[:, :, 0] = gt_of_rois[:, :, 0] * flip_flag
# flip orientation: ry > 0: pi - ry, ry < 0: -pi - ry
src_ry = gt_of_rois[:, :, 6]
ry = (flip_flag == 1).float() * src_ry + (flip_flag == -1).float() * (torch.sign(src_ry) * np.pi - src_ry)
gt_of_rois[:, :, 6] = ry
rois[:, :, 0] = rois[:, :, 0] * flip_flag
# flip orientation: ry > 0: pi - ry, ry < 0: -pi - ry
src_ry = rois[:, :, 6]
ry = (flip_flag == 1).float() * src_ry + (flip_flag == -1).float() * (torch.sign(src_ry) * np.pi - src_ry)
rois[:, :, 6] = ry
return pts, rois, gt_of_rois

data_augmentation | python | sshaoshuai/PointRCNN | lib/rpn/proposal_target_layer.py | MIT
https://github.com/sshaoshuai/PointRCNN/blob/master/lib/rpn/proposal_target_layer.py

def decode_bbox_target(roi_box3d, pred_reg, loc_scope, loc_bin_size, num_head_bin, anchor_size,
get_xz_fine=True, get_y_by_bin=False, loc_y_scope=0.5, loc_y_bin_size=0.25, get_ry_fine=False):
"""
:param roi_box3d: (N, 7)
:param pred_reg: (N, C)
:param loc_scope: float, half-range of the x/z search region
:param loc_bin_size: float, bin size for x/z localization
:param num_head_bin: int, number of orientation bins
:param anchor_size: (3), mean anchor size [h, w, l]
:param get_xz_fine: bool, whether x/z add a per-bin residual
:param get_y_by_bin: bool, whether y uses bin + residual instead of a direct offset
:param loc_y_scope: float, half-range for the y bins
:param loc_y_bin_size: float, bin size for y
:param get_ry_fine: bool, whether ry bins cover only a pi/2 range (refinement stage)
:return: (N, 7), decoded boxes [x, y, z, h, w, l, ry]
"""
anchor_size = anchor_size.to(roi_box3d.get_device())
per_loc_bin_num = int(loc_scope / loc_bin_size) * 2
loc_y_bin_num = int(loc_y_scope / loc_y_bin_size) * 2
# recover xz localization
x_bin_l, x_bin_r = 0, per_loc_bin_num
z_bin_l, z_bin_r = per_loc_bin_num, per_loc_bin_num * 2
start_offset = z_bin_r
x_bin = torch.argmax(pred_reg[:, x_bin_l: x_bin_r], dim=1)
z_bin = torch.argmax(pred_reg[:, z_bin_l: z_bin_r], dim=1)
pos_x = x_bin.float() * loc_bin_size + loc_bin_size / 2 - loc_scope
pos_z = z_bin.float() * loc_bin_size + loc_bin_size / 2 - loc_scope
if get_xz_fine:
x_res_l, x_res_r = per_loc_bin_num * 2, per_loc_bin_num * 3
z_res_l, z_res_r = per_loc_bin_num * 3, per_loc_bin_num * 4
start_offset = z_res_r
x_res_norm = torch.gather(pred_reg[:, x_res_l: x_res_r], dim=1, index=x_bin.unsqueeze(dim=1)).squeeze(dim=1)
z_res_norm = torch.gather(pred_reg[:, z_res_l: z_res_r], dim=1, index=z_bin.unsqueeze(dim=1)).squeeze(dim=1)
x_res = x_res_norm * loc_bin_size
z_res = z_res_norm * loc_bin_size
pos_x += x_res
pos_z += z_res
# recover y localization
if get_y_by_bin:
y_bin_l, y_bin_r = start_offset, start_offset + loc_y_bin_num
y_res_l, y_res_r = y_bin_r, y_bin_r + loc_y_bin_num
start_offset = y_res_r
y_bin = torch.argmax(pred_reg[:, y_bin_l: y_bin_r], dim=1)
y_res_norm = torch.gather(pred_reg[:, y_res_l: y_res_r], dim=1, index=y_bin.unsqueeze(dim=1)).squeeze(dim=1)
y_res = y_res_norm * loc_y_bin_size
pos_y = y_bin.float() * loc_y_bin_size + loc_y_bin_size / 2 - loc_y_scope + y_res
pos_y = pos_y + roi_box3d[:, 1]
else:
y_offset_l, y_offset_r = start_offset, start_offset + 1
start_offset = y_offset_r
pos_y = roi_box3d[:, 1] + pred_reg[:, y_offset_l]
# recover ry rotation
ry_bin_l, ry_bin_r = start_offset, start_offset + num_head_bin
ry_res_l, ry_res_r = ry_bin_r, ry_bin_r + num_head_bin
ry_bin = torch.argmax(pred_reg[:, ry_bin_l: ry_bin_r], dim=1)
ry_res_norm = torch.gather(pred_reg[:, ry_res_l: ry_res_r], dim=1, index=ry_bin.unsqueeze(dim=1)).squeeze(dim=1)
if get_ry_fine:
# divide pi/2 into several bins
angle_per_class = (np.pi / 2) / num_head_bin
ry_res = ry_res_norm * (angle_per_class / 2)
ry = (ry_bin.float() * angle_per_class + angle_per_class / 2) + ry_res - np.pi / 4
else:
angle_per_class = (2 * np.pi) / num_head_bin
ry_res = ry_res_norm * (angle_per_class / 2)
# bin_center is (0, 30, 60, 90, 120, ..., 270, 300, 330)
ry = (ry_bin.float() * angle_per_class + ry_res) % (2 * np.pi)
ry[ry > np.pi] -= 2 * np.pi
# recover size
size_res_l, size_res_r = ry_res_r, ry_res_r + 3
assert size_res_r == pred_reg.shape[1]
size_res_norm = pred_reg[:, size_res_l: size_res_r]
hwl = size_res_norm * anchor_size + anchor_size
# shift to original coords
roi_center = roi_box3d[:, 0:3]
shift_ret_box3d = torch.cat((pos_x.view(-1, 1), pos_y.view(-1, 1), pos_z.view(-1, 1), hwl, ry.view(-1, 1)), dim=1)
ret_box3d = shift_ret_box3d
if roi_box3d.shape[1] == 7:
roi_ry = roi_box3d[:, 6]
ret_box3d = rotate_pc_along_y_torch(shift_ret_box3d, - roi_ry)
ret_box3d[:, 6] += roi_ry
ret_box3d[:, [0, 2]] += roi_center[:, [0, 2]]
return ret_box3d

decode_bbox_target | python | sshaoshuai/PointRCNN | lib/utils/bbox_transform.py | MIT
https://github.com/sshaoshuai/PointRCNN/blob/master/lib/utils/bbox_transform.py

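A toy 1-D version of the bin-plus-residual decoding above (x only, with made-up "network outputs"): the decoded value is the argmax bin's center plus that bin's normalized residual scaled back by the bin size:

import numpy as np

loc_scope, loc_bin_size = 3.0, 0.5
num_bins = int(loc_scope / loc_bin_size) * 2                     # 12 bins over [-3, 3)

bin_logits = np.zeros(num_bins)
bin_logits[8] = 5.0                                              # pretend the net picked bin 8
res_norm = np.full(num_bins, -0.2)                               # residual: -0.2 bin widths

x_bin = int(np.argmax(bin_logits))                               # 8
bin_center = x_bin * loc_bin_size + loc_bin_size / 2 - loc_scope # 1.25
x = bin_center + res_norm[x_bin] * loc_bin_size                  # 1.15
print(x_bin, bin_center, x)
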
def corners3d_to_img_boxes(self, corners3d):
"""
:param corners3d: (N, 8, 3) corners in rect coordinate
:return: boxes: (None, 4) [x1, y1, x2, y2] in rgb coordinate
:return: boxes_corner: (None, 8) [xi, yi] in rgb coordinate
"""
sample_num = corners3d.shape[0]
corners3d_hom = np.concatenate((corners3d, np.ones((sample_num, 8, 1))), axis=2) # (N, 8, 4)
img_pts = np.matmul(corners3d_hom, self.P2.T) # (N, 8, 3)
x, y = img_pts[:, :, 0] / img_pts[:, :, 2], img_pts[:, :, 1] / img_pts[:, :, 2]
x1, y1 = np.min(x, axis=1), np.min(y, axis=1)
x2, y2 = np.max(x, axis=1), np.max(y, axis=1)
boxes = np.concatenate((x1.reshape(-1, 1), y1.reshape(-1, 1), x2.reshape(-1, 1), y2.reshape(-1, 1)), axis=1)
boxes_corner = np.concatenate((x.reshape(-1, 8, 1), y.reshape(-1, 8, 1)), axis=2)
return boxes, boxes_corner

corners3d_to_img_boxes | python | sshaoshuai/PointRCNN | lib/utils/calibration.py | MIT
https://github.com/sshaoshuai/PointRCNN/blob/master/lib/utils/calibration.py

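A hedged usage sketch of the projection above with a made-up pinhole P2 (fu = fv = 700, principal point (620, 190), zero translation); a real KITTI calibration file supplies the actual P2:

import numpy as np

P2 = np.array([[700.,   0., 620., 0.],
               [  0., 700., 190., 0.],
               [  0.,   0.,   1., 0.]])
corners3d = np.random.rand(2, 8, 3) + np.array([0., 0., 10.])    # keep z > 0

corners_hom = np.concatenate([corners3d, np.ones((2, 8, 1))], axis=2)
img_pts = corners_hom @ P2.T                                     # (2, 8, 3)
u, v = img_pts[:, :, 0] / img_pts[:, :, 2], img_pts[:, :, 1] / img_pts[:, :, 2]
boxes = np.stack([u.min(1), v.min(1), u.max(1), v.max(1)], axis=1)
print(boxes.shape)                                               # (2, 4)
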
def camera_dis_to_rect(self, u, v, d):
"""
Only valid (u, v, d) can be processed: (u, v) must lie inside the image; the reprojection error is about 0.02.
:param u: (N)
:param v: (N)
:param d: (N), the distance between the camera and the 3d points, d^2 = x^2 + y^2 + z^2
:return: pts_rect: (N, 3), points in rect camera coordinates
"""
assert self.fu == self.fv, '%.8f != %.8f' % (self.fu, self.fv)
fd = np.sqrt((u - self.cu)**2 + (v - self.cv)**2 + self.fu**2)
x = ((u - self.cu) * d) / fd + self.tx
y = ((v - self.cv) * d) / fd + self.ty
z = np.sqrt(d**2 - x**2 - y**2)
pts_rect = np.concatenate((x.reshape(-1, 1), y.reshape(-1, 1), z.reshape(-1, 1)), axis=1)
return pts_rect

camera_dis_to_rect | python | sshaoshuai/PointRCNN | lib/utils/calibration.py | MIT
https://github.com/sshaoshuai/PointRCNN/blob/master/lib/utils/calibration.py

def dist_to_plane(plane, points):
"""
Calculates the signed distance from a 3D plane to each point in a list of points
:param plane: (a, b, c, d)
:param points: (N, 3)
:return: (N), signed distance of each point to the plane
"""
a, b, c, d = plane
points = np.array(points)
x = points[:, 0]
y = points[:, 1]
z = points[:, 2]
return (a*x + b*y + c*z + d) / np.sqrt(a**2 + b**2 + c**2)

dist_to_plane | python | sshaoshuai/PointRCNN | lib/utils/kitti_utils.py | MIT
https://github.com/sshaoshuai/PointRCNN/blob/master/lib/utils/kitti_utils.py

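Usage sketch with a hypothetical KITTI-style ground plane y = 1.65 in camera coordinates, i.e. (a, b, c, d) = (0, 1, 0, -1.65); since y points down, points above the road get negative signed distance (assumes dist_to_plane above is in scope):

import numpy as np

plane = (0.0, 1.0, 0.0, -1.65)                   # 0*x + 1*y + 0*z - 1.65 = 0
points = np.array([[0.0, 1.65, 10.0],            # on the road surface
                   [0.0, 0.65, 10.0]])           # 1 m above it (y is down)
print(dist_to_plane(plane, points))              # [ 0. -1.]
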
def rotate_pc_along_y(pc, rot_angle):
"""
params pc: (N, 3+C), (N, 3) is in the rectified camera coordinate
params rot_angle: rad scalar
Output pc: updated pc with XYZ rotated
"""
cosval = np.cos(rot_angle)
sinval = np.sin(rot_angle)
rotmat = np.array([[cosval, -sinval], [sinval, cosval]])
pc[:, [0, 2]] = np.dot(pc[:, [0, 2]], np.transpose(rotmat))
return pc

rotate_pc_along_y | python | sshaoshuai/PointRCNN | lib/utils/kitti_utils.py | MIT
https://github.com/sshaoshuai/PointRCNN/blob/master/lib/utils/kitti_utils.py

def rotate_pc_along_y_torch(pc, rot_angle):
"""
:param pc: (N, 512, 3 + C)
:param rot_angle: (N)
:return:
TODO: merge with rotate_pc_along_y_torch in bbox_transform.py
"""
cosa = torch.cos(rot_angle).view(-1, 1) # (N, 1)
sina = torch.sin(rot_angle).view(-1, 1) # (N, 1)
raw_1 = torch.cat([cosa, -sina], dim=1) # (N, 2)
raw_2 = torch.cat([sina, cosa], dim=1) # (N, 2)
R = torch.cat((raw_1.unsqueeze(dim=1), raw_2.unsqueeze(dim=1)), dim=1) # (N, 2, 2)
pc_temp = pc[:, :, [0, 2]] # (N, 512, 2)
pc[:, :, [0, 2]] = torch.matmul(pc_temp, R.permute(0, 2, 1)) # (N, 512, 2)
return pc

rotate_pc_along_y_torch | python | sshaoshuai/PointRCNN | lib/utils/kitti_utils.py | MIT
https://github.com/sshaoshuai/PointRCNN/blob/master/lib/utils/kitti_utils.py

def in_hull(p, hull):
"""
:param p: (N, K) test points
:param hull: (M, K) M corners of a box
:return (N) bool
"""
try:
if not isinstance(hull, Delaunay):
hull = Delaunay(hull)
flag = hull.find_simplex(p) >= 0
except scipy.spatial.qhull.QhullError:
print('Warning: not a hull %s' % str(hull))
flag = np.zeros(p.shape[0], dtype=bool)  # the builtin bool; np.bool is removed in newer numpy
return flag

in_hull | python | sshaoshuai/PointRCNN | lib/utils/kitti_utils.py | MIT
https://github.com/sshaoshuai/PointRCNN/blob/master/lib/utils/kitti_utils.py

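Usage sketch: the eight corners of a unit cube as the hull, with one query point inside and one outside; this mirrors what in_hull does once the Delaunay triangulation is built:

import numpy as np
from scipy.spatial import Delaunay

corners = np.array([[x, y, z] for x in (0, 1) for y in (0, 1) for z in (0, 1)], dtype=float)
queries = np.array([[0.5, 0.5, 0.5],             # inside the cube
                    [2.0, 0.0, 0.0]])            # outside
print(Delaunay(corners).find_simplex(queries) >= 0)   # [ True False]
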
def get_iou3d(corners3d, query_corners3d, need_bev=False):
"""
:param corners3d: (N, 8, 3) in rect coords
:param query_corners3d: (M, 8, 3)
:return:
"""
from shapely.geometry import Polygon
A, B = corners3d, query_corners3d
N, M = A.shape[0], B.shape[0]
iou3d = np.zeros((N, M), dtype=np.float32)
iou_bev = np.zeros((N, M), dtype=np.float32)
# for height overlap: y points downward in camera coords, so use -y as height
min_h_a = -A[:, 0:4, 1].sum(axis=1) / 4.0
max_h_a = -A[:, 4:8, 1].sum(axis=1) / 4.0
min_h_b = -B[:, 0:4, 1].sum(axis=1) / 4.0
max_h_b = -B[:, 4:8, 1].sum(axis=1) / 4.0
for i in range(N):
for j in range(M):
max_of_min = np.max([min_h_a[i], min_h_b[j]])
min_of_max = np.min([max_h_a[i], max_h_b[j]])
h_overlap = np.max([0, min_of_max - max_of_min])
if h_overlap == 0:
continue
bottom_a, bottom_b = Polygon(A[i, 0:4, [0, 2]].T), Polygon(B[j, 0:4, [0, 2]].T)
if bottom_a.is_valid and bottom_b.is_valid:
# validity check: an invalid polygon (e.g. a self-intersecting ring) cannot be intersected safely
bottom_overlap = bottom_a.intersection(bottom_b).area
else:
bottom_overlap = 0.
overlap3d = bottom_overlap * h_overlap
union3d = bottom_a.area * (max_h_a[i] - min_h_a[i]) + bottom_b.area * (max_h_b[j] - min_h_b[j]) - overlap3d
iou3d[i][j] = overlap3d / union3d
iou_bev[i][j] = bottom_overlap / (bottom_a.area + bottom_b.area - bottom_overlap)
if need_bev:
return iou3d, iou_bev
return iou3d

get_iou3d | python | sshaoshuai/PointRCNN | lib/utils/kitti_utils.py | MIT
https://github.com/sshaoshuai/PointRCNN/blob/master/lib/utils/kitti_utils.py

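A sanity sketch of the height/BEV decomposition above, using two hand-built unit cubes in camera coordinates (y down, bottom face at y = 0, top at y = -1) offset by 0.5 m along x, so the expected IoU3D is 0.5 / (1 + 1 - 0.5) = 1/3:

import numpy as np
from shapely.geometry import Polygon

def make_corners(x0):                            # unit cube, bottom face corners first
    bottom = [[x0 + dx, 0.0, dz] for dx, dz in [(0, 0), (1, 0), (1, 1), (0, 1)]]
    top = [[cx, -1.0, cz] for cx, _, cz in bottom]
    return np.array(bottom + top)

a, b = make_corners(0.0), make_corners(0.5)
h_overlap = 1.0                                  # both boxes span -y in [0, 1]
bev = Polygon(a[0:4][:, [0, 2]]).intersection(Polygon(b[0:4][:, [0, 2]])).area
iou3d = bev * h_overlap / (1.0 + 1.0 - bev * h_overlap)
print(bev, iou3d)                                # 0.5 0.333...
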
def forward(self, input, target):
"""
:param input: (N), logit
:param target: (N), {0, 1}
:return:
"""
input = torch.sigmoid(input.view(-1))
target = target.float().view(-1)
mask = (target != self.ignore_target).float()
return 1.0 - (torch.min(input, target) * mask).sum() / torch.clamp((torch.max(input, target) * mask).sum(), min=1.0)

forward | python | sshaoshuai/PointRCNN | lib/utils/loss_utils.py | MIT
https://github.com/sshaoshuai/PointRCNN/blob/master/lib/utils/loss_utils.py

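A toy check of this soft-IoU loss (with the ignore mask omitted, i.e. all targets valid): when the sigmoid of the logits matches the targets almost exactly, min equals max elementwise and the loss approaches 0; it never reaches exactly 0 because sigmoid never outputs 0 or 1:

import torch

logits = torch.tensor([10.0, -10.0, 10.0])       # sigmoid -> ~[1, 0, 1]
target = torch.tensor([1.0, 0.0, 1.0])
p = torch.sigmoid(logits)
loss = 1.0 - torch.min(p, target).sum() / torch.clamp(torch.max(p, target).sum(), min=1.0)
print(loss)                                      # tensor(~7e-5), near-perfect overlap
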
def __init__(self, gamma=2.0, alpha=0.25):
"""Constructor.
Args:
gamma: exponent of the modulating factor (1 - p_t) ^ gamma.
alpha: optional alpha weighting factor to balance positives vs negatives.
all_zero_negative: bool. if True, will treat all zero as background.
else, will treat first label as background. only affect alpha.
"""
super().__init__()
self._alpha = alpha
self._gamma = gamma

__init__ | python | sshaoshuai/PointRCNN | lib/utils/loss_utils.py | MIT
https://github.com/sshaoshuai/PointRCNN/blob/master/lib/utils/loss_utils.py

def forward(self,
prediction_tensor,
target_tensor,
weights):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
weights: a float tensor of shape [batch_size, num_anchors]
class_indices: (Optional) A 1-D integer tensor of class indices.
If provided, computes loss only for the specified class indices.
Returns:
loss: a float tensor of shape [batch_size, num_anchors, num_classes]
representing the value of the loss function.
"""
per_entry_cross_ent = (_sigmoid_cross_entropy_with_logits(
labels=target_tensor, logits=prediction_tensor))
prediction_probabilities = torch.sigmoid(prediction_tensor)
p_t = ((target_tensor * prediction_probabilities) +
((1 - target_tensor) * (1 - prediction_probabilities)))
modulating_factor = 1.0
if self._gamma:
modulating_factor = torch.pow(1.0 - p_t, self._gamma)
alpha_weight_factor = 1.0
if self._alpha is not None:
alpha_weight_factor = (target_tensor * self._alpha + (1 - target_tensor) * (1 - self._alpha))
focal_cross_entropy_loss = (modulating_factor * alpha_weight_factor * per_entry_cross_ent)
return focal_cross_entropy_loss * weights

forward | python | sshaoshuai/PointRCNN | lib/utils/loss_utils.py | MIT
https://github.com/sshaoshuai/PointRCNN/blob/master/lib/utils/loss_utils.py

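A small sketch of the modulating factor above: with gamma = 2 (and no alpha weighting), a confidently-correct "easy" positive is down-weighted far more than an uncertain one; the logits below are made up:

import torch
import torch.nn.functional as F

logits = torch.tensor([4.0, 0.1])                # easy vs hard positive
targets = torch.tensor([1.0, 1.0])
bce = F.binary_cross_entropy_with_logits(logits, targets, reduction='none')
p_t = torch.sigmoid(logits)                      # p_t equals p for positive targets
focal = (1.0 - p_t) ** 2 * bce                   # gamma = 2
print(bce, focal)                                # the easy example shrinks from ~0.018 to ~6e-6
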
def get_reg_loss(pred_reg, reg_label, loc_scope, loc_bin_size, num_head_bin, anchor_size,
get_xz_fine=True, get_y_by_bin=False, loc_y_scope=0.5, loc_y_bin_size=0.25, get_ry_fine=False):
"""
Bin-based 3D bounding boxes regression loss. See https://arxiv.org/abs/1812.04244 for more details.
:param pred_reg: (N, C)
:param reg_label: (N, 7) [dx, dy, dz, h, w, l, ry]
:param loc_scope: constant
:param loc_bin_size: constant
:param num_head_bin: constant
:param anchor_size: (N, 3) or (3)
:param get_xz_fine:
:param get_y_by_bin:
:param loc_y_scope:
:param loc_y_bin_size:
:param get_ry_fine:
:return:
"""
per_loc_bin_num = int(loc_scope / loc_bin_size) * 2
loc_y_bin_num = int(loc_y_scope / loc_y_bin_size) * 2
reg_loss_dict = {}
loc_loss = 0
# xz localization loss
x_offset_label, y_offset_label, z_offset_label = reg_label[:, 0], reg_label[:, 1], reg_label[:, 2]
x_shift = torch.clamp(x_offset_label + loc_scope, 0, loc_scope * 2 - 1e-3)
z_shift = torch.clamp(z_offset_label + loc_scope, 0, loc_scope * 2 - 1e-3)
x_bin_label = (x_shift / loc_bin_size).floor().long()
z_bin_label = (z_shift / loc_bin_size).floor().long()
x_bin_l, x_bin_r = 0, per_loc_bin_num
z_bin_l, z_bin_r = per_loc_bin_num, per_loc_bin_num * 2
start_offset = z_bin_r
loss_x_bin = F.cross_entropy(pred_reg[:, x_bin_l: x_bin_r], x_bin_label)
loss_z_bin = F.cross_entropy(pred_reg[:, z_bin_l: z_bin_r], z_bin_label)
reg_loss_dict['loss_x_bin'] = loss_x_bin.item()
reg_loss_dict['loss_z_bin'] = loss_z_bin.item()
loc_loss += loss_x_bin + loss_z_bin
if get_xz_fine:
x_res_l, x_res_r = per_loc_bin_num * 2, per_loc_bin_num * 3
z_res_l, z_res_r = per_loc_bin_num * 3, per_loc_bin_num * 4
start_offset = z_res_r
x_res_label = x_shift - (x_bin_label.float() * loc_bin_size + loc_bin_size / 2)
z_res_label = z_shift - (z_bin_label.float() * loc_bin_size + loc_bin_size / 2)
x_res_norm_label = x_res_label / loc_bin_size
z_res_norm_label = z_res_label / loc_bin_size
x_bin_onehot = torch.cuda.FloatTensor(x_bin_label.size(0), per_loc_bin_num).zero_()
x_bin_onehot.scatter_(1, x_bin_label.view(-1, 1).long(), 1)
z_bin_onehot = torch.cuda.FloatTensor(z_bin_label.size(0), per_loc_bin_num).zero_()
z_bin_onehot.scatter_(1, z_bin_label.view(-1, 1).long(), 1)
loss_x_res = F.smooth_l1_loss((pred_reg[:, x_res_l: x_res_r] * x_bin_onehot).sum(dim=1), x_res_norm_label)
loss_z_res = F.smooth_l1_loss((pred_reg[:, z_res_l: z_res_r] * z_bin_onehot).sum(dim=1), z_res_norm_label)
reg_loss_dict['loss_x_res'] = loss_x_res.item()
reg_loss_dict['loss_z_res'] = loss_z_res.item()
loc_loss += loss_x_res + loss_z_res
# y localization loss
if get_y_by_bin:
y_bin_l, y_bin_r = start_offset, start_offset + loc_y_bin_num
y_res_l, y_res_r = y_bin_r, y_bin_r + loc_y_bin_num
start_offset = y_res_r
y_shift = torch.clamp(y_offset_label + loc_y_scope, 0, loc_y_scope * 2 - 1e-3)
y_bin_label = (y_shift / loc_y_bin_size).floor().long()
y_res_label = y_shift - (y_bin_label.float() * loc_y_bin_size + loc_y_bin_size / 2)
y_res_norm_label = y_res_label / loc_y_bin_size
y_bin_onehot = torch.cuda.FloatTensor(y_bin_label.size(0), loc_y_bin_num).zero_()
y_bin_onehot.scatter_(1, y_bin_label.view(-1, 1).long(), 1)
loss_y_bin = F.cross_entropy(pred_reg[:, y_bin_l: y_bin_r], y_bin_label)
loss_y_res = F.smooth_l1_loss((pred_reg[:, y_res_l: y_res_r] * y_bin_onehot).sum(dim=1), y_res_norm_label)
reg_loss_dict['loss_y_bin'] = loss_y_bin.item()
reg_loss_dict['loss_y_res'] = loss_y_res.item()
loc_loss += loss_y_bin + loss_y_res
else:
y_offset_l, y_offset_r = start_offset, start_offset + 1
start_offset = y_offset_r
loss_y_offset = F.smooth_l1_loss(pred_reg[:, y_offset_l: y_offset_r].sum(dim=1), y_offset_label)
reg_loss_dict['loss_y_offset'] = loss_y_offset.item()
loc_loss += loss_y_offset
# angle loss
ry_bin_l, ry_bin_r = start_offset, start_offset + num_head_bin
ry_res_l, ry_res_r = ry_bin_r, ry_bin_r + num_head_bin
ry_label = reg_label[:, 6]
if get_ry_fine:
# divide pi/2 into several bins
angle_per_class = (np.pi / 2) / num_head_bin
ry_label = ry_label % (2 * np.pi) # 0 ~ 2pi
opposite_flag = (ry_label > np.pi * 0.5) & (ry_label < np.pi * 1.5)
ry_label[opposite_flag] = (ry_label[opposite_flag] + np.pi) % (2 * np.pi) # (0 ~ pi/2, 3pi/2 ~ 2pi)
shift_angle = (ry_label + np.pi * 0.5) % (2 * np.pi) # (0 ~ pi)
shift_angle = torch.clamp(shift_angle - np.pi * 0.25, min=1e-3, max=np.pi * 0.5 - 1e-3) # (0, pi/2)
# bin center is (5, 10, 15, ..., 85)
ry_bin_label = (shift_angle / angle_per_class).floor().long()
ry_res_label = shift_angle - (ry_bin_label.float() * angle_per_class + angle_per_class / 2)
ry_res_norm_label = ry_res_label / (angle_per_class / 2)
else:
# divide 2pi into several bins
angle_per_class = (2 * np.pi) / num_head_bin
heading_angle = ry_label % (2 * np.pi) # 0 ~ 2pi
shift_angle = (heading_angle + angle_per_class / 2) % (2 * np.pi)
ry_bin_label = (shift_angle / angle_per_class).floor().long()
ry_res_label = shift_angle - (ry_bin_label.float() * angle_per_class + angle_per_class / 2)
ry_res_norm_label = ry_res_label / (angle_per_class / 2)
ry_bin_onehot = torch.cuda.FloatTensor(ry_bin_label.size(0), num_head_bin).zero_()
ry_bin_onehot.scatter_(1, ry_bin_label.view(-1, 1).long(), 1)
loss_ry_bin = F.cross_entropy(pred_reg[:, ry_bin_l:ry_bin_r], ry_bin_label)
loss_ry_res = F.smooth_l1_loss((pred_reg[:, ry_res_l: ry_res_r] * ry_bin_onehot).sum(dim=1), ry_res_norm_label)
reg_loss_dict['loss_ry_bin'] = loss_ry_bin.item()
reg_loss_dict['loss_ry_res'] = loss_ry_res.item()
angle_loss = loss_ry_bin + loss_ry_res
# size loss
size_res_l, size_res_r = ry_res_r, ry_res_r + 3
assert pred_reg.shape[1] == size_res_r, '%d vs %d' % (pred_reg.shape[1], size_res_r)
size_res_norm_label = (reg_label[:, 3:6] - anchor_size) / anchor_size
size_res_norm = pred_reg[:, size_res_l:size_res_r]
size_loss = F.smooth_l1_loss(size_res_norm, size_res_norm_label)
# Total regression loss
reg_loss_dict['loss_loc'] = loc_loss
reg_loss_dict['loss_angle'] = angle_loss
reg_loss_dict['loss_size'] = size_loss
return loc_loss, angle_loss, size_loss, reg_loss_dict

get_reg_loss | python | sshaoshuai/PointRCNN | lib/utils/loss_utils.py | MIT
https://github.com/sshaoshuai/PointRCNN/blob/master/lib/utils/loss_utils.py

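A toy 1-D version of the label encoding above (x only), mirroring the decoding sketch after decode_bbox_target: the offset is shifted into [0, 2 * loc_scope), binned, and the leftover becomes a residual normalized by the bin size; decoding the pair recovers the offset:

import numpy as np

loc_scope, loc_bin_size = 3.0, 0.5
x_offset = -1.37

x_shift = np.clip(x_offset + loc_scope, 0, loc_scope * 2 - 1e-3)     # 1.63
x_bin = int(np.floor(x_shift / loc_bin_size))                        # bin 3
x_res = x_shift - (x_bin * loc_bin_size + loc_bin_size / 2)          # -0.12
x_res_norm = x_res / loc_bin_size                                    # -0.24

decoded = x_bin * loc_bin_size + loc_bin_size / 2 - loc_scope + x_res_norm * loc_bin_size
print(decoded)                                                       # -1.37
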
def generate_corners3d(self):
"""
generate corners3d representation for this object
:return corners_3d: (8, 3) corners of box3d in camera coord
"""
l, h, w = self.l, self.h, self.w
x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]
y_corners = [0, 0, 0, 0, -h, -h, -h, -h]
z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]
R = np.array([[np.cos(self.ry), 0, np.sin(self.ry)],
[0, 1, 0],
[-np.sin(self.ry), 0, np.cos(self.ry)]])
corners3d = np.vstack([x_corners, y_corners, z_corners]) # (3, 8)
corners3d = np.dot(R, corners3d).T
corners3d = corners3d + self.pos
return corners3d

generate_corners3d | python | sshaoshuai/PointRCNN | lib/utils/object3d.py | MIT
https://github.com/sshaoshuai/PointRCNN/blob/master/lib/utils/object3d.py

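A sanity sketch of the corner layout above with ry = 0: for l = 4, h = 2, w = 2 at pos (0, 0, 0), the corners should span x in [-2, 2], y in [-2, 0] (y is down and pos is the bottom-face center), and z in [-1, 1]:

import numpy as np

l, h, w, ry = 4.0, 2.0, 2.0, 0.0
x_c = [l/2, l/2, -l/2, -l/2, l/2, l/2, -l/2, -l/2]
y_c = [0, 0, 0, 0, -h, -h, -h, -h]
z_c = [w/2, -w/2, -w/2, w/2, w/2, -w/2, -w/2, w/2]
R = np.array([[np.cos(ry), 0, np.sin(ry)],
              [0, 1, 0],
              [-np.sin(ry), 0, np.cos(ry)]])
corners = (R @ np.vstack([x_c, y_c, z_c])).T
print(corners.min(axis=0), corners.max(axis=0))  # [-2. -2. -1.] [2. 0. 1.]
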
def to_bev_box2d(self, oblique=True, voxel_size=0.1):
"""
:param bev_shape: (2) for bev shape (h, w), => (y_max, x_max) in image
:param voxel_size: float, 0.1m
:param oblique:
:return: box2d (4, 2)/ (4) in image coordinate
"""
if oblique:
corners3d = self.generate_corners3d()
xz_corners = corners3d[0:4, [0, 2]]
box2d = np.zeros((4, 2), dtype=np.int32)
box2d[:, 0] = ((xz_corners[:, 0] - Object3d.MIN_XZ[0]) / voxel_size).astype(np.int32)
box2d[:, 1] = Object3d.BEV_SHAPE[0] - 1 - ((xz_corners[:, 1] - Object3d.MIN_XZ[1]) / voxel_size).astype(np.int32)
box2d[:, 0] = np.clip(box2d[:, 0], 0, Object3d.BEV_SHAPE[1])
box2d[:, 1] = np.clip(box2d[:, 1], 0, Object3d.BEV_SHAPE[0])
else:
box2d = np.zeros(4, dtype=np.int32)
# discrete_center = np.floor((self.pos / voxel_size)).astype(np.int32)
cu = np.floor((self.pos[0] - Object3d.MIN_XZ[0]) / voxel_size).astype(np.int32)
cv = Object3d.BEV_SHAPE[0] - 1 - ((self.pos[2] - Object3d.MIN_XZ[1]) / voxel_size).astype(np.int32)
half_l, half_w = int(self.l / voxel_size / 2), int(self.w / voxel_size / 2)
box2d[0], box2d[1] = cu - half_l, cv - half_w
box2d[2], box2d[3] = cu + half_l, cv + half_w
return box2d

to_bev_box2d | python | sshaoshuai/PointRCNN | lib/utils/object3d.py | MIT
https://github.com/sshaoshuai/PointRCNN/blob/master/lib/utils/object3d.py

def nms_gpu(boxes, scores, thresh):
"""
:param boxes: (N, 5) [x1, y1, x2, y2, ry]
:param scores: (N)
:param thresh:
:return:
"""
# areas = (x2 - x1) * (y2 - y1)
order = scores.sort(0, descending=True)[1]
boxes = boxes[order].contiguous()
keep = torch.LongTensor(boxes.size(0))
num_out = iou3d_cuda.nms_gpu(boxes, keep, thresh)
return order[keep[:num_out].cuda()].contiguous()

nms_gpu | python | sshaoshuai/PointRCNN | lib/utils/iou3d/iou3d_utils.py | MIT
https://github.com/sshaoshuai/PointRCNN/blob/master/lib/utils/iou3d/iou3d_utils.py

def nms_normal_gpu(boxes, scores, thresh):
"""
:param boxes: (N, 5) [x1, y1, x2, y2, ry]
:param scores: (N)
:param thresh:
:return:
"""
# areas = (x2 - x1) * (y2 - y1)
order = scores.sort(0, descending=True)[1]
boxes = boxes[order].contiguous()
keep = torch.LongTensor(boxes.size(0))
num_out = iou3d_cuda.nms_normal_gpu(boxes, keep, thresh)
return order[keep[:num_out].cuda()].contiguous()

nms_normal_gpu | python | sshaoshuai/PointRCNN | lib/utils/iou3d/iou3d_utils.py | MIT
https://github.com/sshaoshuai/PointRCNN/blob/master/lib/utils/iou3d/iou3d_utils.py

def roipool3d_gpu(pts, pts_feature, boxes3d, pool_extra_width, sampled_pt_num=512):
"""
:param pts: (B, N, 3)
:param pts_feature: (B, N, C)
:param boxes3d: (B, M, 7)
:param pool_extra_width: float
:param sampled_pt_num: int
:return:
pooled_features: (B, M, 512, 3 + C)
pooled_empty_flag: (B, M)
"""
batch_size, boxes_num, feature_len = pts.shape[0], boxes3d.shape[1], pts_feature.shape[2]
pooled_boxes3d = kitti_utils.enlarge_box3d(boxes3d.view(-1, 7), pool_extra_width).view(batch_size, -1, 7)
pooled_features = torch.cuda.FloatTensor(torch.Size((batch_size, boxes_num,
sampled_pt_num, 3 + feature_len))).zero_()
pooled_empty_flag = torch.cuda.IntTensor(torch.Size((batch_size, boxes_num))).zero_()
roipool3d_cuda.forward(pts.contiguous(), pooled_boxes3d.contiguous(),
pts_feature.contiguous(), pooled_features, pooled_empty_flag)
return pooled_features, pooled_empty_flag

roipool3d_gpu | python | sshaoshuai/PointRCNN | lib/utils/roipool3d/roipool3d_utils.py | MIT
https://github.com/sshaoshuai/PointRCNN/blob/master/lib/utils/roipool3d/roipool3d_utils.py

def pts_in_boxes3d_cpu(pts, boxes3d):
"""
:param pts: (N, 3) in rect-camera coords
:param boxes3d: (M, 7)
:return: boxes_pts_mask_list: (M), list with [(N), (N), ..]
"""
if not pts.is_cuda:
pts = pts.float().contiguous()
boxes3d = boxes3d.float().contiguous()
pts_flag = torch.LongTensor(torch.Size((boxes3d.size(0), pts.size(0)))) # (M, N)
roipool3d_cuda.pts_in_boxes3d_cpu(pts_flag, pts, boxes3d)
boxes_pts_mask_list = []
for k in range(0, boxes3d.shape[0]):
cur_mask = pts_flag[k] > 0
boxes_pts_mask_list.append(cur_mask)
return boxes_pts_mask_list
else:
raise NotImplementedError

pts_in_boxes3d_cpu | python | sshaoshuai/PointRCNN | lib/utils/roipool3d/roipool3d_utils.py | MIT
https://github.com/sshaoshuai/PointRCNN/blob/master/lib/utils/roipool3d/roipool3d_utils.py

def roipool_pc_cpu(pts, pts_feature, boxes3d, sampled_pt_num):
"""
:param pts: (N, 3)
:param pts_feature: (N, C)
:param boxes3d: (M, 7)
:param sampled_pt_num: int
:return:
"""
pts = pts.cpu().float().contiguous()
pts_feature = pts_feature.cpu().float().contiguous()
boxes3d = boxes3d.cpu().float().contiguous()
assert pts.shape[0] == pts_feature.shape[0] and pts.shape[1] == 3, '%s %s' % (pts.shape, pts_feature.shape)
assert pts.is_cuda is False
pooled_pts = torch.FloatTensor(torch.Size((boxes3d.shape[0], sampled_pt_num, 3))).zero_()
pooled_features = torch.FloatTensor(torch.Size((boxes3d.shape[0], sampled_pt_num, pts_feature.shape[1]))).zero_()
pooled_empty_flag = torch.LongTensor(boxes3d.shape[0]).zero_()
roipool3d_cuda.roipool3d_cpu(pts, boxes3d, pts_feature, pooled_pts, pooled_features, pooled_empty_flag)
return pooled_pts, pooled_features, pooled_empty_flag

roipool_pc_cpu | python | sshaoshuai/PointRCNN | lib/utils/roipool3d/roipool3d_utils.py | MIT
https://github.com/sshaoshuai/PointRCNN/blob/master/lib/utils/roipool3d/roipool3d_utils.py

def roipool3d_cpu(boxes3d, pts, pts_feature, pts_extra_input, pool_extra_width, sampled_pt_num=512,
canonical_transform=True):
"""
:param boxes3d: (N, 7)
:param pts: (N, 3)
:param pts_feature: (N, C)
:param pts_extra_input: (N, C2)
:param pool_extra_width: constant
:param sampled_pt_num: constant
:return:
"""
pooled_boxes3d = kitti_utils.enlarge_box3d(boxes3d, pool_extra_width)
pts_feature_all = np.concatenate((pts_extra_input, pts_feature), axis=1)
# Note: if pooled_empty_flag[i] > 0, the pooled_pts[i], pooled_features[i] will be zero
pooled_pts, pooled_features, pooled_empty_flag = \
roipool_pc_cpu(torch.from_numpy(pts), torch.from_numpy(pts_feature_all),
torch.from_numpy(pooled_boxes3d), sampled_pt_num)
extra_input_len = pts_extra_input.shape[1]
sampled_pts_input = torch.cat((pooled_pts, pooled_features[:, :, 0:extra_input_len]), dim=2).numpy()
sampled_pts_feature = pooled_features[:, :, extra_input_len:].numpy()
if canonical_transform:
# Translate to the roi coordinates
roi_ry = boxes3d[:, 6] % (2 * np.pi) # 0~2pi
roi_center = boxes3d[:, 0:3]
# shift to center
sampled_pts_input[:, :, 0:3] = sampled_pts_input[:, :, 0:3] - roi_center[:, np.newaxis, :]
for k in range(sampled_pts_input.shape[0]):
sampled_pts_input[k] = kitti_utils.rotate_pc_along_y(sampled_pts_input[k], roi_ry[k])
return sampled_pts_input, sampled_pts_feature
return sampled_pts_input, sampled_pts_feature, pooled_empty_flag.numpy()

roipool3d_cpu | python | sshaoshuai/PointRCNN | lib/utils/roipool3d/roipool3d_utils.py | MIT
https://github.com/sshaoshuai/PointRCNN/blob/master/lib/utils/roipool3d/roipool3d_utils.py

def get_valid_flag(pts_rect, pts_img, pts_rect_depth, img_shape):
"""
Valid point should be in the image (and in the PC_AREA_SCOPE)
:param pts_rect: (N, 3), points in rect camera coordinates
:param pts_img: (N, 2), projected (u, v) image coordinates
:param pts_rect_depth: (N), depth of each point
:param img_shape: (H, W) of the image
:return: (N), bool mask of valid points
"""
val_flag_1 = np.logical_and(pts_img[:, 0] >= 0, pts_img[:, 0] < img_shape[1])
val_flag_2 = np.logical_and(pts_img[:, 1] >= 0, pts_img[:, 1] < img_shape[0])
val_flag_merge = np.logical_and(val_flag_1, val_flag_2)
pts_valid_flag = np.logical_and(val_flag_merge, pts_rect_depth >= 0)
if PC_REDUCE_BY_RANGE:
x_range, y_range, z_range = PC_AREA_SCOPE
pts_x, pts_y, pts_z = pts_rect[:, 0], pts_rect[:, 1], pts_rect[:, 2]
range_flag = (pts_x >= x_range[0]) & (pts_x <= x_range[1]) \
& (pts_y >= y_range[0]) & (pts_y <= y_range[1]) \
& (pts_z >= z_range[0]) & (pts_z <= z_range[1])
pts_valid_flag = pts_valid_flag & range_flag
return pts_valid_flag

get_valid_flag | python | sshaoshuai/PointRCNN | tools/generate_aug_scene.py | MIT
https://github.com/sshaoshuai/PointRCNN/blob/master/tools/generate_aug_scene.py

def calculate_iou_partly(gt_annos, dt_annos, metric, num_parts=50):
"""fast iou algorithm. this function can be used independently to
do result analysis. Must be used in CAMERA coordinate system.
Args:
gt_annos: dict, must from get_label_annos() in kitti_common.py
dt_annos: dict, must from get_label_annos() in kitti_common.py
metric: eval type. 0: bbox, 1: bev, 2: 3d
num_parts: int. a parameter for fast calculate algorithm
"""
assert len(gt_annos) == len(dt_annos)
total_dt_num = np.stack([len(a["name"]) for a in dt_annos], 0)
total_gt_num = np.stack([len(a["name"]) for a in gt_annos], 0)
num_examples = len(gt_annos)
split_parts = get_split_parts(num_examples, num_parts)
parted_overlaps = []
example_idx = 0
for num_part in split_parts:
gt_annos_part = gt_annos[example_idx:example_idx + num_part]
dt_annos_part = dt_annos[example_idx:example_idx + num_part]
if metric == 0:
gt_boxes = np.concatenate([a["bbox"] for a in gt_annos_part], 0)
dt_boxes = np.concatenate([a["bbox"] for a in dt_annos_part], 0)
overlap_part = image_box_overlap(gt_boxes, dt_boxes)
elif metric == 1:
loc = np.concatenate(
[a["location"][:, [0, 2]] for a in gt_annos_part], 0)
dims = np.concatenate(
[a["dimensions"][:, [0, 2]] for a in gt_annos_part], 0)
rots = np.concatenate([a["rotation_y"] for a in gt_annos_part], 0)
gt_boxes = np.concatenate(
[loc, dims, rots[..., np.newaxis]], axis=1)
loc = np.concatenate(
[a["location"][:, [0, 2]] for a in dt_annos_part], 0)
dims = np.concatenate(
[a["dimensions"][:, [0, 2]] for a in dt_annos_part], 0)
rots = np.concatenate([a["rotation_y"] for a in dt_annos_part], 0)
dt_boxes = np.concatenate(
[loc, dims, rots[..., np.newaxis]], axis=1)
overlap_part = bev_box_overlap(gt_boxes, dt_boxes).astype(
np.float64)
elif metric == 2:
loc = np.concatenate([a["location"] for a in gt_annos_part], 0)
dims = np.concatenate([a["dimensions"] for a in gt_annos_part], 0)
rots = np.concatenate([a["rotation_y"] for a in gt_annos_part], 0)
gt_boxes = np.concatenate(
[loc, dims, rots[..., np.newaxis]], axis=1)
loc = np.concatenate([a["location"] for a in dt_annos_part], 0)
dims = np.concatenate([a["dimensions"] for a in dt_annos_part], 0)
rots = np.concatenate([a["rotation_y"] for a in dt_annos_part], 0)
dt_boxes = np.concatenate(
[loc, dims, rots[..., np.newaxis]], axis=1)
overlap_part = d3_box_overlap(gt_boxes, dt_boxes).astype(
np.float64)
else:
raise ValueError("unknown metric")
parted_overlaps.append(overlap_part)
example_idx += num_part
overlaps = []
example_idx = 0
for j, num_part in enumerate(split_parts):
gt_annos_part = gt_annos[example_idx:example_idx + num_part]
dt_annos_part = dt_annos[example_idx:example_idx + num_part]
gt_num_idx, dt_num_idx = 0, 0
for i in range(num_part):
gt_box_num = total_gt_num[example_idx + i]
dt_box_num = total_dt_num[example_idx + i]
overlaps.append(
parted_overlaps[j][gt_num_idx:gt_num_idx + gt_box_num,
dt_num_idx:dt_num_idx + dt_box_num])
gt_num_idx += gt_box_num
dt_num_idx += dt_box_num
example_idx += num_part
return overlaps, parted_overlaps, total_gt_num, total_dt_num

calculate_iou_partly | python | sshaoshuai/PointRCNN | tools/kitti_object_eval_python/eval.py | MIT
https://github.com/sshaoshuai/PointRCNN/blob/master/tools/kitti_object_eval_python/eval.py

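get_split_parts is not shown in this listing; the sketch below is a hypothetical reconstruction of its apparent contract (same-size chunks of num_examples // num_parts plus a remainder chunk, summing to num_examples), which is all calculate_iou_partly relies on:

def get_split_parts_sketch(num_examples, num_parts):
    same_part = num_examples // num_parts
    remain = num_examples % num_parts
    if remain == 0:
        return [same_part] * num_parts
    return [same_part] * num_parts + [remain]

print(get_split_parts_sketch(105, 10))  # [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 5]
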
def eval_class(gt_annos,
dt_annos,
current_classes,
difficultys,
metric,
min_overlaps,
compute_aos=False,
num_parts=50):
"""Kitti eval. support 2d/bev/3d/aos eval. support 0.5:0.05:0.95 coco AP.
Args:
gt_annos: dict, must from get_label_annos() in kitti_common.py
dt_annos: dict, must from get_label_annos() in kitti_common.py
current_classes: list of int, 0: car, 1: pedestrian, 2: cyclist
difficultys: list of int. eval difficulty, 0: easy, 1: normal, 2: hard
metric: eval type. 0: bbox, 1: bev, 2: 3d
min_overlaps: float, min overlap. format: [num_overlap, metric, class].
num_parts: int. a parameter for fast calculate algorithm
Returns:
dict of recall, precision and aos
"""
assert len(gt_annos) == len(dt_annos)
num_examples = len(gt_annos)
split_parts = get_split_parts(num_examples, num_parts)
rets = calculate_iou_partly(dt_annos, gt_annos, metric, num_parts)
overlaps, parted_overlaps, total_dt_num, total_gt_num = rets
N_SAMPLE_PTS = 41
num_minoverlap = len(min_overlaps)
num_class = len(current_classes)
num_difficulty = len(difficultys)
precision = np.zeros(
[num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
recall = np.zeros(
[num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
aos = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
for m, current_class in enumerate(current_classes):
for l, difficulty in enumerate(difficultys):
rets = _prepare_data(gt_annos, dt_annos, current_class, difficulty)
(gt_datas_list, dt_datas_list, ignored_gts, ignored_dets,
dontcares, total_dc_num, total_num_valid_gt) = rets
for k, min_overlap in enumerate(min_overlaps[:, metric, m]):
thresholdss = []
for i in range(len(gt_annos)):
rets = compute_statistics_jit(
overlaps[i],
gt_datas_list[i],
dt_datas_list[i],
ignored_gts[i],
ignored_dets[i],
dontcares[i],
metric,
min_overlap=min_overlap,
thresh=0.0,
compute_fp=False)
tp, fp, fn, similarity, thresholds = rets
thresholdss += thresholds.tolist()
thresholdss = np.array(thresholdss)
thresholds = get_thresholds(thresholdss, total_num_valid_gt)
thresholds = np.array(thresholds)
pr = np.zeros([len(thresholds), 4])
idx = 0
for j, num_part in enumerate(split_parts):
gt_datas_part = np.concatenate(
gt_datas_list[idx:idx + num_part], 0)
dt_datas_part = np.concatenate(
dt_datas_list[idx:idx + num_part], 0)
dc_datas_part = np.concatenate(
dontcares[idx:idx + num_part], 0)
ignored_dets_part = np.concatenate(
ignored_dets[idx:idx + num_part], 0)
ignored_gts_part = np.concatenate(
ignored_gts[idx:idx + num_part], 0)
fused_compute_statistics(
parted_overlaps[j],
pr,
total_gt_num[idx:idx + num_part],
total_dt_num[idx:idx + num_part],
total_dc_num[idx:idx + num_part],
gt_datas_part,
dt_datas_part,
dc_datas_part,
ignored_gts_part,
ignored_dets_part,
metric,
min_overlap=min_overlap,
thresholds=thresholds,
compute_aos=compute_aos)
idx += num_part
for i in range(len(thresholds)):
recall[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 2])
precision[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 1])
if compute_aos:
aos[m, l, k, i] = pr[i, 3] / (pr[i, 0] + pr[i, 1])
for i in range(len(thresholds)):
precision[m, l, k, i] = np.max(
precision[m, l, k, i:], axis=-1)
recall[m, l, k, i] = np.max(recall[m, l, k, i:], axis=-1)
if compute_aos:
aos[m, l, k, i] = np.max(aos[m, l, k, i:], axis=-1)
ret_dict = {
"recall": recall,
"precision": precision,
"orientation": aos,
}
return ret_dict

eval_class | python | sshaoshuai/PointRCNN | tools/kitti_object_eval_python/eval.py | MIT
https://github.com/sshaoshuai/PointRCNN/blob/master/tools/kitti_object_eval_python/eval.py

def area(boxes, add1=False):
"""Computes area of boxes.
Args:
boxes: Numpy array with shape [N, 4] holding N boxes
Returns:
a numpy array with shape [N*1] representing box areas
"""
if add1:
return (boxes[:, 2] - boxes[:, 0] + 1.0) * (
boxes[:, 3] - boxes[:, 1] + 1.0)
else:
return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])

area | python | sshaoshuai/PointRCNN | tools/kitti_object_eval_python/kitti_common.py | MIT
https://github.com/sshaoshuai/PointRCNN/blob/master/tools/kitti_object_eval_python/kitti_common.py

def intersection(boxes1, boxes2, add1=False):
"""Compute pairwise intersection areas between boxes.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes
boxes2: a numpy array with shape [M, 4] holding M boxes
Returns:
a numpy array with shape [N, M] representing pairwise intersection areas
"""
[y_min1, x_min1, y_max1, x_max1] = np.split(boxes1, 4, axis=1)
[y_min2, x_min2, y_max2, x_max2] = np.split(boxes2, 4, axis=1)
all_pairs_min_ymax = np.minimum(y_max1, np.transpose(y_max2))
all_pairs_max_ymin = np.maximum(y_min1, np.transpose(y_min2))
if add1:
all_pairs_min_ymax += 1.0
intersect_heights = np.maximum(
np.zeros(all_pairs_max_ymin.shape),
all_pairs_min_ymax - all_pairs_max_ymin)
all_pairs_min_xmax = np.minimum(x_max1, np.transpose(x_max2))
all_pairs_max_xmin = np.maximum(x_min1, np.transpose(x_min2))
if add1:
all_pairs_min_xmax += 1.0
intersect_widths = np.maximum(
np.zeros(all_pairs_max_xmin.shape),
all_pairs_min_xmax - all_pairs_max_xmin)
return intersect_heights * intersect_widths

intersection | python | sshaoshuai/PointRCNN | tools/kitti_object_eval_python/kitti_common.py | MIT
https://github.com/sshaoshuai/PointRCNN/blob/master/tools/kitti_object_eval_python/kitti_common.py

def iou(boxes1, boxes2, add1=False):
"""Computes pairwise intersection-over-union between box collections.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes.
boxes2: a numpy array with shape [M, 4] holding M boxes.
Returns:
a numpy array with shape [N, M] representing pairwise iou scores.
"""
intersect = intersection(boxes1, boxes2, add1)
area1 = area(boxes1, add1)
area2 = area(boxes2, add1)
union = np.expand_dims(
area1, axis=1) + np.expand_dims(
area2, axis=0) - intersect
return intersect / union

iou | python | sshaoshuai/PointRCNN | tools/kitti_object_eval_python/kitti_common.py | MIT
https://github.com/sshaoshuai/PointRCNN/blob/master/tools/kitti_object_eval_python/kitti_common.py

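Usage sketch chaining the three helpers above (assumed in scope). Boxes are [y_min, x_min, y_max, x_max], per the np.split naming inside intersection(); the first pair overlaps in a 1x1 patch, each area is 4, so the IoU is 1/7:

import numpy as np

boxes1 = np.array([[0.0, 0.0, 2.0, 2.0]])
boxes2 = np.array([[1.0, 1.0, 3.0, 3.0],         # overlaps boxes1 in a 1x1 patch
                   [4.0, 4.0, 5.0, 5.0]])        # disjoint
print(iou(boxes1, boxes2))                       # [[0.1428... 0.]]
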
def rotate_iou_gpu_eval(boxes, query_boxes, criterion=-1, device_id=0):
"""rotated box iou running in gpu. 500x faster than cpu version
(take 5ms in one example with numba.cuda code).
convert from [this project](
https://github.com/hongzhenwang/RRPN-revise/tree/master/lib/rotation).
Args:
boxes (float tensor: [N, 5]): rbboxes. format: centers, dims,
angles(clockwise when positive)
query_boxes (float tensor: [K, 5]): [description]
device_id (int, optional): Defaults to 0. [description]
Returns:
[type]: [description]
"""
box_dtype = boxes.dtype
boxes = boxes.astype(np.float32)
query_boxes = query_boxes.astype(np.float32)
N = boxes.shape[0]
K = query_boxes.shape[0]
iou = np.zeros((N, K), dtype=np.float32)
if N == 0 or K == 0:
return iou
threadsPerBlock = 8 * 8
cuda.select_device(device_id)
blockspergrid = (div_up(N, threadsPerBlock), div_up(K, threadsPerBlock))
stream = cuda.stream()
with stream.auto_synchronize():
boxes_dev = cuda.to_device(boxes.reshape([-1]), stream)
query_boxes_dev = cuda.to_device(query_boxes.reshape([-1]), stream)
iou_dev = cuda.to_device(iou.reshape([-1]), stream)
rotate_iou_kernel_eval[blockspergrid, threadsPerBlock, stream](
N, K, boxes_dev, query_boxes_dev, iou_dev, criterion)
iou_dev.copy_to_host(iou.reshape([-1]), stream=stream)
return iou.astype(box_dtype)  # restore the caller's dtype; boxes itself was cast to float32 above
|
Rotated box IoU running on GPU; roughly 500x faster than the CPU version
(takes about 5ms on one example with numba.cuda code).
Converted from [this project](
https://github.com/hongzhenwang/RRPN-revise/tree/master/lib/rotation).
Args:
boxes (float array: [N, 5]): rbboxes in (center_x, center_y, w, h, angle)
format, with angles clockwise when positive.
query_boxes (float array: [K, 5]): rbboxes in the same format.
criterion (int, optional): overlap criterion passed to the kernel. Defaults to -1 (standard IoU).
device_id (int, optional): CUDA device to run on. Defaults to 0.
Returns:
a numpy array of shape [N, K] holding pairwise IoU values (cast back to the dtype of `boxes`).
|
rotate_iou_gpu_eval
|
python
|
sshaoshuai/PointRCNN
|
tools/kitti_object_eval_python/rotate_iou.py
|
https://github.com/sshaoshuai/PointRCNN/blob/master/tools/kitti_object_eval_python/rotate_iou.py
|
MIT
|
def split_bn_bias(layer_groups):
"Split the layers in `layer_groups` into batchnorm (`bn_types`) and non-batchnorm groups."
split_groups = []
for l in layer_groups:
l1, l2 = [], []
for c in l.children():
if isinstance(c, bn_types):
l2.append(c)
else:
l1.append(c)
split_groups += [nn.Sequential(*l1), nn.Sequential(*l2)]
return split_groups
|
Split the layers in `layer_groups` into batchnorm (`bn_types`) and non-batchnorm groups.
|
split_bn_bias
|
python
|
sshaoshuai/PointRCNN
|
tools/train_utils/fastai_optim.py
|
https://github.com/sshaoshuai/PointRCNN/blob/master/tools/train_utils/fastai_optim.py
|
MIT
|
def get_master(layer_groups, flat_master: bool = False):
"Return two lists, one for the model parameters in FP16 and one for the master parameters in FP32."
split_groups = split_bn_bias(layer_groups)
model_params = [[param for param in lg.parameters() if param.requires_grad] for lg in split_groups]
if flat_master:
master_params = []
for lg in model_params:
if len(lg) != 0:
mp = parameters_to_vector([param.data.float() for param in lg])
mp = torch.nn.Parameter(mp, requires_grad=True)
if mp.grad is None: mp.grad = mp.new(*mp.size())
master_params.append([mp])
else:
master_params.append([])
return model_params, master_params
else:
master_params = [[param.clone().float().detach() for param in lg] for lg in model_params]
for mp in master_params:
for param in mp: param.requires_grad = True
return model_params, master_params
|
Return two lists, one for the model parameters in FP16 and one for the master parameters in FP32.
|
get_master
|
python
|
sshaoshuai/PointRCNN
|
tools/train_utils/fastai_optim.py
|
https://github.com/sshaoshuai/PointRCNN/blob/master/tools/train_utils/fastai_optim.py
|
MIT
|
def model_g2master_g(model_params, master_params, flat_master: bool = False) -> None:
"Copy the `model_params` gradients to `master_params` for the optimizer step."
if flat_master:
for model_group, master_group in zip(model_params, master_params):
if len(master_group) != 0:
master_group[0].grad.data.copy_(parameters_to_vector([p.grad.data.float() for p in model_group]))
else:
for model_group, master_group in zip(model_params, master_params):
for model, master in zip(model_group, master_group):
if model.grad is not None:
if master.grad is None: master.grad = master.data.new(*master.data.size())
master.grad.data.copy_(model.grad.data)
else:
master.grad = None
|
Copy the `model_params` gradients to `master_params` for the optimizer step.
|
model_g2master_g
|
python
|
sshaoshuai/PointRCNN
|
tools/train_utils/fastai_optim.py
|
https://github.com/sshaoshuai/PointRCNN/blob/master/tools/train_utils/fastai_optim.py
|
MIT
|
def listify(p=None, q=None):
"Make `p` listy and the same length as `q`."
if p is None:
p = []
elif isinstance(p, str):
p = [p]
elif not isinstance(p, Iterable):
p = [p]
n = q if type(q) == int else len(p) if q is None else len(q)
if len(p) == 1: p = p * n
assert len(p) == n, f'List len mismatch ({len(p)} vs {n})'
return list(p)
|
Make `p` listy and the same length as `q`.
|
listify
|
python
|
sshaoshuai/PointRCNN
|
tools/train_utils/fastai_optim.py
|
https://github.com/sshaoshuai/PointRCNN/blob/master/tools/train_utils/fastai_optim.py
|
MIT
|
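A few illustrative calls, following directly from the definition above:

listify(1e-3, 3)           # -> [0.001, 0.001, 0.001]  (broadcast scalar to length 3)
listify([1e-4, 1e-3], 2)   # -> [0.0001, 0.001]
listify('lr', ['a', 'b'])  # -> ['lr', 'lr']
listify(None)              # -> []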
def trainable_params(m: nn.Module):
"Return list of trainable params in `m`."
res = filter(lambda p: p.requires_grad, m.parameters())
return res
|
Return list of trainable params in `m`.
|
trainable_params
|
python
|
sshaoshuai/PointRCNN
|
tools/train_utils/fastai_optim.py
|
https://github.com/sshaoshuai/PointRCNN/blob/master/tools/train_utils/fastai_optim.py
|
MIT
|
def create(cls, opt_func, lr,
layer_groups, **kwargs):
"Create an `optim.Optimizer` from `opt_func` with `lr`. Set lr on `layer_groups`."
split_groups = split_bn_bias(layer_groups)
opt = opt_func([{'params': trainable_params(l), 'lr': 0} for l in split_groups])
opt = cls(opt, **kwargs)
opt.lr, opt.opt_func = listify(lr, layer_groups), opt_func
return opt
|
Create an `optim.Optimizer` from `opt_func` with `lr`. Set lr on `layer_groups`.
|
create
|
python
|
sshaoshuai/PointRCNN
|
tools/train_utils/fastai_optim.py
|
https://github.com/sshaoshuai/PointRCNN/blob/master/tools/train_utils/fastai_optim.py
|
MIT
|
def new(self, layer_groups):
"Create a new `OptimWrapper` from `self` with another `layer_groups` but the same hyper-parameters."
opt_func = getattr(self, 'opt_func', self.opt.__class__)
return self.create(opt_func, self.lr, layer_groups, wd=self.wd, true_wd=self.true_wd, bn_wd=self.bn_wd)
|
Create a new `OptimWrapper` from `self` with another `layer_groups` but the same hyper-parameters.
|
new
|
python
|
sshaoshuai/PointRCNN
|
tools/train_utils/fastai_optim.py
|
https://github.com/sshaoshuai/PointRCNN/blob/master/tools/train_utils/fastai_optim.py
|
MIT
|
def step(self) -> None:
"Set weight decay and step optimizer."
# weight decay outside of optimizer step (AdamW)
if self.true_wd:
for lr, wd, pg1, pg2 in zip(self._lr, self._wd, self.opt.param_groups[::2], self.opt.param_groups[1::2]):
for p in pg1['params']:
# When some parameters are fixed: Shaoshuai Shi
if p.requires_grad is False:
continue
p.data.mul_(1 - wd * lr)
if self.bn_wd:
for p in pg2['params']:
# When some parameters are fixed: Shaoshuai Shi
if p.requires_grad is False:
continue
p.data.mul_(1 - wd * lr)
self.set_val('weight_decay', listify(0, self._wd))
self.opt.step()
|
Set weight decay and step optimizer.
|
step
|
python
|
sshaoshuai/PointRCNN
|
tools/train_utils/fastai_optim.py
|
https://github.com/sshaoshuai/PointRCNN/blob/master/tools/train_utils/fastai_optim.py
|
MIT
|
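The `true_wd` branch implements decoupled (AdamW-style) weight decay: each parameter is shrunk multiplicatively before the inner optimizer step, while the optimizer's own `weight_decay` is zeroed so the decay is not applied twice. A minimal numeric sketch of that update:

p, lr, wd = 1.0, 0.1, 0.01
p = p * (1 - wd * lr)  # decoupled decay: 1.0 -> 0.999
# the gradient step then runs with weight_decay = 0 inside the wrapped optimizer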
def clear(self):
"Reset the state of the inner optimizer."
sd = self.state_dict()
sd['state'] = {}
self.load_state_dict(sd)
|
Reset the state of the inner optimizer.
|
clear
|
python
|
sshaoshuai/PointRCNN
|
tools/train_utils/fastai_optim.py
|
https://github.com/sshaoshuai/PointRCNN/blob/master/tools/train_utils/fastai_optim.py
|
MIT
|
def beta(self, val: float) -> None:
"Set beta (or alpha as makes sense for given optimizer)."
if val is None: return
if 'betas' in self.opt_keys:
self.set_val('betas', (self._mom, listify(val, self._beta)))
elif 'alpha' in self.opt_keys:
self.set_val('alpha', listify(val, self._beta))
self._beta = listify(val, self._beta)
|
Set beta (or alpha as makes sense for given optimizer).
|
beta
|
python
|
sshaoshuai/PointRCNN
|
tools/train_utils/fastai_optim.py
|
https://github.com/sshaoshuai/PointRCNN/blob/master/tools/train_utils/fastai_optim.py
|
MIT
|
def read_defaults(self) -> None:
"Read the values inside the optimizer for the hyper-parameters."
self._beta = None
if 'lr' in self.opt_keys: self._lr = self.read_val('lr')
if 'momentum' in self.opt_keys: self._mom = self.read_val('momentum')
if 'alpha' in self.opt_keys: self._beta = self.read_val('alpha')
if 'betas' in self.opt_keys: self._mom, self._beta = self.read_val('betas')
if 'weight_decay' in self.opt_keys: self._wd = self.read_val('weight_decay')
|
Read the values inside the optimizer for the hyper-parameters.
|
read_defaults
|
python
|
sshaoshuai/PointRCNN
|
tools/train_utils/fastai_optim.py
|
https://github.com/sshaoshuai/PointRCNN/blob/master/tools/train_utils/fastai_optim.py
|
MIT
|
def set_val(self, key: str, val, bn_groups: bool = True):
"Set `val` inside the optimizer dictionary at `key`."
if is_tuple(val): val = [(v1, v2) for v1, v2 in zip(*val)]
for v, pg1, pg2 in zip(val, self.opt.param_groups[::2], self.opt.param_groups[1::2]):
pg1[key] = v
if bn_groups: pg2[key] = v
return val
|
Set `val` inside the optimizer dictionary at `key`.
|
set_val
|
python
|
sshaoshuai/PointRCNN
|
tools/train_utils/fastai_optim.py
|
https://github.com/sshaoshuai/PointRCNN/blob/master/tools/train_utils/fastai_optim.py
|
MIT
|
def read_val(self, key: str):
"Read a hyperparameter `key` in the optimizer dictionary."
val = [pg[key] for pg in self.opt.param_groups[::2]]
if is_tuple(val[0]): val = [o[0] for o in val], [o[1] for o in val]
return val
|
Read a hyperparameter `key` in the optimizer dictionary.
|
read_val
|
python
|
sshaoshuai/PointRCNN
|
tools/train_utils/fastai_optim.py
|
https://github.com/sshaoshuai/PointRCNN/blob/master/tools/train_utils/fastai_optim.py
|
MIT
|
def create(cls, opt_func, lr,
layer_groups, model, flat_master=False, loss_scale=512.0, **kwargs):
"Create an `optim.Optimizer` from `opt_func` with `lr`. Set lr on `layer_groups`."
opt = OptimWrapper.create(opt_func, lr, layer_groups, **kwargs)
opt.model_params, opt.master_params = get_master(layer_groups, flat_master)
opt.flat_master = flat_master
opt.loss_scale = loss_scale
opt.model = model
# Changes the optimizer so that the optimization step is done in FP32.
mom, wd, beta = opt.mom, opt.wd, opt.beta
lrs = [lr for lr in opt._lr for _ in range(2)]
opt_params = [{'params': mp, 'lr': lr} for mp, lr in zip(opt.master_params, lrs)]
opt.opt = opt_func(opt_params)
opt.mom, opt.wd, opt.beta = mom, wd, beta
return opt
|
Create an `optim.Optimizer` from `opt_func` with `lr`. Set lr on `layer_groups`.
|
create
|
python
|
sshaoshuai/PointRCNN
|
tools/train_utils/fastai_optim.py
|
https://github.com/sshaoshuai/PointRCNN/blob/master/tools/train_utils/fastai_optim.py
|
MIT
|
def annealing_cos(start, end, pct):
"Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0."
cos_out = np.cos(np.pi * pct) + 1
return end + (start - end) / 2 * cos_out
|
Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0.
|
annealing_cos
|
python
|
sshaoshuai/PointRCNN
|
tools/train_utils/learning_schedules_fastai.py
|
https://github.com/sshaoshuai/PointRCNN/blob/master/tools/train_utils/learning_schedules_fastai.py
|
MIT
|
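Sanity values that follow directly from the formula:

annealing_cos(1.0, 0.0, 0.0)  # -> 1.0  (pct = 0: cos term is 2, returns start)
annealing_cos(1.0, 0.0, 0.5)  # -> 0.5  (halfway through the anneal)
annealing_cos(1.0, 0.0, 1.0)  # -> 0.0  (pct = 1: cos term is 0, returns end)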
def extend_body_states(
self,
extend_body_pos: torch.Tensor,
extend_body_parent_ids: list[int],
):
"""
This function is for appending the link states to the robot state. For example, the H1 robot doesn't have hands
and a head in its robot state. However, we are still interested in computing its error and considering these as
important key points. Thus, we will use this function to add the head and hand states to the robot state.
Args:
extend_body_pos (torch.Tensor): Positions of the extended bodies relative to their parent bodies.
Shape is (num_envs, num_extended_bodies, 3).
extend_body_parent_ids (list[int]): List of parent body indices for each extended body.
Raises:
ValueError: If the number of extended bodies does not match the length of extend_body_parent_ids.
"""
if extend_body_pos.shape[1] != len(extend_body_parent_ids):
print("[INFO]: extend_body_pos shape:", extend_body_pos.shape)
print("[INFO]: extend_body_parent_ids lengths:", len(extend_body_parent_ids))
raise ValueError(
"Dimension mismatch: number of extended bodies does not match the length of its parent ID list."
)
num_envs = self.body_pos.shape[0]
# Compute extended body positions
extend_curr_pos = (
torch_utils.my_quat_rotate(
math_utils.convert_quat(self.body_rot[:, extend_body_parent_ids].reshape(-1, 4), to="xyzw"),
                extend_body_pos.reshape(-1, 3),
).view(num_envs, -1, 3)
+ self.body_pos[:, extend_body_parent_ids]
)
self.body_pos_extend = torch.cat([self.body_pos, extend_curr_pos], dim=1)
# Compute extended body orientations
extend_curr_rot = self.body_rot[:, extend_body_parent_ids].clone()
self.body_rot_extend = torch.cat([self.body_rot, extend_curr_rot], dim=1)
# Compute extended body linear velocities
self.body_lin_vel_extend = torch.cat(
[self.body_lin_vel, self.body_lin_vel[:, extend_body_parent_ids].clone()], dim=1
)
# Compute extended body angular velocities
self.body_ang_vel_extend = torch.cat(
[self.body_ang_vel, self.body_ang_vel[:, extend_body_parent_ids].clone()], dim=1
)
|
This function is for appending the link states to the robot state. For example, the H1 robot doesn't have hands
and a head in its robot state. However, we are still interested in computing its error and considering these as
important key points. Thus, we will use this function to add the head and hand states to the robot state.
Args:
extend_body_pos (torch.Tensor): Positions of the extended bodies relative to their parent bodies.
Shape is (num_envs, num_extended_bodies, 3).
extend_body_parent_ids (list[int]): List of parent body indices for each extended body.
Raises:
ValueError: If the number of extended bodies does not match the length of extend_body_parent_ids.
|
extend_body_states
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/body_state.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/body_state.py
|
Apache-2.0
|
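The core of the extension is a rotate-then-translate of each fixed offset into the world frame: p_world = R(q_parent) · offset + p_parent. A minimal sketch for a single extended body (the shapes and the identity quaternion are illustrative; `my_quat_rotate` is the same xyzw-convention helper used in the function above):

import torch

num_envs = 2
parent_pos = torch.zeros(num_envs, 3)                                    # body_pos[:, pid]
parent_quat = torch.tensor([[0.0, 0.0, 0.0, 1.0]]).repeat(num_envs, 1)   # xyzw identity
offset = torch.tensor([[0.0, 0.0, 0.3]]).repeat(num_envs, 1)             # e.g. a head link
p_world = torch_utils.my_quat_rotate(parent_quat, offset) + parent_pos   # (num_envs, 3)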
def get_observations(self) -> torch.Tensor:
"""Gets policy observations for each environment based on the mode."""
if self._mode.is_distill_mode():
return self.get_student_observations()
return self.get_teacher_observations()
|
Gets policy observations for each environment based on the mode.
|
get_observations
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/environment_wrapper.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/environment_wrapper.py
|
Apache-2.0
|
def _init_empty_frames(self, frame: Frame):
"""Initialize empty frame buffers to store trajectory data for all environments.
Creates zero-filled tensors/arrays sized to hold the maximum possible number of frames
and environments, matching the data types and shapes of the input frame.
"""
max_possible_frames = max(self.max_frames_per_env).item()
max_possible_envs = len(self.max_frames_per_env)
for attr in vars(frame):
frame_data = getattr(frame, attr)
data_shape = frame_data.shape
if isinstance(frame_data, torch.Tensor):
            self._frames[attr] = torch.zeros(
                (max_possible_frames, max_possible_envs, *data_shape[1:]),
                dtype=frame_data.dtype,
                device=frame_data.device,
            )
|
Initialize empty frame buffers to store trajectory data for all environments.
Creates zero-filled tensors/arrays sized to hold the maximum possible number of frames
and environments, matching the data types and shapes of the input frame.
|
_init_empty_frames
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/evaluator.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/evaluator.py
|
Apache-2.0
|
def add_frame(self, frame: Frame):
"""Add a frame to each trajectory in the episode.
Args:
frame (Frame): Frame containing trajectory data for all environments at this timestep
"""
# Initialize frame buffers if this is the first frame being added
if len(self._frames) == 0:
self._init_empty_frames(frame)
# Store each frame attribute (body positions, joint angles etc) in the corresponding buffer
for attr in vars(frame):
# Get the data for this attribute from the frame
frame_data = getattr(frame, attr)
# Only store tensor data, skip other attributes
if isinstance(frame_data, torch.Tensor):
# Add the frame data at the current timestep index
self._frames[attr][self._num_frames_added] = frame_data
# Increment counter tracking number of frames added
self._num_frames_added += 1
|
Add a frame to each trajectory in the episode.
Args:
frame (Frame): Frame containing trajectory data for all environments at this timestep
|
add_frame
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/evaluator.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/evaluator.py
|
Apache-2.0
|
def complete(self):
"""Aggregate frames into episode data more efficiently.
Instead of splitting data environment by environment, we can use tensor operations
to split all environments at once, significantly reducing loop overhead.
"""
num_envs = self.max_frames_per_env.shape[0]
for key, data in self._frames.items():
if isinstance(data, torch.Tensor):
setattr(self, key, [data[:, i] for i in range(num_envs)])
|
Aggregate frames into episode data more efficiently.
Instead of splitting data environment by environment, we can use tensor operations
to split all environments at once, significantly reducing loop overhead.
|
complete
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/evaluator.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/evaluator.py
|
Apache-2.0
|
def filter(self, ids: list[int]) -> Episode:
"""Filter episode data to only include specified environment indices."""
# Create new empty episode to store filtered data
filtered = Episode(self.max_frames_per_env)
# Iterate through all attributes of this episode
for attr, data in vars(self).items():
# Only process attributes that are lists
if isinstance(data, list):
# For each attribute, keep only the trajectories at the specified indices
setattr(filtered, attr, [data[i] for i in ids])
return filtered
|
Filter episode data to only include specified environment indices.
|
filter
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/evaluator.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/evaluator.py
|
Apache-2.0
|
def trim(self, terminated_frame: torch.Tensor, end_id: int):
"""Helper method to cut data based on terminated frame.
This function creates a new Episode object with truncated data. For each environment,
it keeps only the frames up to the termination point specified in terminated_frame.
It then further filters to keep only environments up to end_id.
Args:
terminated_frame: Tensor containing the frame index where each env terminated
end_id: Only keep environments up to this index
Returns:
A new Episode object with the trimmed data
"""
trimmed = Episode(self.max_frames_per_env)
for attr, data in vars(self).items():
if isinstance(data, list):
setattr(trimmed, attr, [data[i][: terminated_frame[i]] for i in range(len(data))][:end_id])
return trimmed
|
Helper method to cut data based on terminated frame.
This function creates a new Episode object with truncated data. For each environment,
it keeps only the frames up to the termination point specified in terminated_frame.
It then further filters to keep only environments up to end_id.
Args:
terminated_frame: Tensor containing the frame index where each env terminated
end_id: Only keep environments up to this index
Returns:
A new Episode object with the trimmed data
|
trim
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/evaluator.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/evaluator.py
|
Apache-2.0
|
def update(
self,
episode: Episode,
episode_gt: Episode,
success_ids: list,
):
"""Update and compute metrics for trajectories from all simulation instances in one episode."""
self.num_motions += episode.num_envs
# First, compute metrics on trajectories from all instances.
self._compute_metrics(episode, episode_gt, self._all_metrics_by_episode, self._all_metrics_masked_by_episode)
if len(success_ids) == 0:
return
# Then, collect the trajectory from successful instances and compute metrics.
success_episodes = episode.filter(success_ids)
success_episodes_gt = episode_gt.filter(success_ids)
self._compute_metrics(
success_episodes,
success_episodes_gt,
self._success_metrics_by_episode,
self._success_metrics_masked_by_episode,
)
|
Update and compute metrics for trajectories from all simulation instances in one episode.
|
update
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/evaluator.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/evaluator.py
|
Apache-2.0
|
def _compute_link_metrics(
self,
body_pos: list[torch.Tensor],
body_pos_gt: list[torch.Tensor],
storage: dict[str, dict[str, list[float]]],
):
"""Compute metrics of trajectories and save them by their means and number of elements (as weights)."""
# compute_metrics_lite expects list of numpy arrays
body_pos_np = [body_pos_i.numpy() for body_pos_i in body_pos]
body_pos_gt_np = [body_pos_gt_i.numpy() for body_pos_gt_i in body_pos_gt]
metrics = compute_metrics_lite(body_pos_np, body_pos_gt_np)
for key, value in metrics.items():
self._record_metrics(key, np.mean(value).item(), value.size, storage)
|
Compute metrics of trajectories and save them by their means and number of elements (as weights).
|
_compute_link_metrics
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/evaluator.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/evaluator.py
|
Apache-2.0
|
def _compute_joint_metrics(
self,
episode: Episode,
episode_gt: Episode,
frame_weights: torch.Tensor,
storage: dict[str, dict[str, list[float]]],
):
"""Compute metrics of trajectories and save them by their means and number of elements (as weights)."""
self._compute_joint_tracking_error(
episode.upper_body_joint_pos,
episode_gt.upper_body_joint_pos,
frame_weights,
episode.num_envs,
storage,
"upper_body_joints_dist",
)
self._compute_joint_tracking_error(
episode.lower_body_joint_pos,
episode_gt.lower_body_joint_pos,
frame_weights,
episode.num_envs,
storage,
"lower_body_joints_dist",
)
|
Compute metrics of trajectories and save them by their means and number of elements (as weights).
|
_compute_joint_metrics
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/evaluator.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/evaluator.py
|
Apache-2.0
|
def compute_joint_tracking_error(
joint_pos: torch.Tensor, joint_pos_gt: torch.Tensor, frame_weights: torch.Tensor, num_envs: int
) -> float:
"""Compute weighted mean absolute joint position error across environments.
For each environment:
1. Take absolute difference between predicted and ground truth joint positions
2. Weight the differences by frame_weights to normalize across varying trajectory lengths
3. Take mean across joints
Finally, sum across environments and divide by num_envs for mean error.
"""
return torch.sum(torch.mean(torch.abs(joint_pos - joint_pos_gt), dim=1) * frame_weights).item() / num_envs
|
Compute weighted mean absolute joint position error across environments.
For each environment:
1. Take absolute difference between predicted and ground truth joint positions
2. Weight the differences by frame_weights to normalize across varying trajectory lengths
3. Take mean across joints
Finally, sum across environments and divide by num_envs for mean error.
|
compute_joint_tracking_error
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/evaluator.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/evaluator.py
|
Apache-2.0
|
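A small numeric check (a single environment with two frames; weighting each frame by 1/num_frames is an assumption inferred from the docstring's note on varying trajectory lengths):

import torch

joint_pos = torch.tensor([[0.0, 0.0], [1.0, 1.0]])     # 2 frames, 2 joints
joint_pos_gt = torch.tensor([[0.1, 0.1], [1.2, 1.2]])
frame_weights = torch.tensor([0.5, 0.5])               # one env, so each frame weighs 1/2
compute_joint_tracking_error(joint_pos, joint_pos_gt, frame_weights, num_envs=1)
# per-frame mean absolute errors are 0.1 and 0.2 -> weighted sum = 0.15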
def compute_height_error(
root_pos: torch.Tensor, root_pos_gt: torch.Tensor, frame_weights: torch.Tensor, num_envs: int
) -> float:
"""Compute weighted mean absolute height error across environments.
For each environment:
1. Takes absolute difference between predicted and ground truth root z-coordinates
2. Weights the differences by frame_weights to normalize across varying trajectory lengths
3. Takes mean across frames
Finally, sum across environments and divide by num_envs for mean error.
"""
return torch.sum(torch.abs(root_pos[:, 2] - root_pos_gt[:, 2]) * frame_weights).item() / num_envs
|
Compute weighted mean absolute height error across environments.
For each environment:
1. Takes absolute difference between predicted and ground truth root z-coordinates
2. Weights the differences by frame_weights to normalize across varying trajectory lengths
3. Takes mean across frames
Finally, sum across environments and divide by num_envs for mean error.
|
compute_height_error
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/evaluator.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/evaluator.py
|
Apache-2.0
|
def compute_vel_error(
vel: torch.Tensor,
rot: torch.Tensor,
vel_gt: torch.Tensor,
rot_gt: torch.Tensor,
frame_weights: torch.Tensor,
num_envs: int,
) -> float:
"""Compute weighted mean velocity tracking error across environments.
For each environment:
1. Convert velocities to local frame using inverse rotation
2. Take L2 norm of difference between predicted and ground truth local velocities
3. Weight by frame_weights and average across frames
Finally, sum across environments and divide by num_envs for mean error.
"""
local_vel_gt = math_utils.quat_rotate_inverse(rot_gt, vel_gt)
local_vel = math_utils.quat_rotate_inverse(rot, vel)
return torch.sum(torch.linalg.norm(local_vel - local_vel_gt, dim=-1) * frame_weights).item() / num_envs
|
Compute weighted mean velocity tracking error across environments.
For each environment:
1. Convert velocities to local frame using inverse rotation
2. Take L2 norm of difference between predicted and ground truth local velocities
3. Weight by frame_weights and average across frames
Finally, sum across environments and divide by num_envs for mean error.
|
compute_vel_error
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/evaluator.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/evaluator.py
|
Apache-2.0
|
def _compute_root_rot_tracking_error(
self,
root_rot: torch.Tensor,
root_rot_gt: torch.Tensor,
frame_weights: torch.Tensor,
num_envs: int,
storage: dict[str, dict[str, list[float]]],
):
"""Compute root rotation tracking error.
Args:
root_rot: Root rotation quaternions
root_rot_gt: Ground truth root rotation quaternions
Returns:
dict: Dictionary containing roll, pitch, yaw errors
"""
@torch.jit.script
def compute_rot_tracking_error(
quat1: torch.Tensor, quat2: torch.Tensor, frame_weights: torch.Tensor, num_envs: int
) -> tuple[float, float, float]:
"""Compute weighted mean rotation tracking error across environments.
For each environment:
1. Compute quaternion difference between predicted and ground truth rotations
2. Convert difference quaternion to Euler angles (roll, pitch, yaw)
3. Take absolute value of angles and weight by frame_weights
4. Average across frames
Finally, sum across environments and divide by num_envs for mean error.
Returns:
tuple[float, float, float]: Mean roll, pitch and yaw errors in radians
"""
quat_diff = math_utils.quat_mul(quat1, math_utils.quat_conjugate(quat2))
roll, pitch, yaw = math_utils.euler_xyz_from_quat(quat_diff)
roll_error = torch.sum(torch.abs(roll) * frame_weights).item() / num_envs
pitch_error = torch.sum(torch.abs(pitch) * frame_weights).item() / num_envs
yaw_error = torch.sum(torch.abs(yaw) * frame_weights).item() / num_envs
return roll_error, pitch_error, yaw_error
roll_error, pitch_error, yaw_error = compute_rot_tracking_error(root_rot, root_rot_gt, frame_weights, num_envs)
self._record_metrics("root_r_error", roll_error, num_envs, storage)
self._record_metrics("root_p_error", pitch_error, num_envs, storage)
self._record_metrics("root_y_error", yaw_error, num_envs, storage)
|
Compute root rotation tracking error.
Args:
root_rot: Root rotation quaternions
root_rot_gt: Ground truth root rotation quaternions
frame_weights: Per-frame weights used to normalize across trajectory lengths
num_envs: Number of environments
storage: Metric storage that the roll, pitch and yaw errors are recorded into
|
_compute_root_rot_tracking_error
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/evaluator.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/evaluator.py
|
Apache-2.0
|
def compute_rot_tracking_error(
quat1: torch.Tensor, quat2: torch.Tensor, frame_weights: torch.Tensor, num_envs: int
) -> tuple[float, float, float]:
"""Compute weighted mean rotation tracking error across environments.
For each environment:
1. Compute quaternion difference between predicted and ground truth rotations
2. Convert difference quaternion to Euler angles (roll, pitch, yaw)
3. Take absolute value of angles and weight by frame_weights
4. Average across frames
Finally, sum across environments and divide by num_envs for mean error.
Returns:
tuple[float, float, float]: Mean roll, pitch and yaw errors in radians
"""
quat_diff = math_utils.quat_mul(quat1, math_utils.quat_conjugate(quat2))
roll, pitch, yaw = math_utils.euler_xyz_from_quat(quat_diff)
roll_error = torch.sum(torch.abs(roll) * frame_weights).item() / num_envs
pitch_error = torch.sum(torch.abs(pitch) * frame_weights).item() / num_envs
yaw_error = torch.sum(torch.abs(yaw) * frame_weights).item() / num_envs
return roll_error, pitch_error, yaw_error
|
Compute weighted mean rotation tracking error across environments.
For each environment:
1. Compute quaternion difference between predicted and ground truth rotations
2. Convert difference quaternion to Euler angles (roll, pitch, yaw)
3. Take absolute value of angles and weight by frame_weights
4. Average across frames
Finally, sum across environments and divide by num_envs for mean error.
Returns:
tuple[float, float, float]: Mean roll, pitch and yaw errors in radians
|
compute_rot_tracking_error
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/evaluator.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/evaluator.py
|
Apache-2.0
|
def _record_metrics(self, name: str, mean: float, weight: int, storage: dict[str, dict[str, list[float]]]):
"""Record metrics by their means and number of elements (as weights)."""
if name not in storage:
storage[name] = {"means": [], "weights": []}
storage[name]["means"].append(mean)
storage[name]["weights"].append(weight)
|
Record metrics by their means and number of elements (as weights).
|
_record_metrics
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/evaluator.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/evaluator.py
|
Apache-2.0
|
def conclude(self):
"""At the end of the evaluation, computes the metrics over all tasks."""
self.all_metrics = {
key: np.average(value["means"], weights=value["weights"])
for key, value in self._all_metrics_by_episode.items()
}
self.success_metrics = {
key: np.average(value["means"], weights=value["weights"])
for key, value in self._success_metrics_by_episode.items()
}
self.all_metrics_masked = {
key: np.average(value["means"], weights=value["weights"])
for key, value in self._all_metrics_masked_by_episode.items()
}
self.success_metrics_masked = {
key: np.average(value["means"], weights=value["weights"])
for key, value in self._success_metrics_masked_by_episode.items()
}
|
At the end of the evaluation, computes the metrics over all tasks.
|
conclude
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/evaluator.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/evaluator.py
|
Apache-2.0
|
def save(self, directory: str):
"""Saves metrics to a time-stamped json file in ``directory``.
Args:
        directory (str): Directory to store the file in.
"""
file_dir = Path(directory)
file_dir.mkdir(parents=True, exist_ok=True)
timestamp = time.strftime("%Y%m%d-%H%M%S")
file_name = f"{timestamp}.json"
content = {
"num_motions": self.num_motions,
"success_rate": self.success_rate,
"all": self.all_metrics,
"success": self.success_metrics,
"all_masked": self.all_metrics_masked,
"success_masked": self.success_metrics_masked,
}
with open(file_dir.joinpath(file_name), "w") as fh:
json.dump(content, fh)
|
Saves metrics to a time-stamped json file in ``directory``.
Args:
directory (str): Directory to store the file in.
|
save
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/evaluator.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/evaluator.py
|
Apache-2.0
|
def __init__(
self,
env_wrapper: EnvironmentWrapper,
metrics_path: str | None = None,
):
"""Initializes the evaluator.
Args:
env_wrapper (EnvironmentWrapper): The environment that the evaluation is taking place.
metrics_path (str | None, optional): The directory that the metrics will be saved to. Defaults to None.
"""
self._num_envs = env_wrapper.num_envs
self._device = env_wrapper.device
self._ref_motion_mgr = env_wrapper.reference_motion_manager
self._ref_motion_start_id = 0
self._num_unique_ref_motions = self._ref_motion_mgr.num_unique_motions
self._ref_motion_frames = self._ref_motion_mgr.get_motion_num_steps()
self._metrics = MotionTrackingMetrics()
self._metrics_path = metrics_path
# Episode data
self._terminated = torch.zeros(self._num_envs, dtype=torch.bool, device=self._device)
self._terminated_frame = self._ref_motion_frames.detach().clone()
self._failed = torch.zeros((self._num_unique_ref_motions), dtype=torch.bool, device=self._device)
self._episode = Episode(max_frames_per_env=self._ref_motion_frames)
self._episode_gt = Episode(max_frames_per_env=self._ref_motion_frames)
# Status
self._pbar = tqdm(range(self._num_unique_ref_motions // self._num_envs), position=0, leave=True)
self._curr_steps = 0
self._num_episodes = 0
|
Initializes the evaluator.
Args:
env_wrapper (EnvironmentWrapper): The environment that the evaluation is taking place.
metrics_path (str | None, optional): The directory that the metrics will be saved to. Defaults to None.
|
__init__
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/evaluator.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/evaluator.py
|
Apache-2.0
|
def collect(self, dones: torch.Tensor, info: dict) -> bool:
"""Collects data from a step and updates internal states.
Args:
dones (torch.Tensor): environments that are terminated (failed) or truncated (timed out).
info (dict): Extra information collected from a step.
Returns:
            bool: Whether all current reference motions have been evaluated, in which case all environments need a reset.
"""
self._curr_steps += 1
# Get the environments that terminated at the most recent step
newly_terminated = torch.logical_and(~self._terminated, dones)
self._collect_step_data(newly_terminated, info=info)
self._terminated_frame[newly_terminated] = self._curr_steps
self._terminated = torch.logical_or(self._terminated, dones)
update_str = self._update_status_bar()
if self._terminated.sum() == self._num_envs:
self._aggregate_data()
self._num_episodes += self._num_envs
print(update_str)
return True
return False
|
Collects data from a step and updates internal states.
Args:
dones (torch.Tensor): environments that are terminated (failed) or truncated (timed out).
info (dict): Extra information collected from a step.
Returns:
bool: Whether all current reference motions have been evaluated, in which case all environments need a reset.
|
collect
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/evaluator.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/evaluator.py
|
Apache-2.0
|
def _collect_step_data(self, newly_terminated: torch.Tensor, info: dict):
"""Collects data after each step.
Args:
            newly_terminated (torch.Tensor): Boolean tensor marking environments that terminated at this step.
info (dict): Extra information collected from a step.
"""
state_data = info["data"]["state"]
ground_truth_data = info["data"]["ground_truth"]
body_pos = state_data["body_pos"]
num_envs, num_bodies, _ = body_pos.shape
mask = info["data"]["mask"]
body_mask = mask[:, :num_bodies]
body_mask = body_mask.unsqueeze(-1)
body_mask_expanded = body_mask.expand(num_envs, num_bodies, 3)
upper_body_joint_ids = info["data"]["upper_joint_ids"]
lower_body_joint_ids = info["data"]["lower_joint_ids"]
frame = self._build_frame(state_data, body_mask_expanded, num_envs, upper_body_joint_ids, lower_body_joint_ids)
frame_gt = self._build_frame(
ground_truth_data, body_mask_expanded, num_envs, upper_body_joint_ids, lower_body_joint_ids
)
self._update_failure_metrics(newly_terminated, info)
self._episode.add_frame(frame)
self._episode_gt.add_frame(frame_gt)
|
Collects data after each step.
Args:
newly_terminated (torch.Tensor): Boolean tensor marking environments that terminated at this step.
info (dict): Extra information collected from a step.
|
_collect_step_data
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/evaluator.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/evaluator.py
|
Apache-2.0
|
def _build_frame(
self, data: dict, mask: torch.Tensor, num_envs: int, upper_joint_ids: list, lower_joint_ids: list
) -> Frame:
"""Builds a frame from the data and mask.
Args:
data (dict): Dictionary containing trajectory data including body positions, joint positions, etc.
mask (torch.Tensor): Boolean mask array indicating which bodies to include in masked data.
num_envs (int): Number of environments.
upper_joint_ids (list): List of indices for upper body joints.
lower_joint_ids (list): List of indices for lower body joints.
Returns:
Frame: A Frame object containing the processed trajectory data.
"""
if torch.any(mask):
data["body_pos_masked"] = data["body_pos"][mask].reshape(num_envs, -1, 3)
else:
data["body_pos_masked"] = None
joint_pos = data.pop("joint_pos")
data["upper_body_joint_pos"] = joint_pos[:, upper_joint_ids]
data["lower_body_joint_pos"] = joint_pos[:, lower_joint_ids]
return Frame.from_dict(data)
|
Builds a frame from the data and mask.
Args:
data (dict): Dictionary containing trajectory data including body positions, joint positions, etc.
mask (torch.Tensor): Boolean mask array indicating which bodies to include in masked data.
num_envs (int): Number of environments.
upper_joint_ids (list): List of indices for upper body joints.
lower_joint_ids (list): List of indices for lower body joints.
Returns:
Frame: A Frame object containing the processed trajectory data.
|
_build_frame
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/evaluator.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/evaluator.py
|
Apache-2.0
|
def _update_failure_metrics(self, newly_terminated: torch.Tensor, info: dict):
"""Updates failure metrics based on termination conditions."""
start_id = self._ref_motion_start_id
end_id = min(self._ref_motion_start_id + self._num_envs, self._num_unique_ref_motions)
counted_envs = end_id - start_id
# Get failure conditions excluding reference motion length
failed_conditions = torch.stack(
[
v[:counted_envs].flatten()
for k, v in info["termination_conditions"].items()
if k != "reference_motion_length"
]
)
# Update failed environments
self._failed[start_id:end_id] |= torch.logical_and(
newly_terminated[:counted_envs], torch.any(failed_conditions, dim=0)
)
self._metrics.success_rate = 1 - torch.sum(self._failed).item() / end_id
|
Updates failure metrics based on termination conditions.
|
_update_failure_metrics
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/evaluator.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/evaluator.py
|
Apache-2.0
|
def _reset_data_buffer(self):
"""Resets data buffer for new episodes."""
self._terminated[:] = False
self._pbar.update(1)
self._pbar.refresh()
self._ref_motion_frames = self._ref_motion_mgr.get_motion_num_steps()
self._episode = Episode(max_frames_per_env=self._ref_motion_frames)
self._episode_gt = Episode(max_frames_per_env=self._ref_motion_frames)
self._terminated_frame = self._ref_motion_frames.detach().clone()
self._curr_steps = 0
|
Resets data buffer for new episodes.
|
_reset_data_buffer
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/evaluator.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/evaluator.py
|
Apache-2.0
|
def _update_status_bar(self):
"""Updates status bar in the console to display current progress and selected metrics."""
update_str = (
f"Terminated: {self._terminated.sum().item()} | max frames: {self._ref_motion_frames.max()} | steps"
f" {self._curr_steps} | Start: {self._ref_motion_start_id} | Succ rate: {self._metrics.success_rate:.3f} |"
f" Total failures: {self._failed.sum().item()} "
)
self._pbar.set_description(update_str)
return update_str
|
Updates status bar in the console to display current progress and selected metrics.
|
_update_status_bar
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/evaluator.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/evaluator.py
|
Apache-2.0
|
def conclude(self):
"""Concludes evaluation by computing, printing and optionally saving metrics."""
self._pbar.close()
self._metrics.conclude()
self._metrics.print()
if self._metrics_path:
self._metrics.save(self._metrics_path)
|
Concludes evaluation by computing, printing and optionally saving metrics.
|
conclude
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/evaluator.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/evaluator.py
|
Apache-2.0
|
def forward_motion_samples(self):
"""Steps forward in the list of reference motions.
All simulated environments must be reset following this function call.
"""
self._ref_motion_start_id += self._num_envs
self._ref_motion_mgr.load_motions(random_sample=False, start_idx=self._ref_motion_start_id)
self._reset_data_buffer()
|
Steps forward in the list of reference motions.
All simulated environments must be reset following this function call.
|
forward_motion_samples
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/evaluator.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/evaluator.py
|
Apache-2.0
|
def create_mask_element_names(body_names: list[str], joint_names: list[str]):
"""Get a name for each element of the mask."""
body_names = [name + "_local_pos_" for name in body_names]
joint_names = [name + "_joint_pos" for name in joint_names]
root_reference_names = [
"root_linear_velocity_x",
"root_linear_velocity_y",
"root_linear_velocity_z",
"root_orientation_roll",
"root_orientation_pitch",
"root_orientation_yaw_delta",
"root_height",
]
return body_names + joint_names + root_reference_names
|
Get a name for each element of the mask.
|
create_mask_element_names
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/mask.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/mask.py
|
Apache-2.0
|
def create_mask(
num_envs: int,
mask_element_names: list[str],
mask_modes: dict[str, dict[str, list[str]]],
enable_sparsity_randomization: bool,
device: torch.device,
) -> torch.Tensor:
"""
Create a mask where all enabled states are set to 1.
This mask can be used directly or multiplied with 0.5 and then be used as the probability of
a state being enabled.
    Args:
        num_envs: The number of environments; one mask row is created per environment.
        mask_element_names: The name corresponding to every element in the mask.
        mask_modes: A nested dictionary configuring which mask elements are enabled in which mode.
            The nested dictionary takes the form `{mode_name: {body_region: [element_names]}}`.
            The `mode_name` and `body_region` can be chosen freely; they are only for
            documentation purposes. The `element_names` may also use regex patterns.
An example configuration is:
"exbody": {
"upper_body": [".*torso_joint.*", ".*shoulder.*joint.*", ".*elbow.*joint.*"],
"lower_body": ["root.*"],
},
For every `mode_name`, all elements of all body regions in that mode will be enabled.
Most likely you want the elements specified in one body region to not
overlap with other body regions (the code will still work if they do overlap, but the
resulting trained policy may not work when the overlapping elements are disabled at test
time).
        enable_sparsity_randomization: If enabled, random elements of the mask will be disabled with
            probability 0.5. The current randomization strategy aligns with the paper, but can be improved
            to avoid motion ambiguity. For example, one viable strategy is to only enable the mask dropout
            on a small portion of elements of the same body region.
    Returns:
        torch.Tensor: A boolean tensor of shape (num_envs, len(mask_element_names)) containing the mask.
"""
# First we do the mode masking.
mask_length = len(mask_element_names)
mask = torch.zeros((num_envs, mask_length), dtype=torch.bool, device=device)
modes = list(mask_modes.keys())
# Pre-compute indices for all modes and patterns
mode_to_indices = {}
for mode in modes:
all_indices = []
for _, goal_state_patterns in mask_modes[mode].items():
indices = get_matching_indices(goal_state_patterns, mask_element_names)
all_indices.extend(indices)
mode_to_indices[mode] = torch.tensor(sorted(set(all_indices)), dtype=torch.long, device=device)
    # Randomly select a mode for each environment.
    # Shape: (num_envs,)
selected_mode_indices = torch.randint(0, len(modes), (num_envs,), device=device)
for mode_idx, mode in enumerate(modes):
# Using tensor operations for boolean selection
mode_env_indices = (selected_mode_indices == mode_idx).nonzero(as_tuple=True)[0]
if mode_env_indices.numel() > 0:
indices = mode_to_indices[mode]
mask[mode_env_indices.unsqueeze(1), indices] = True
# Last we do the sparsity masking.
if enable_sparsity_randomization:
# Multiply by 0.5 to make it a probability.
mask = torch.bernoulli(mask * 0.5).bool()
return mask
|
Create a mask where all enabled states are set to 1.
This mask can be used directly or multiplied with 0.5 and then be used as the probability of
a state being enabled.
Args:
num_envs: The number of environments; one mask row is created per environment.
mask_element_names: The name corresponding to every element in the mask.
mask_modes: A nested dictionary configuring which mask elements are enabled in which mode.
The nested dictionary takes the form `{mode_name: {body_region: [element_names]}}`.
The `mode_name` and `body_region` can be chosen freely; they are only for
documentation purposes. The `element_names` may also use regex patterns.
An example configuration is:
"exbody": {
"upper_body": [".*torso_joint.*", ".*shoulder.*joint.*", ".*elbow.*joint.*"],
"lower_body": ["root.*"],
},
For every `mode_name`, all elements of all body regions in that mode will be enabled.
Most likely you want the elements specified in one body region to not
overlap with other body regions (the code will still work if they do overlap, but the
resulting trained policy may not work when the overlapping elements are disabled at test
time).
enable_sparsity_randomization: If enabled, random elements of the mask will be disabled with
probability 0.5. The current randomization strategy aligns with the paper, but can be improved
to avoid motion ambiguity. For example, one viable strategy is to only enable the mask dropout
on a small portion of elements of the same body region.
Returns:
torch.Tensor: A boolean tensor of shape (num_envs, len(mask_element_names)) containing the mask.
|
create_mask
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/mask.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/mask.py
|
Apache-2.0
|
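A hedged usage sketch combining the two functions above (the body and joint names are hypothetical, and `get_matching_indices` from the same module is assumed to be importable):

import torch

names = create_mask_element_names(
    body_names=["left_hand", "right_hand"],
    joint_names=["left_elbow", "right_elbow"],
)
mask_modes = {
    "exbody": {"upper_body": [".*elbow.*"], "lower_body": ["root.*"]},
}
mask = create_mask(
    num_envs=4,
    mask_element_names=names,
    mask_modes=mask_modes,
    enable_sparsity_randomization=False,
    device=torch.device("cpu"),
)
# mask.shape == (4, len(names)); elbow joints and root references are enabled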
def scale_transform(x: torch.Tensor, lower: torch.Tensor, upper: torch.Tensor) -> torch.Tensor:
"""Normalizes a given input tensor to a range of [-1, 1].
.. note::
It uses pytorch broadcasting functionality to deal with batched input.
Args:
x: Input tensor of shape (N, dims).
lower: The minimum value of the tensor. Shape is (N, dims) or (dims,).
upper: The maximum value of the tensor. Shape is (N, dims) or (dims,).
Returns:
Normalized transform of the tensor. Shape is (N, dims).
"""
# default value of center
offset = (lower + upper) * 0.5
# return normalized tensor
return 2 * (x - offset) / (upper - lower)
|
Normalizes a given input tensor to a range of [-1, 1].
.. note::
It uses pytorch broadcasting functionality to deal with batched input.
Args:
x: Input tensor of shape (N, dims).
lower: The minimum value of the tensor. Shape is (N, dims) or (dims,).
upper: The maximum value of the tensor. Shape is (N, dims) or (dims,).
Returns:
Normalized transform of the tensor. Shape is (N, dims).
|
scale_transform
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/math_utils.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
|
Apache-2.0
|
def unscale_transform(x: torch.Tensor, lower: torch.Tensor, upper: torch.Tensor) -> torch.Tensor:
"""De-normalizes a given input tensor from range of [-1, 1] to (lower, upper).
.. note::
It uses pytorch broadcasting functionality to deal with batched input.
Args:
x: Input tensor of shape (N, dims).
lower: The minimum value of the tensor. Shape is (N, dims) or (dims,).
upper: The maximum value of the tensor. Shape is (N, dims) or (dims,).
Returns:
De-normalized transform of the tensor. Shape is (N, dims).
"""
# default value of center
offset = (lower + upper) * 0.5
# return normalized tensor
return x * (upper - lower) * 0.5 + offset
|
De-normalizes a given input tensor from range of [-1, 1] to (lower, upper).
.. note::
It uses pytorch broadcasting functionality to deal with batched input.
Args:
x: Input tensor of shape (N, dims).
lower: The minimum value of the tensor. Shape is (N, dims) or (dims,).
upper: The maximum value of the tensor. Shape is (N, dims) or (dims,).
Returns:
De-normalized transform of the tensor. Shape is (N, dims).
|
unscale_transform
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/math_utils.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
|
Apache-2.0
|
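The two transforms above are exact inverses of each other, as a round-trip check shows:

import torch

lower = torch.tensor([0.0, -1.0])
upper = torch.tensor([10.0, 1.0])
x = torch.tensor([[2.5, 0.0], [10.0, -1.0]])
x_norm = scale_transform(x, lower, upper)        # -> [[-0.5, 0.0], [1.0, -1.0]]
x_back = unscale_transform(x_norm, lower, upper)
assert torch.allclose(x, x_back)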
def wrap_to_pi(angles: torch.Tensor) -> torch.Tensor:
r"""Wraps input angles (in radians) to the range :math:`[-\pi, \pi]`.
This function wraps angles in radians to the range :math:`[-\pi, \pi]`, such that
:math:`\pi` maps to :math:`\pi`, and :math:`-\pi` maps to :math:`-\pi`. In general,
odd positive multiples of :math:`\pi` are mapped to :math:`\pi`, and odd negative
multiples of :math:`\pi` are mapped to :math:`-\pi`.
    The function behaves similarly to MATLAB's `wrapToPi <https://www.mathworks.com/help/map/ref/wraptopi.html>`_
function.
Args:
angles: Input angles of any shape.
Returns:
Angles in the range :math:`[-\pi, \pi]`.
"""
# wrap to [0, 2*pi)
wrapped_angle = (angles + torch.pi) % (2 * torch.pi)
# map to [-pi, pi]
# we check for zero in wrapped angle to make it go to pi when input angle is odd multiple of pi
return torch.where((wrapped_angle == 0) & (angles > 0), torch.pi, wrapped_angle - torch.pi)
|
Wraps input angles (in radians) to the range :math:`[-\pi, \pi]`.
This function wraps angles in radians to the range :math:`[-\pi, \pi]`, such that
:math:`\pi` maps to :math:`\pi`, and :math:`-\pi` maps to :math:`-\pi`. In general,
odd positive multiples of :math:`\pi` are mapped to :math:`\pi`, and odd negative
multiples of :math:`\pi` are mapped to :math:`-\pi`.
The function behaves similarly to MATLAB's `wrapToPi <https://www.mathworks.com/help/map/ref/wraptopi.html>`_
function.
Args:
angles: Input angles of any shape.
Returns:
Angles in the range :math:`[-\pi, \pi]`.
|
wrap_to_pi
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/math_utils.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
|
Apache-2.0
|
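Example values, including the documented edge cases at odd multiples of pi:

import torch

angles = torch.tensor([torch.pi, -torch.pi, 3 * torch.pi, -2.5 * torch.pi])
wrap_to_pi(angles)  # -> tensor([ pi, -pi,  pi, -pi/2])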
def copysign(mag: float, other: torch.Tensor) -> torch.Tensor:
"""Create a new floating-point tensor with the magnitude of input and the sign of other, element-wise.
Note:
The implementation follows from `torch.copysign`. The function allows a scalar magnitude.
Args:
mag: The magnitude scalar.
other: The tensor containing values whose signbits are applied to magnitude.
Returns:
The output tensor.
"""
mag_torch = torch.tensor(mag, device=other.device, dtype=torch.float).repeat(other.shape[0])
return torch.abs(mag_torch) * torch.sign(other)
|
Create a new floating-point tensor with the magnitude of input and the sign of other, element-wise.
Note:
The implementation follows from `torch.copysign`. The function allows a scalar magnitude.
Args:
mag: The magnitude scalar.
other: The tensor containing values whose signbits are applied to magnitude.
Returns:
The output tensor.
|
copysign
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/math_utils.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
|
Apache-2.0
|
def matrix_from_quat(quaternions: torch.Tensor) -> torch.Tensor:
"""Convert rotations given as quaternions to rotation matrices.
Args:
quaternions: The quaternion orientation in (w, x, y, z). Shape is (..., 4).
Returns:
Rotation matrices. The shape is (..., 3, 3).
Reference:
https://github.com/facebookresearch/pytorch3d/blob/main/pytorch3d/transforms/rotation_conversions.py#L41-L70
"""
r, i, j, k = torch.unbind(quaternions, -1)
# pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`.
two_s = 2.0 / (quaternions * quaternions).sum(-1)
o = torch.stack(
(
1 - two_s * (j * j + k * k),
two_s * (i * j - k * r),
two_s * (i * k + j * r),
two_s * (i * j + k * r),
1 - two_s * (i * i + k * k),
two_s * (j * k - i * r),
two_s * (i * k - j * r),
two_s * (j * k + i * r),
1 - two_s * (i * i + j * j),
),
-1,
)
return o.reshape(quaternions.shape[:-1] + (3, 3))
|
Convert rotations given as quaternions to rotation matrices.
Args:
quaternions: The quaternion orientation in (w, x, y, z). Shape is (..., 4).
Returns:
Rotation matrices. The shape is (..., 3, 3).
Reference:
https://github.com/facebookresearch/pytorch3d/blob/main/pytorch3d/transforms/rotation_conversions.py#L41-L70
|
matrix_from_quat
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/math_utils.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
|
Apache-2.0
|
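Quick checks: the identity quaternion maps to the identity matrix, and a 90-degree yaw maps to the usual planar rotation (values approximate):

import torch

matrix_from_quat(torch.tensor([1.0, 0.0, 0.0, 0.0]))  # -> 3x3 identity
q_yaw90 = torch.tensor([0.7071, 0.0, 0.0, 0.7071])    # (w, x, y, z), 90 deg about z
matrix_from_quat(q_yaw90).round()
# -> [[0., -1., 0.],
#     [1.,  0., 0.],
#     [0.,  0., 1.]]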
def convert_quat(quat: torch.Tensor | np.ndarray, to: Literal["xyzw", "wxyz"] = "xyzw") -> torch.Tensor | np.ndarray:
"""Converts quaternion from one convention to another.
The convention to convert TO is specified as an optional argument. If to == 'xyzw',
then the input is in 'wxyz' format, and vice-versa.
Args:
quat: The quaternion of shape (..., 4).
        to: Convention to convert the quaternion to. Defaults to "xyzw".
Returns:
The converted quaternion in specified convention.
Raises:
ValueError: Invalid input argument `to`, i.e. not "xyzw" or "wxyz".
ValueError: Invalid shape of input `quat`, i.e. not (..., 4,).
"""
# check input is correct
if quat.shape[-1] != 4:
msg = f"Expected input quaternion shape mismatch: {quat.shape} != (..., 4)."
raise ValueError(msg)
if to not in ["xyzw", "wxyz"]:
msg = f"Expected input argument `to` to be 'xyzw' or 'wxyz'. Received: {to}."
raise ValueError(msg)
# check if input is numpy array (we support this backend since some classes use numpy)
if isinstance(quat, np.ndarray):
# use numpy functions
if to == "xyzw":
# wxyz -> xyzw
return np.roll(quat, -1, axis=-1)
else:
# xyzw -> wxyz
return np.roll(quat, 1, axis=-1)
else:
# convert to torch (sanity check)
if not isinstance(quat, torch.Tensor):
quat = torch.tensor(quat, dtype=float)
# convert to specified quaternion type
if to == "xyzw":
# wxyz -> xyzw
return quat.roll(-1, dims=-1)
else:
# xyzw -> wxyz
return quat.roll(1, dims=-1)
|
Converts quaternion from one convention to another.
The convention to convert TO is specified as an optional argument. If to == 'xyzw',
then the input is in 'wxyz' format, and vice-versa.
Args:
quat: The quaternion of shape (..., 4).
to: Convention to convert the quaternion to. Defaults to "xyzw".
Returns:
The converted quaternion in specified convention.
Raises:
ValueError: Invalid input argument `to`, i.e. not "xyzw" or "wxyz".
ValueError: Invalid shape of input `quat`, i.e. not (..., 4,).
|
convert_quat
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/math_utils.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
|
Apache-2.0
|
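Both backends just roll the last axis; for example:

import numpy as np
import torch

convert_quat(torch.tensor([1.0, 0.0, 0.0, 0.0]), to="xyzw")  # -> tensor([0., 0., 0., 1.])
convert_quat(np.array([0.0, 0.0, 0.0, 1.0]), to="wxyz")      # -> array([1., 0., 0., 0.])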
def quat_conjugate(q: torch.Tensor) -> torch.Tensor:
"""Computes the conjugate of a quaternion.
Args:
q: The quaternion orientation in (w, x, y, z). Shape is (..., 4).
Returns:
The conjugate quaternion in (w, x, y, z). Shape is (..., 4).
"""
shape = q.shape
q = q.reshape(-1, 4)
return torch.cat((q[:, 0:1], -q[:, 1:]), dim=-1).view(shape)
|
Computes the conjugate of a quaternion.
Args:
q: The quaternion orientation in (w, x, y, z). Shape is (..., 4).
Returns:
The conjugate quaternion in (w, x, y, z). Shape is (..., 4).
|
quat_conjugate
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/math_utils.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
|
Apache-2.0
|
def quat_from_euler_xyz(roll: torch.Tensor, pitch: torch.Tensor, yaw: torch.Tensor) -> torch.Tensor:
"""Convert rotations given as Euler angles in radians to Quaternions.
Note:
The euler angles are assumed in XYZ convention.
Args:
roll: Rotation around x-axis (in radians). Shape is (N,).
pitch: Rotation around y-axis (in radians). Shape is (N,).
yaw: Rotation around z-axis (in radians). Shape is (N,).
Returns:
The quaternion in (w, x, y, z). Shape is (N, 4).
"""
cy = torch.cos(yaw * 0.5)
sy = torch.sin(yaw * 0.5)
cr = torch.cos(roll * 0.5)
sr = torch.sin(roll * 0.5)
cp = torch.cos(pitch * 0.5)
sp = torch.sin(pitch * 0.5)
# compute quaternion
qw = cy * cr * cp + sy * sr * sp
qx = cy * sr * cp - sy * cr * sp
qy = cy * cr * sp + sy * sr * cp
qz = sy * cr * cp - cy * sr * sp
return torch.stack([qw, qx, qy, qz], dim=-1)
|
Convert rotations given as Euler angles in radians to Quaternions.
Note:
The euler angles are assumed in XYZ convention.
Args:
roll: Rotation around x-axis (in radians). Shape is (N,).
pitch: Rotation around y-axis (in radians). Shape is (N,).
yaw: Rotation around z-axis (in radians). Shape is (N,).
Returns:
The quaternion in (w, x, y, z). Shape is (N, 4).
|
quat_from_euler_xyz
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/math_utils.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
|
Apache-2.0
|
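A minimal sketch, assuming quat_from_euler_xyz is in scope: a pure 90-degree yaw produces the expected rotation about the z-axis.

import torch

zero = torch.tensor([0.0])
yaw = torch.tensor([torch.pi / 2])
q = quat_from_euler_xyz(zero, zero, yaw)
print(q)  # ~tensor([[0.7071, 0.0000, 0.0000, 0.7071]])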
def _sqrt_positive_part(x: torch.Tensor) -> torch.Tensor:
"""Returns torch.sqrt(torch.max(0, x)) but with a zero sub-gradient where x is 0.
Reference:
https://github.com/facebookresearch/pytorch3d/blob/main/pytorch3d/transforms/rotation_conversions.py#L91-L99
"""
ret = torch.zeros_like(x)
positive_mask = x > 0
ret[positive_mask] = torch.sqrt(x[positive_mask])
return ret
|
Returns torch.sqrt(torch.max(0, x)) but with a zero sub-gradient where x is 0.
Reference:
https://github.com/facebookresearch/pytorch3d/blob/main/pytorch3d/transforms/rotation_conversions.py#L91-L99
|
_sqrt_positive_part
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/math_utils.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
|
Apache-2.0
|
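The point of this helper is gradient safety: torch.sqrt has an unbounded derivative at 0 and produces NaNs for negative inputs, whereas this variant yields a zero sub-gradient there. A small sketch, assuming the helper is in scope:

import torch

x = torch.tensor([-1.0, 0.0, 4.0], requires_grad=True)
y = _sqrt_positive_part(x)
y.sum().backward()
print(y)       # tensor([0., 0., 2.], grad_fn=...)
print(x.grad)  # tensor([0.0000, 0.0000, 0.2500]) -- no inf/nan where x <= 0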
def quat_from_matrix(matrix: torch.Tensor) -> torch.Tensor:
"""Convert rotations given as rotation matrices to quaternions.
Args:
matrix: The rotation matrices. Shape is (..., 3, 3).
Returns:
The quaternion in (w, x, y, z). Shape is (..., 4).
Reference:
https://github.com/facebookresearch/pytorch3d/blob/main/pytorch3d/transforms/rotation_conversions.py#L102-L161
"""
if matrix.size(-1) != 3 or matrix.size(-2) != 3:
raise ValueError(f"Invalid rotation matrix shape {matrix.shape}.")
batch_dim = matrix.shape[:-2]
m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.unbind(matrix.reshape(batch_dim + (9,)), dim=-1)
q_abs = _sqrt_positive_part(
torch.stack(
[
1.0 + m00 + m11 + m22,
1.0 + m00 - m11 - m22,
1.0 - m00 + m11 - m22,
1.0 - m00 - m11 + m22,
],
dim=-1,
)
)
# we produce the desired quaternion multiplied by each of r, i, j, k
quat_by_rijk = torch.stack(
[
# pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
torch.stack([q_abs[..., 0] ** 2, m21 - m12, m02 - m20, m10 - m01], dim=-1),
# pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
torch.stack([m21 - m12, q_abs[..., 1] ** 2, m10 + m01, m02 + m20], dim=-1),
# pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
torch.stack([m02 - m20, m10 + m01, q_abs[..., 2] ** 2, m12 + m21], dim=-1),
# pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
torch.stack([m10 - m01, m20 + m02, m21 + m12, q_abs[..., 3] ** 2], dim=-1),
],
dim=-2,
)
# We floor here at 0.1 but the exact level is not important; if q_abs is small,
# the candidate won't be picked.
flr = torch.tensor(0.1).to(dtype=q_abs.dtype, device=q_abs.device)
quat_candidates = quat_by_rijk / (2.0 * q_abs[..., None].max(flr))
# if not for numerical problems, quat_candidates[i] should be same (up to a sign),
# forall i; we pick the best-conditioned one (with the largest denominator)
return quat_candidates[torch.nn.functional.one_hot(q_abs.argmax(dim=-1), num_classes=4) > 0.5, :].reshape(
batch_dim + (4,)
)
|
Convert rotations given as rotation matrices to quaternions.
Args:
matrix: The rotation matrices. Shape is (..., 3, 3).
Returns:
The quaternion in (w, x, y, z). Shape is (..., 4).
Reference:
https://github.com/facebookresearch/pytorch3d/blob/main/pytorch3d/transforms/rotation_conversions.py#L102-L161
|
quat_from_matrix
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/math_utils.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
|
Apache-2.0
|
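A quick check, assuming quat_from_matrix is in scope: a 90-degree rotation about z maps to the matching (w, x, y, z) quaternion.

import torch

R = torch.tensor([[[0.0, -1.0, 0.0],
                   [1.0,  0.0, 0.0],
                   [0.0,  0.0, 1.0]]])  # 90 deg about z
print(quat_from_matrix(R))  # ~tensor([[0.7071, 0.0000, 0.0000, 0.7071]])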
def _axis_angle_rotation(axis: Literal["X", "Y", "Z"], angle: torch.Tensor) -> torch.Tensor:
"""Return the rotation matrices for one of the rotations about an axis of which Euler angles describe,
for each value of the angle given.
Args:
axis: Axis label "X" or "Y or "Z".
angle: Euler angles in radians of any shape.
Returns:
Rotation matrices. Shape is (..., 3, 3).
Reference:
https://github.com/facebookresearch/pytorch3d/blob/main/pytorch3d/transforms/rotation_conversions.py#L164-L191
"""
cos = torch.cos(angle)
sin = torch.sin(angle)
one = torch.ones_like(angle)
zero = torch.zeros_like(angle)
if axis == "X":
R_flat = (one, zero, zero, zero, cos, -sin, zero, sin, cos)
elif axis == "Y":
R_flat = (cos, zero, sin, zero, one, zero, -sin, zero, cos)
elif axis == "Z":
R_flat = (cos, -sin, zero, sin, cos, zero, zero, zero, one)
else:
raise ValueError("letter must be either X, Y or Z.")
return torch.stack(R_flat, -1).reshape(angle.shape + (3, 3))
|
Return the rotation matrices for rotations about one of the axes used by Euler angles,
for each value of the angle given.
Args:
axis: Axis label "X", "Y", or "Z".
angle: Euler angles in radians of any shape.
Returns:
Rotation matrices. Shape is (..., 3, 3).
Reference:
https://github.com/facebookresearch/pytorch3d/blob/main/pytorch3d/transforms/rotation_conversions.py#L164-L191
|
_axis_angle_rotation
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/math_utils.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
|
Apache-2.0
|
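A tiny sketch, assuming _axis_angle_rotation is in scope; the helper is batched over the angle tensor.

import torch

angles = torch.tensor([0.0, torch.pi])
R = _axis_angle_rotation("Z", angles)
print(R.shape)  # torch.Size([2, 3, 3])
print(R[1])     # ~[[-1., 0., 0.], [0., -1., 0.], [0., 0., 1.]]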
def matrix_from_euler(euler_angles: torch.Tensor, convention: str) -> torch.Tensor:
"""
Convert rotations given as Euler angles in radians to rotation matrices.
Args:
euler_angles: Euler angles in radians. Shape is (..., 3).
        convention: Convention string of three uppercase letters from {"X", "Y", "Z"}.
For example, "XYZ" means that the rotations should be applied first about x,
then y, then z.
Returns:
Rotation matrices. Shape is (..., 3, 3).
Reference:
https://github.com/facebookresearch/pytorch3d/blob/main/pytorch3d/transforms/rotation_conversions.py#L194-L220
"""
if euler_angles.dim() == 0 or euler_angles.shape[-1] != 3:
raise ValueError("Invalid input euler angles.")
if len(convention) != 3:
raise ValueError("Convention must have 3 letters.")
if convention[1] in (convention[0], convention[2]):
raise ValueError(f"Invalid convention {convention}.")
for letter in convention:
if letter not in ("X", "Y", "Z"):
raise ValueError(f"Invalid letter {letter} in convention string.")
matrices = [_axis_angle_rotation(c, e) for c, e in zip(convention, torch.unbind(euler_angles, -1))]
# return functools.reduce(torch.matmul, matrices)
return torch.matmul(torch.matmul(matrices[0], matrices[1]), matrices[2])
|
Convert rotations given as Euler angles in radians to rotation matrices.
Args:
euler_angles: Euler angles in radians. Shape is (..., 3).
convention: Convention string of three uppercase letters from {"X", "Y", "Z"}.
For example, "XYZ" means that the rotations should be applied first about x,
then y, then z.
Returns:
Rotation matrices. Shape is (..., 3, 3).
Reference:
https://github.com/facebookresearch/pytorch3d/blob/main/pytorch3d/transforms/rotation_conversions.py#L194-L220
|
matrix_from_euler
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/math_utils.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
|
Apache-2.0
|
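A small sketch, assuming matrix_from_euler is in scope; with convention "XYZ" and only a yaw angle, the result is the plain z-axis rotation matrix from the quat_from_matrix example above.

import torch

euler = torch.tensor([[0.0, 0.0, torch.pi / 2]])  # (roll, pitch, yaw)
R = matrix_from_euler(euler, "XYZ")
print(R)  # ~[[[0., -1., 0.], [1., 0., 0.], [0., 0., 1.]]]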
def euler_xyz_from_quat(quat: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""Convert rotations given as quaternions to Euler angles in radians.
Note:
The euler angles are assumed in XYZ convention.
Args:
quat: The quaternion orientation in (w, x, y, z). Shape is (N, 4).
Returns:
A tuple containing roll-pitch-yaw. Each element is a tensor of shape (N,).
Reference:
https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles
"""
q_w, q_x, q_y, q_z = quat[:, 0], quat[:, 1], quat[:, 2], quat[:, 3]
# roll (x-axis rotation)
sin_roll = 2.0 * (q_w * q_x + q_y * q_z)
cos_roll = 1 - 2 * (q_x * q_x + q_y * q_y)
roll = torch.atan2(sin_roll, cos_roll)
# pitch (y-axis rotation)
sin_pitch = 2.0 * (q_w * q_y - q_z * q_x)
pitch = torch.where(torch.abs(sin_pitch) >= 1, copysign(torch.pi / 2.0, sin_pitch), torch.asin(sin_pitch))
# yaw (z-axis rotation)
sin_yaw = 2.0 * (q_w * q_z + q_x * q_y)
cos_yaw = 1 - 2 * (q_y * q_y + q_z * q_z)
yaw = torch.atan2(sin_yaw, cos_yaw)
return wrap_to_pi(roll), wrap_to_pi(pitch), wrap_to_pi(yaw)
|
Convert rotations given as quaternions to Euler angles in radians.
Note:
The euler angles are assumed in XYZ convention.
Args:
quat: The quaternion orientation in (w, x, y, z). Shape is (N, 4).
Returns:
A tuple containing roll-pitch-yaw. Each element is a tensor of shape (N,).
Reference:
https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles
|
euler_xyz_from_quat
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/math_utils.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
|
Apache-2.0
|
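A round-trip sketch, assuming quat_from_euler_xyz and euler_xyz_from_quat are both in scope; away from the pitch singularity the original angles are recovered.

import torch

q = quat_from_euler_xyz(torch.tensor([0.1]), torch.tensor([0.2]), torch.tensor([0.3]))
roll, pitch, yaw = euler_xyz_from_quat(q)
print(roll, pitch, yaw)  # ~tensor([0.1]) tensor([0.2]) tensor([0.3])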
def quat_mul(q1: torch.Tensor, q2: torch.Tensor) -> torch.Tensor:
"""Multiply two quaternions together.
Args:
q1: The first quaternion in (w, x, y, z). Shape is (..., 4).
q2: The second quaternion in (w, x, y, z). Shape is (..., 4).
Returns:
The product of the two quaternions in (w, x, y, z). Shape is (..., 4).
Raises:
ValueError: Input shapes of ``q1`` and ``q2`` are not matching.
"""
# check input is correct
if q1.shape != q2.shape:
msg = f"Expected input quaternion shape mismatch: {q1.shape} != {q2.shape}."
raise ValueError(msg)
# reshape to (N, 4) for multiplication
shape = q1.shape
q1 = q1.reshape(-1, 4)
q2 = q2.reshape(-1, 4)
# extract components from quaternions
w1, x1, y1, z1 = q1[:, 0], q1[:, 1], q1[:, 2], q1[:, 3]
w2, x2, y2, z2 = q2[:, 0], q2[:, 1], q2[:, 2], q2[:, 3]
    # perform multiplication (algebraically rearranged Hamilton product that uses fewer multiplications)
ww = (z1 + x1) * (x2 + y2)
yy = (w1 - y1) * (w2 + z2)
zz = (w1 + y1) * (w2 - z2)
xx = ww + yy + zz
qq = 0.5 * (xx + (z1 - x1) * (x2 - y2))
w = qq - ww + (z1 - y1) * (y2 - z2)
x = qq - xx + (x1 + w1) * (x2 + w2)
y = qq - yy + (w1 - x1) * (y2 + z2)
z = qq - zz + (z1 + y1) * (w2 - x2)
return torch.stack([w, x, y, z], dim=-1).view(shape)
|
Multiply two quaternions together.
Args:
q1: The first quaternion in (w, x, y, z). Shape is (..., 4).
q2: The second quaternion in (w, x, y, z). Shape is (..., 4).
Returns:
The product of the two quaternions in (w, x, y, z). Shape is (..., 4).
Raises:
ValueError: Input shapes of ``q1`` and ``q2`` are not matching.
|
quat_mul
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/math_utils.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
|
Apache-2.0
|
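A sanity check, assuming quat_mul is in scope: the quaternion basis elements satisfy i * j = k.

import torch

qi = torch.tensor([[0.0, 1.0, 0.0, 0.0]])  # i in (w, x, y, z)
qj = torch.tensor([[0.0, 0.0, 1.0, 0.0]])  # j
print(quat_mul(qi, qj))  # tensor([[0., 0., 0., 1.]]), i.e. k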
def quat_box_minus(q1: torch.Tensor, q2: torch.Tensor) -> torch.Tensor:
"""The box-minus operator (quaternion difference) between two quaternions.
Args:
q1: The first quaternion in (w, x, y, z). Shape is (N, 4).
q2: The second quaternion in (w, x, y, z). Shape is (N, 4).
Returns:
The difference between the two quaternions. Shape is (N, 3).
"""
quat_diff = quat_mul(q1, quat_conjugate(q2)) # q1 * q2^-1
re = quat_diff[:, 0] # real part, q = [w, x, y, z] = [re, im]
im = quat_diff[:, 1:] # imaginary part
norm_im = torch.norm(im, dim=1)
scale = 2.0 * torch.where(norm_im > 1.0e-7, torch.atan2(norm_im, re) / norm_im, torch.sign(re))
return scale.unsqueeze(-1) * im
|
The box-minus operator (quaternion difference) between two quaternions.
Args:
q1: The first quaternion in (w, x, y, z). Shape is (N, 4).
q2: The second quaternion in (w, x, y, z). Shape is (N, 4).
Returns:
The difference between the two quaternions. Shape is (N, 3).
|
quat_box_minus
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/math_utils.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
|
Apache-2.0
|
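A sketch, assuming quat_box_minus and quat_from_angle_axis (defined later in this file) are in scope: two rotations about the same axis differ by the expected axis-angle residual.

import torch

axis = torch.tensor([[0.0, 0.0, 1.0]])
q1 = quat_from_angle_axis(torch.tensor([0.3]), axis)
q2 = quat_from_angle_axis(torch.tensor([0.1]), axis)
print(quat_box_minus(q1, q2))  # ~tensor([[0.0000, 0.0000, 0.2000]])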
def yaw_quat(quat: torch.Tensor) -> torch.Tensor:
"""Extract the yaw component of a quaternion.
Args:
        quat: The orientation in (w, x, y, z). Shape is (..., 4).
    Returns:
        A quaternion with only the yaw component. Shape is (..., 4).
"""
shape = quat.shape
quat_yaw = quat.clone().view(-1, 4)
qw = quat_yaw[:, 0]
qx = quat_yaw[:, 1]
qy = quat_yaw[:, 2]
qz = quat_yaw[:, 3]
yaw = torch.atan2(2 * (qw * qz + qx * qy), 1 - 2 * (qy * qy + qz * qz))
quat_yaw[:] = 0.0
quat_yaw[:, 3] = torch.sin(yaw / 2)
quat_yaw[:, 0] = torch.cos(yaw / 2)
quat_yaw = normalize(quat_yaw)
return quat_yaw.view(shape)
|
Extract the yaw component of a quaternion.
Args:
quat: The orientation in (w, x, y, z). Shape is (..., 4).
Returns:
A quaternion with only the yaw component. Shape is (..., 4).
|
yaw_quat
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/math_utils.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
|
Apache-2.0
|
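A sketch, assuming yaw_quat and quat_from_euler_xyz are in scope: roll and pitch are stripped, leaving (approximately) the pure-yaw quaternion.

import torch

q = quat_from_euler_xyz(torch.tensor([0.3]), torch.tensor([0.2]), torch.tensor([1.0]))
q_ref = quat_from_euler_xyz(torch.tensor([0.0]), torch.tensor([0.0]), torch.tensor([1.0]))
print(torch.allclose(yaw_quat(q), q_ref, atol=1e-6))  # True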
def quat_apply(quat: torch.Tensor, vec: torch.Tensor) -> torch.Tensor:
"""Apply a quaternion rotation to a vector.
Args:
quat: The quaternion in (w, x, y, z). Shape is (..., 4).
vec: The vector in (x, y, z). Shape is (..., 3).
Returns:
The rotated vector in (x, y, z). Shape is (..., 3).
"""
# store shape
shape = vec.shape
    # reshape to (N, 4) and (N, 3) for batched computation
quat = quat.reshape(-1, 4)
vec = vec.reshape(-1, 3)
# extract components from quaternions
xyz = quat[:, 1:]
t = xyz.cross(vec, dim=-1) * 2
return (vec + quat[:, 0:1] * t + xyz.cross(t, dim=-1)).view(shape)
|
Apply a quaternion rotation to a vector.
Args:
quat: The quaternion in (w, x, y, z). Shape is (..., 4).
vec: The vector in (x, y, z). Shape is (..., 3).
Returns:
The rotated vector in (x, y, z). Shape is (..., 3).
|
quat_apply
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/math_utils.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
|
Apache-2.0
|
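A sketch, assuming quat_apply is in scope: a 90-degree yaw sends the x-axis to the y-axis.

import torch

q = torch.tensor([[0.7071068, 0.0, 0.0, 0.7071068]])  # 90 deg about z
v = torch.tensor([[1.0, 0.0, 0.0]])
print(quat_apply(q, v))  # ~tensor([[0., 1., 0.]])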
def quat_apply_yaw(quat: torch.Tensor, vec: torch.Tensor) -> torch.Tensor:
"""Rotate a vector only around the yaw-direction.
Args:
quat: The orientation in (w, x, y, z). Shape is (N, 4).
vec: The vector in (x, y, z). Shape is (N, 3).
Returns:
The rotated vector in (x, y, z). Shape is (N, 3).
"""
quat_yaw = yaw_quat(quat)
return quat_apply(quat_yaw, vec)
|
Rotate a vector only around the yaw-direction.
Args:
quat: The orientation in (w, x, y, z). Shape is (N, 4).
vec: The vector in (x, y, z). Shape is (N, 3).
Returns:
The rotated vector in (x, y, z). Shape is (N, 3).
|
quat_apply_yaw
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/math_utils.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
|
Apache-2.0
|
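A sketch, assuming quat_apply_yaw and quat_from_euler_xyz are in scope: the roll and pitch components are ignored, so only yaw rotates the vector.

import torch

q = quat_from_euler_xyz(torch.tensor([0.4]), torch.tensor([0.3]), torch.tensor([torch.pi / 2]))
v = torch.tensor([[1.0, 0.0, 0.0]])
print(quat_apply_yaw(q, v))  # ~tensor([[0., 1., 0.]])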
def quat_rotate(q: torch.Tensor, v: torch.Tensor) -> torch.Tensor:
"""Rotate a vector by a quaternion along the last dimension of q and v.
Args:
q: The quaternion in (w, x, y, z). Shape is (..., 4).
v: The vector in (x, y, z). Shape is (..., 3).
Returns:
The rotated vector in (x, y, z). Shape is (..., 3).
"""
q_w = q[..., 0]
q_vec = q[..., 1:]
a = v * (2.0 * q_w**2 - 1.0).unsqueeze(-1)
b = torch.cross(q_vec, v, dim=-1) * q_w.unsqueeze(-1) * 2.0
# for two-dimensional tensors, bmm is faster than einsum
if q_vec.dim() == 2:
c = q_vec * torch.bmm(q_vec.view(q.shape[0], 1, 3), v.view(q.shape[0], 3, 1)).squeeze(-1) * 2.0
else:
c = q_vec * torch.einsum("...i,...i->...", q_vec, v).unsqueeze(-1) * 2.0
return a + b + c
|
Rotate a vector by a quaternion along the last dimension of q and v.
Args:
q: The quaternion in (w, x, y, z). Shape is (..., 4).
v: The vector in (x, y, z). Shape is (..., 3).
Returns:
The rotated vector in (x, y, z). Shape is (..., 3).
|
quat_rotate
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/math_utils.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
|
Apache-2.0
|
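For unit quaternions this computes the same rotation as quat_apply above, just via a different algebraic expansion; a quick equivalence sketch assuming both are in scope:

import torch

q = torch.tensor([[0.7071068, 0.0, 0.0, 0.7071068]])
v = torch.tensor([[1.0, 0.0, 0.0]])
print(torch.allclose(quat_rotate(q, v), quat_apply(q, v), atol=1e-6))  # True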
def quat_rotate_inverse(q: torch.Tensor, v: torch.Tensor) -> torch.Tensor:
"""Rotate a vector by the inverse of a quaternion along the last dimension of q and v.
Args:
q: The quaternion in (w, x, y, z). Shape is (..., 4).
v: The vector in (x, y, z). Shape is (..., 3).
Returns:
The rotated vector in (x, y, z). Shape is (..., 3).
"""
q_w = q[..., 0]
q_vec = q[..., 1:]
a = v * (2.0 * q_w**2 - 1.0).unsqueeze(-1)
b = torch.cross(q_vec, v, dim=-1) * q_w.unsqueeze(-1) * 2.0
# for two-dimensional tensors, bmm is faster than einsum
if q_vec.dim() == 2:
c = q_vec * torch.bmm(q_vec.view(q.shape[0], 1, 3), v.view(q.shape[0], 3, 1)).squeeze(-1) * 2.0
else:
c = q_vec * torch.einsum("...i,...i->...", q_vec, v).unsqueeze(-1) * 2.0
return a - b + c
|
Rotate a vector by the inverse of a quaternion along the last dimension of q and v.
Args:
q: The quaternion in (w, x, y, z). Shape is (..., 4).
v: The vector in (x, y, z). Shape is (..., 3).
Returns:
The rotated vector in (x, y, z). Shape is (..., 3).
|
quat_rotate_inverse
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/math_utils.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
|
Apache-2.0
|
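quat_rotate and quat_rotate_inverse undo each other for unit quaternions; a round-trip sketch, assuming both and quat_from_angle_axis are in scope:

import torch

q = quat_from_angle_axis(torch.tensor([0.7]), torch.tensor([[0.0, 1.0, 0.0]]))
v = torch.tensor([[1.0, 2.0, 3.0]])
v_rot = quat_rotate(q, v)
print(quat_rotate_inverse(q, v_rot))  # ~tensor([[1., 2., 3.]])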
def quat_from_angle_axis(angle: torch.Tensor, axis: torch.Tensor) -> torch.Tensor:
"""Convert rotations given as angle-axis to quaternions.
Args:
angle: The angle turned anti-clockwise in radians around the vector's direction. Shape is (N,).
axis: The axis of rotation. Shape is (N, 3).
Returns:
The quaternion in (w, x, y, z). Shape is (N, 4).
"""
theta = (angle / 2).unsqueeze(-1)
xyz = normalize(axis) * theta.sin()
w = theta.cos()
return normalize(torch.cat([w, xyz], dim=-1))
|
Convert rotations given as angle-axis to quaternions.
Args:
angle: The angle turned anti-clockwise in radians around the vector's direction. Shape is (N,).
axis: The axis of rotation. Shape is (N, 3).
Returns:
The quaternion in (w, x, y, z). Shape is (N, 4).
|
quat_from_angle_axis
|
python
|
NVlabs/HOVER
|
neural_wbc/core/neural_wbc/core/math_utils.py
|
https://github.com/NVlabs/HOVER/blob/master/neural_wbc/core/neural_wbc/core/math_utils.py
|
Apache-2.0
|
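A final sketch, assuming quat_from_angle_axis is in scope; the axis need not be pre-normalized, since the function normalizes it internally.

import torch

angle = torch.tensor([torch.pi / 2])
axis = torch.tensor([[0.0, 0.0, 2.0]])  # non-unit axis is fine
print(quat_from_angle_axis(angle, axis))  # ~tensor([[0.7071, 0.0000, 0.0000, 0.7071]])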