Spaces:
Runtime error
Runtime error
Upload hy3dgen/texgen/differentiable_renderer/camera_utils.py with huggingface_hub
Browse files
hy3dgen/texgen/differentiable_renderer/camera_utils.py
ADDED
@@ -0,0 +1,116 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Open Source Model Licensed under the Apache License Version 2.0
|
2 |
+
# and Other Licenses of the Third-Party Components therein:
|
3 |
+
# The below Model in this distribution may have been modified by THL A29 Limited
|
4 |
+
# ("Tencent Modifications"). All Tencent Modifications are Copyright (C) 2024 THL A29 Limited.
|
5 |
+
|
6 |
+
# Copyright (C) 2024 THL A29 Limited, a Tencent company. All rights reserved.
|
7 |
+
# The below software and/or models in this distribution may have been
|
8 |
+
# modified by THL A29 Limited ("Tencent Modifications").
|
9 |
+
# All Tencent Modifications are Copyright (C) THL A29 Limited.
|
10 |
+
|
11 |
+
# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
|
12 |
+
# except for the third-party components listed below.
|
13 |
+
# Hunyuan 3D does not impose any additional limitations beyond what is outlined
|
14 |
+
# in the respective licenses of these third-party components.
|
15 |
+
# Users must comply with all terms and conditions of original licenses of these third-party
|
16 |
+
# components and must ensure that the usage of the third party components adheres to
|
17 |
+
# all relevant laws and regulations.
|
18 |
+
|
19 |
+
# For avoidance of doubts, Hunyuan 3D means the large language models and
|
20 |
+
# their software and algorithms, including trained model weights, parameters (including
|
21 |
+
# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
|
22 |
+
# fine-tuning enabling code and other elements of the foregoing made publicly available
|
23 |
+
# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.
|
24 |
+
|
25 |
+
import math
|
26 |
+
|
27 |
+
import numpy as np
|
28 |
+
import torch
|
29 |
+
|
30 |
+
|
31 |
+
def transform_pos(mtx, pos, keepdim=False):
    """Transform vertex positions by a 4x4 matrix.

    Args:
        mtx: (4, 4) transform as a ``numpy.ndarray`` or ``torch.Tensor``.
        pos: (N, 3) or (N, 4) tensor of positions. When only xyz is given,
            a homogeneous coordinate w=1 is appended before transforming.
        keepdim: if True, return shape (N, 4); otherwise a leading batch
            dimension is added, giving (1, N, 4).

    Returns:
        torch.Tensor: transformed homogeneous positions.
    """
    # Promote a numpy matrix to a tensor on the same device as the positions.
    t_mtx = torch.from_numpy(mtx).to(pos.device) if isinstance(mtx, np.ndarray) else mtx

    if pos.shape[-1] == 3:
        # Fix: allocate the w=1 column directly on pos.device instead of
        # creating it on CPU and moving it; use torch's `dim=` keyword.
        posw = torch.cat([pos, torch.ones(pos.shape[0], 1, device=pos.device)], dim=1)
    else:
        posw = pos

    out = torch.matmul(posw, t_mtx.t())
    # Fix: the original applied a redundant `[...]` index in the keepdim
    # branch; it is an identity operation and has been dropped.
    return out if keepdim else out[None, ...]
|
45 |
+
|
46 |
+
|
47 |
+
def get_mv_matrix(elev, azim, camera_distance, center=None):
    """Build a 4x4 world-to-camera (model-view) matrix from spherical angles.

    Args:
        elev: elevation angle in degrees.
        azim: azimuth angle in degrees.
        camera_distance: distance from the camera to the look-at center.
        center: optional (3,) look-at point; defaults to the origin.

    Returns:
        numpy.ndarray: (4, 4) float32 world-to-camera matrix.
    """
    # Renderer convention: flip the elevation sign and offset azimuth by 90 deg.
    elev_rad = math.radians(-elev)
    azim_rad = math.radians(azim + 90)

    cos_elev = math.cos(elev_rad)
    camera_position = np.array([
        camera_distance * cos_elev * math.cos(azim_rad),
        camera_distance * cos_elev * math.sin(azim_rad),
        camera_distance * math.sin(elev_rad),
    ])

    center = np.array([0, 0, 0]) if center is None else np.array(center)

    # Orthonormal camera frame for a z-up world.
    lookat = center - camera_position
    lookat = lookat / np.linalg.norm(lookat)
    right = np.cross(lookat, np.array([0, 0, 1.0]))
    right = right / np.linalg.norm(right)
    up = np.cross(right, lookat)
    up = up / np.linalg.norm(up)

    # Camera-to-world rotation: columns are [right, up, -lookat].
    rot_c2w = np.stack([right, up, -lookat], axis=-1)

    # Invert the rigid transform: R_w2c = R_c2w^T, t_w2c = -R^T @ position.
    w2c = np.eye(4)
    w2c[:3, :3] = rot_c2w.T
    w2c[:3, 3] = -rot_c2w.T @ camera_position
    return w2c.astype(np.float32)
|
82 |
+
|
83 |
+
|
84 |
+
def get_orthographic_projection_matrix(
        left=-1, right=1, bottom=-1, top=1, near=0, far=2):
    """Build an OpenGL-style orthographic projection matrix.

    Args:
        left (float): left edge of the view volume.
        right (float): right edge of the view volume.
        bottom (float): bottom edge of the view volume.
        top (float): top edge of the view volume.
        near (float): near clipping-plane distance.
        far (float): far clipping-plane distance.

    Returns:
        numpy.ndarray: (4, 4) float32 orthographic projection matrix.
    """
    width = right - left
    height = top - bottom
    depth = far - near

    proj = np.eye(4, dtype=np.float32)
    # Scale each axis into the [-1, 1] clip cube (z is negated, OpenGL style).
    proj[0, 0] = 2 / width
    proj[1, 1] = 2 / height
    proj[2, 2] = -2 / depth
    # Translate the volume's center to the origin.
    proj[:3, 3] = [-(right + left) / width,
                   -(top + bottom) / height,
                   -(far + near) / depth]
    return proj
|
108 |
+
|
109 |
+
|
110 |
+
def get_perspective_projection_matrix(fovy, aspect_wh, near, far):
    """Build an OpenGL-style perspective projection matrix.

    Args:
        fovy (float): vertical field of view in degrees.
        aspect_wh (float): width / height aspect ratio.
        near (float): near clipping-plane distance (positive).
        far (float): far clipping-plane distance (positive).

    Returns:
        numpy.ndarray: (4, 4) float32 perspective projection matrix.
    """
    # Focal scale from the vertical field of view.
    focal = 1.0 / math.tan(math.radians(fovy) / 2.0)
    depth = far - near

    proj = np.zeros((4, 4), dtype=np.float32)
    proj[0, 0] = focal / aspect_wh
    proj[1, 1] = focal
    # Map [near, far] onto clip-space depth; w receives -z.
    proj[2, 2] = -(far + near) / depth
    proj[2, 3] = -2.0 * far * near / depth
    proj[3, 2] = -1.0
    return proj
|