Spaces:
Sleeping
Sleeping
Commit
·
a30f1f1
1
Parent(s):
6ee5c65
add
Browse files
- utils/other_tools_hf.py +7 -6
utils/other_tools_hf.py
CHANGED
@@ -694,11 +694,12 @@ def render_one_sequence_no_gt(
|
|
694 |
import moviepy.editor as mp
|
695 |
import librosa
|
696 |
|
|
|
697 |
model = smplx.create(model_folder, model_type=model_type,
|
698 |
gender=gender, use_face_contour=use_face_contour,
|
699 |
num_betas=num_betas,
|
700 |
num_expression_coeffs=num_expression_coeffs,
|
701 |
- ext=ext, use_pca=False).
|
702 |
|
703 |
#data_npz = np.load(f"{output_dir}{res_npz_path}.npz")
|
704 |
data_np_body = np.load(res_npz_path, allow_pickle=True)
|
@@ -715,12 +716,12 @@ def render_one_sequence_no_gt(
|
|
715 |
seconds = 1
|
716 |
#data_npz["jaw_pose"].shape[0]
|
717 |
n = data_np_body["poses"].shape[0]
|
718 |
- beta = torch.from_numpy(data_np_body["betas"]).to(torch.float32).unsqueeze(0).
|
719 |
beta = beta.repeat(n, 1)
|
720 |
- expression = torch.from_numpy(data_np_body["expressions"][:n]).to(torch.float32).
|
721 |
- jaw_pose = torch.from_numpy(data_np_body["poses"][:n, 66:69]).to(torch.float32).
|
722 |
- pose = torch.from_numpy(data_np_body["poses"][:n]).to(torch.float32).
|
723 |
- transl = torch.from_numpy(data_np_body["trans"][:n]).to(torch.float32).
|
724 |
# print(beta.shape, expression.shape, jaw_pose.shape, pose.shape, transl.shape, pose[:,:3].shape)
|
725 |
output = model(betas=beta, transl=transl, expression=expression, jaw_pose=jaw_pose,
|
726 |
global_orient=pose[:,:3], body_pose=pose[:,3:21*3+3], left_hand_pose=pose[:,25*3:40*3], right_hand_pose=pose[:,40*3:55*3],
|
|
|
694 |
import moviepy.editor as mp
|
695 |
import librosa
|
696 |
|
697 |
+ device = "cpu"
|
698 |
model = smplx.create(model_folder, model_type=model_type,
|
699 |
gender=gender, use_face_contour=use_face_contour,
|
700 |
num_betas=num_betas,
|
701 |
num_expression_coeffs=num_expression_coeffs,
|
702 |
+ ext=ext, use_pca=False).to(device)
|
703 |
|
704 |
#data_npz = np.load(f"{output_dir}{res_npz_path}.npz")
|
705 |
data_np_body = np.load(res_npz_path, allow_pickle=True)
|
|
|
716 |
seconds = 1
|
717 |
#data_npz["jaw_pose"].shape[0]
|
718 |
n = data_np_body["poses"].shape[0]
|
719 |
+ beta = torch.from_numpy(data_np_body["betas"]).to(torch.float32).unsqueeze(0).to(device)
|
720 |
beta = beta.repeat(n, 1)
|
721 |
+ expression = torch.from_numpy(data_np_body["expressions"][:n]).to(torch.float32).to(device)
|
722 |
+ jaw_pose = torch.from_numpy(data_np_body["poses"][:n, 66:69]).to(torch.float32).to(device)
|
723 |
+ pose = torch.from_numpy(data_np_body["poses"][:n]).to(torch.float32).to(device)
|
724 |
+ transl = torch.from_numpy(data_np_body["trans"][:n]).to(torch.float32).to(device)
|
725 |
# print(beta.shape, expression.shape, jaw_pose.shape, pose.shape, transl.shape, pose[:,:3].shape)
|
726 |
output = model(betas=beta, transl=transl, expression=expression, jaw_pose=jaw_pose,
|
727 |
global_orient=pose[:,:3], body_pose=pose[:,3:21*3+3], left_hand_pose=pose[:,25*3:40*3], right_hand_pose=pose[:,40*3:55*3],
|