Muhammad Taqi Raza
committed on
Commit
·
2160ac9
1
Parent(s):
d3d1fbf
adding fps options, and testing inference-1
Browse files
- gradio_app.py +7 -6
- inference/v2v_data/demo.py +2 -2
gradio_app.py
CHANGED
@@ -206,7 +206,7 @@ download_models()
|
|
206 |
# -----------------------------
|
207 |
# Step 2: Inference Logic
|
208 |
# -----------------------------
|
209 |
-
def run_epic_inference(video_path, num_frames, target_pose, mode):
|
210 |
temp_input_path = "/app/temp_input.mp4"
|
211 |
output_dir = "/app/output_anchor"
|
212 |
video_output_path = f"{output_dir}/masked_videos/output.mp4"
|
@@ -218,7 +218,7 @@ def run_epic_inference(video_path, num_frames, target_pose, mode):
|
|
218 |
try:
|
219 |
theta, phi, r, x, y = target_pose.strip().split()
|
220 |
except ValueError:
|
221 |
-
return f"
|
222 |
logs = f"Running inference with target pose: θ={theta}, φ={phi}, r={r}, x={x}, y={y}\n"
|
223 |
command = [
|
224 |
"python", "/app/inference/v2v_data/inference.py",
|
@@ -232,8 +232,8 @@ def run_epic_inference(video_path, num_frames, target_pose, mode):
|
|
232 |
"--video_length", str(num_frames),
|
233 |
"--save_name", "output",
|
234 |
"--mode", mode,
|
|
|
235 |
]
|
236 |
-
|
237 |
try:
|
238 |
result = subprocess.run(command, capture_output=True, text=True, check=True)
|
239 |
logs += result.stdout
|
@@ -255,8 +255,8 @@ def print_output_directory(out_dir):
|
|
255 |
return result
|
256 |
|
257 |
def inference(video_path, num_frames, fps, target_pose, mode):
|
258 |
-
logs, video_masked = run_epic_inference(video_path, num_frames, target_pose, mode)
|
259 |
-
|
260 |
result_dir = print_output_directory("/app/output_anchor")
|
261 |
|
262 |
|
@@ -291,7 +291,8 @@ def inference(video_path, num_frames, fps, target_pose, mode):
|
|
291 |
"--controlnet_transformer_num_layers", "8",
|
292 |
"--infer_with_mask",
|
293 |
"--pool_style", "max",
|
294 |
-
"--seed", "43"
|
|
|
295 |
]
|
296 |
|
297 |
result = subprocess.run(command, capture_output=True, text=True)
|
|
|
206 |
# -----------------------------
|
207 |
# Step 2: Inference Logic
|
208 |
# -----------------------------
|
209 |
+
def run_epic_inference(video_path, fps, num_frames, target_pose, mode):
|
210 |
temp_input_path = "/app/temp_input.mp4"
|
211 |
output_dir = "/app/output_anchor"
|
212 |
video_output_path = f"{output_dir}/masked_videos/output.mp4"
|
|
|
218 |
try:
|
219 |
theta, phi, r, x, y = target_pose.strip().split()
|
220 |
except ValueError:
|
221 |
+
return f"Invalid target pose format. Use: θ φ r x y", None
|
222 |
logs = f"Running inference with target pose: θ={theta}, φ={phi}, r={r}, x={x}, y={y}\n"
|
223 |
command = [
|
224 |
"python", "/app/inference/v2v_data/inference.py",
|
|
|
232 |
"--video_length", str(num_frames),
|
233 |
"--save_name", "output",
|
234 |
"--mode", mode,
|
235 |
+
"--fps", fps
|
236 |
]
|
|
|
237 |
try:
|
238 |
result = subprocess.run(command, capture_output=True, text=True, check=True)
|
239 |
logs += result.stdout
|
|
|
255 |
return result
|
256 |
|
257 |
def inference(video_path, num_frames, fps, target_pose, mode):
|
258 |
+
logs, video_masked = run_epic_inference(video_path, fps, num_frames, target_pose, mode)
|
259 |
+
return logs, video_masked
|
260 |
result_dir = print_output_directory("/app/output_anchor")
|
261 |
|
262 |
|
|
|
291 |
"--controlnet_transformer_num_layers", "8",
|
292 |
"--infer_with_mask",
|
293 |
"--pool_style", "max",
|
294 |
+
"--seed", "43",
|
295 |
+
"--fps", fps
|
296 |
]
|
297 |
|
298 |
result = subprocess.run(command, capture_output=True, text=True)
|
inference/v2v_data/demo.py
CHANGED
@@ -180,8 +180,8 @@ class GetAnchorVideos:
|
|
180 |
mask_save = process_mask_tensor(torch.cat(masks)).squeeze().cpu().numpy()
|
181 |
np.save(f"{opts.out_dir}/depth/{save_name}.npy",depths.cpu().numpy())
|
182 |
np.savez_compressed(f"{opts.out_dir}/masks/{save_name}.npz",mask=mask_save)
|
183 |
-
save_video_as_mp4(ori_video_save,f"{opts.out_dir}/videos/{save_name}.mp4", fps=
|
184 |
-
save_video_as_mp4(cond_video_save,f"{opts.out_dir}/masked_videos/{save_name}.mp4", fps=
|
185 |
np.save(f'{opts.out_dir}/post_t/' + save_name + '.npy',pose_t.cpu().numpy())
|
186 |
np.save(f'{opts.out_dir}/pose_s/' + save_name + '.npy',pose_s.cpu().numpy())
|
187 |
np.save(f'{opts.out_dir}/intrinsics/' + save_name + '.npy',K[0].cpu().numpy())
|
|
|
180 |
mask_save = process_mask_tensor(torch.cat(masks)).squeeze().cpu().numpy()
|
181 |
np.save(f"{opts.out_dir}/depth/{save_name}.npy",depths.cpu().numpy())
|
182 |
np.savez_compressed(f"{opts.out_dir}/masks/{save_name}.npz",mask=mask_save)
|
183 |
+
save_video_as_mp4(ori_video_save,f"{opts.out_dir}/videos/{save_name}.mp4", fps=opts.fps)
|
184 |
+
save_video_as_mp4(cond_video_save,f"{opts.out_dir}/masked_videos/{save_name}.mp4", fps=opts.fps)
|
185 |
np.save(f'{opts.out_dir}/post_t/' + save_name + '.npy',pose_t.cpu().numpy())
|
186 |
np.save(f'{opts.out_dir}/pose_s/' + save_name + '.npy',pose_s.cpu().numpy())
|
187 |
np.save(f'{opts.out_dir}/intrinsics/' + save_name + '.npy',K[0].cpu().numpy())
|