Muhammad Taqi Raza
committed on
Commit
·
79ff636
1
Parent(s):
43360f0
adding aspect ratio
Browse files
- gradio_app.py +7 -11
gradio_app.py
CHANGED
@@ -30,7 +30,7 @@ def download_models():
|
|
30 |
subprocess.check_call(["bash", "download/download_models.sh"])
|
31 |
print("✅ Models downloaded.")
|
32 |
except subprocess.CalledProcessError as e:
|
33 |
-
print(f"
|
34 |
else:
|
35 |
print("✅ Pretrained models already exist.")
|
36 |
|
@@ -84,9 +84,9 @@ def get_anchor_video(video_path, fps, num_frames, target_pose, mode,
|
|
84 |
"--window_size", str(window_size),
|
85 |
"--overlap", str(overlap),
|
86 |
"--max_res", str(max_res),
|
87 |
-
|
88 |
"--seed", str(seed_input),
|
89 |
-
"--height", str(height),
|
90 |
"--width", str(width),
|
91 |
"--target_aspect_ratio", w.strip(), h.strip()
|
92 |
]
|
@@ -95,7 +95,7 @@ def get_anchor_video(video_path, fps, num_frames, target_pose, mode,
|
|
95 |
result = subprocess.run(command, capture_output=True, text=True, check=True)
|
96 |
logs += result.stdout
|
97 |
except subprocess.CalledProcessError as e:
|
98 |
-
logs += f"
|
99 |
return None, logs
|
100 |
|
101 |
return str(video_output_path), logs
|
@@ -136,7 +136,7 @@ def inference(
|
|
136 |
"--controlnet_transformer_num_layers", str(controlnet_transformer_num_layers),
|
137 |
|
138 |
]
|
139 |
-
|
140 |
if upscale:
|
141 |
command.extend(["--upscale", "--upscale_factor", str(upscale_factor)])
|
142 |
|
@@ -152,9 +152,6 @@ def inference(
|
|
152 |
video_output = f"{out_dir}/00000_{seed}_out.mp4"
|
153 |
return video_output if os.path.exists(video_output) else None, logs
|
154 |
|
155 |
-
|
156 |
-
|
157 |
-
|
158 |
# -----------------------------
|
159 |
# UI
|
160 |
# -----------------------------
|
@@ -168,11 +165,10 @@ with demo:
|
|
168 |
with gr.Row():
|
169 |
with gr.Column():
|
170 |
with gr.Row():
|
171 |
-
near_far_estimated = gr.Checkbox(label="Near Far Estimation", value=True)
|
172 |
pose_input = gr.Textbox(label="Target Pose (θ φ r x y)", placeholder="e.g., 0 30 -0.6 0 0")
|
173 |
fps_input = gr.Number(value=24, label="FPS")
|
174 |
-
aspect_ratio_inputs=gr.Textbox(label="Target Aspect Ratio (e.g., 2,3)")
|
175 |
-
|
176 |
num_frames_input = gr.Number(value=49, label="Number of Frames")
|
177 |
radius_input = gr.Number(value = 1.0, label="Radius Scale")
|
178 |
mode_input = gr.Dropdown(choices=["gradual"], value="gradual", label="Camera Mode")
|
|
|
30 |
subprocess.check_call(["bash", "download/download_models.sh"])
|
31 |
print("✅ Models downloaded.")
|
32 |
except subprocess.CalledProcessError as e:
|
33 |
+
print(f"Model download failed: {e}")
|
34 |
else:
|
35 |
print("✅ Pretrained models already exist.")
|
36 |
|
|
|
84 |
"--window_size", str(window_size),
|
85 |
"--overlap", str(overlap),
|
86 |
"--max_res", str(max_res),
|
87 |
+
"--sample_size", sample_size if sample_size else "384,672",
|
88 |
"--seed", str(seed_input),
|
89 |
+
"--height", str(height),
|
90 |
"--width", str(width),
|
91 |
"--target_aspect_ratio", w.strip(), h.strip()
|
92 |
]
|
|
|
95 |
result = subprocess.run(command, capture_output=True, text=True, check=True)
|
96 |
logs += result.stdout
|
97 |
except subprocess.CalledProcessError as e:
|
98 |
+
logs += f"Inference failed:\n{e.stderr}{e.stdout}"
|
99 |
return None, logs
|
100 |
|
101 |
return str(video_output_path), logs
|
|
|
136 |
"--controlnet_transformer_num_layers", str(controlnet_transformer_num_layers),
|
137 |
|
138 |
]
|
139 |
+
|
140 |
if upscale:
|
141 |
command.extend(["--upscale", "--upscale_factor", str(upscale_factor)])
|
142 |
|
|
|
152 |
video_output = f"{out_dir}/00000_{seed}_out.mp4"
|
153 |
return video_output if os.path.exists(video_output) else None, logs
|
154 |
|
|
|
|
|
|
|
155 |
# -----------------------------
|
156 |
# UI
|
157 |
# -----------------------------
|
|
|
165 |
with gr.Row():
|
166 |
with gr.Column():
|
167 |
with gr.Row():
|
168 |
+
near_far_estimated = gr.Checkbox(label="Near Far Estimation", value=True)
|
169 |
pose_input = gr.Textbox(label="Target Pose (θ φ r x y)", placeholder="e.g., 0 30 -0.6 0 0")
|
170 |
fps_input = gr.Number(value=24, label="FPS")
|
171 |
+
aspect_ratio_inputs=gr.Textbox(label="Target Aspect Ratio (e.g., 2,3)")
|
|
|
172 |
num_frames_input = gr.Number(value=49, label="Number of Frames")
|
173 |
radius_input = gr.Number(value = 1.0, label="Radius Scale")
|
174 |
mode_input = gr.Dropdown(choices=["gradual"], value="gradual", label="Camera Mode")
|