Update app.py
app.py
@@ -18,25 +18,6 @@ from video_depth_anything.video_depth import VideoDepthAnything
 from utils.dc_utils import read_video_frames, save_video
 from huggingface_hub import hf_hub_download
 
-# Examples for the Gradio Demo.
-# Each example now contains 8 parameters:
-# [video_path, max_len, target_fps, max_res, stitch, grayscale, convert_from_color, blur]
-examples = [
-    ['assets/example_videos/octopus_01.mp4', -1, -1, 1280, True, True, True, 0.3],
-    ['assets/example_videos/chicken_01.mp4', -1, -1, 1280, True, True, True, 0.3],
-    ['assets/example_videos/gorilla_01.mp4', -1, -1, 1280, True, True, True, 0.3],
-    ['assets/example_videos/davis_rollercoaster.mp4', -1, -1, 1280, True, True, True, 0.3],
-    ['assets/example_videos/Tokyo-Walk_rgb.mp4', -1, -1, 1280, True, True, True, 0.3],
-    ['assets/example_videos/4158877-uhd_3840_2160_30fps_rgb.mp4', -1, -1, 1280, True, True, True, 0.3],
-    ['assets/example_videos/4511004-uhd_3840_2160_24fps_rgb.mp4', -1, -1, 1280, True, True, True, 0.3],
-    ['assets/example_videos/1753029-hd_1920_1080_30fps.mp4', -1, -1, 1280, True, True, True, 0.3],
-    ['assets/example_videos/davis_burnout.mp4', -1, -1, 1280, True, True, True, 0.3],
-    ['assets/example_videos/example_5473765-l.mp4', -1, -1, 1280, True, True, True, 0.3],
-    ['assets/example_videos/Istanbul-26920.mp4', -1, -1, 1280, True, True, True, 0.3],
-    ['assets/example_videos/obj_1.mp4', -1, -1, 1280, True, True, True, 0.3],
-    ['assets/example_videos/sheep_cut1.mp4', -1, -1, 1280, True, True, True, 0.3],
-]
-
 # Use GPU if available; otherwise, use CPU.
 DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
 
@@ -194,14 +175,7 @@ def construct_demo():
 with gr.Column(scale=2):
     pass
 
-#
-gr.Examples(
-    examples=examples,
-    inputs=[input_video, max_len, target_fps, max_res, stitch_option, grayscale_option, convert_from_color_option, blur_slider],
-    outputs=[processed_video, depth_vis_video, stitched_video],
-    fn=infer_video_depth,
-    cache_examples=False,
-)
+# Examples block removed
 
 generate_btn.click(
     fn=infer_video_depth,