Krokodilpirat committed on
Commit
3df51c9
·
verified ·
1 Parent(s): a806278

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -10
app.py CHANGED
@@ -1,9 +1,13 @@
1
  import os
 
 
 
 
 
2
  import gc
3
  import torch
4
  import cv2
5
  import gradio as gr
6
- import gradio as gr
7
  print("📦 Gradio version:", gr.__version__)
8
  import numpy as np
9
  import matplotlib.cm as cm
@@ -51,18 +55,12 @@ model_name = encoder2name[encoder]
51
  # Initialize the model.
52
  video_depth_anything = VideoDepthAnything(**model_configs[encoder])
53
 
54
- # Set cache directories to writable locations
55
- os.environ["HF_HOME"] = "/tmp/huggingface"
56
- os.environ["TRANSFORMERS_CACHE"] = "/tmp/huggingface/transformers"
57
- os.environ["MPLCONFIGDIR"] = "/tmp/matplotlib"
58
-
59
  filepath = hf_hub_download(
60
  repo_id=f"depth-anything/Video-Depth-Anything-{model_name}",
61
  filename=f"video_depth_anything_{encoder}.pth",
62
  repo_type="model",
63
  cache_dir="/tmp/huggingface" # Explizites Setzen des Cache-Verzeichnisses
64
  )
65
-
66
  video_depth_anything.load_state_dict(torch.load(filepath, map_location='cpu'))
67
  video_depth_anything = video_depth_anything.to(DEVICE).eval()
68
 
@@ -177,9 +175,10 @@ def construct_demo():
177
  input_video = gr.Video(label="Input Video")
178
  with gr.Column(scale=2):
179
  with gr.Row(equal_height=True):
180
- processed_video = gr.Video(label="Preprocessed Video", interactive=False, autoplay=True, loop=True, show_share_button=True, scale=5)
181
- depth_vis_video = gr.Video(label="Generated Depth Video", interactive=False, autoplay=True, loop=True, show_share_button=True, scale=5)
182
- stitched_video = gr.Video(label="Stitched RGBD Video", interactive=False, autoplay=True, loop=True, show_share_button=True, scale=5)
 
183
 
184
  with gr.Row(equal_height=True):
185
  with gr.Column(scale=1):
 
1
  import os
2
+ # Set cache directories to writable locations right at the beginning
3
+ os.environ["HF_HOME"] = "/tmp/huggingface"
4
+ os.environ["TRANSFORMERS_CACHE"] = "/tmp/huggingface/transformers"
5
+ os.environ["MPLCONFIGDIR"] = "/tmp/matplotlib"
6
+
7
  import gc
8
  import torch
9
  import cv2
10
  import gradio as gr
 
11
  print("📦 Gradio version:", gr.__version__)
12
  import numpy as np
13
  import matplotlib.cm as cm
 
55
  # Initialize the model.
56
  video_depth_anything = VideoDepthAnything(**model_configs[encoder])
57
 
 
 
 
 
 
58
  filepath = hf_hub_download(
59
  repo_id=f"depth-anything/Video-Depth-Anything-{model_name}",
60
  filename=f"video_depth_anything_{encoder}.pth",
61
  repo_type="model",
62
  cache_dir="/tmp/huggingface" # Explizites Setzen des Cache-Verzeichnisses
63
  )
 
64
  video_depth_anything.load_state_dict(torch.load(filepath, map_location='cpu'))
65
  video_depth_anything = video_depth_anything.to(DEVICE).eval()
66
 
 
175
  input_video = gr.Video(label="Input Video")
176
  with gr.Column(scale=2):
177
  with gr.Row(equal_height=True):
178
+ # Removed loop parameter which is not supported in Gradio 4.36.0
179
+ processed_video = gr.Video(label="Preprocessed Video", interactive=False, autoplay=True, show_share_button=True, scale=5)
180
+ depth_vis_video = gr.Video(label="Generated Depth Video", interactive=False, autoplay=True, show_share_button=True, scale=5)
181
+ stitched_video = gr.Video(label="Stitched RGBD Video", interactive=False, autoplay=True, show_share_button=True, scale=5)
182
 
183
  with gr.Row(equal_height=True):
184
  with gr.Column(scale=1):