Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -10,6 +10,12 @@ import tempfile
 from depth_anything.dpt import DepthAnything
 from depth_anything.util.transform import Resize, NormalizeImage, PrepareForNet
 
+
+@spaces.GPU
+@torch.no_grad()
+def predict_depth(model, image):
+    return model(image)
+
 def make_video(video_path, outdir='./vis_video_depth',encoder='vitl'):
     # Define path for temporary processed frames
     temp_frame_dir = tempfile.mkdtemp()
@@ -76,8 +82,7 @@ def make_video(video_path, outdir='./vis_video_depth',encoder='vitl'):
         frame = transform({'image': frame})['image']
         frame = torch.from_numpy(frame).unsqueeze(0).to(DEVICE)
 
-
-        depth = depth_anything(frame)
+        depth = predict_depth(depth_anything, frame)
 
         depth = F.interpolate(depth[None], (frame_height, frame_width), mode='bilinear', align_corners=False)[0, 0]
         depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0
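
For context, a minimal sketch of the pattern this commit applies: the GPU-bound forward pass is pulled out of make_video into a standalone function decorated with @spaces.GPU, so that on ZeroGPU hardware a GPU is attached only for the duration of that call, with @torch.no_grad() disabling gradient tracking for inference. The dummy model, input shape, and __main__ harness below are illustrative assumptions, not part of the Space's code.

import spaces
import torch

@spaces.GPU          # on a ZeroGPU Space, a GPU is allocated only while this function runs
@torch.no_grad()     # inference-only forward pass, no gradient tracking
def predict_depth(model, image):
    return model(image)

if __name__ == "__main__":
    # Hypothetical stand-in for the DepthAnything model, just to exercise the call pattern.
    dummy_model = torch.nn.Conv2d(3, 1, kernel_size=3, padding=1)
    frame = torch.randn(1, 3, 518, 518)
    depth = predict_depth(dummy_model, frame)
    print(depth.shape)  # torch.Size([1, 1, 518, 518])

Keeping the decorated function small (just the model call) limits how long the GPU is held per frame, which is the point of splitting it out of the frame loop.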