Commit de71a62
Parent(s): 9588460
Update app.py
app.py CHANGED
@@ -5,6 +5,7 @@ from transformers import AutoProcessor, AutoModel
 from PIL import Image
 import cv2
 from concurrent.futures import ThreadPoolExecutor
+import PyNvCodec as nvc
 
 
 MODEL_NAME = "microsoft/xclip-base-patch16-zero-shot"
@@ -18,20 +19,28 @@ print ("device")
 processor = AutoProcessor.from_pretrained(MODEL_NAME)
 model = AutoModel.from_pretrained(MODEL_NAME).to(device)
 
+
 def get_video_length(file_path):
-    cap = cv2.VideoCapture(file_path)
-    length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
-    cap.release()
-    return length
+    decoder = nvc.PyNvDecoder(file_path, 0)  # 0 indicates GPU ID
+    return decoder.FramesCount()
 
-def read_video_opencv(file_path, indices):
+def read_video_nvcodec(file_path, indices):
     frames = []
+    decoder = nvc.PyNvDecoder(file_path, 0)  # 0 indicates GPU ID
+
+    nv12_surf_plane = nvc.PySurface()
+    for i in range(max(indices) + 1):
+        success = decoder.DecodeSingleSurface(nv12_surf_plane)
+        if not success:
+            break
+
+        if i in indices:
+            rgb_surf = nv12_surf_plane.ToColor(nvc.PixelFormat.RGB)
+            h, w, c = rgb_surf.HostShape()
+            frame = np.ndarray(shape=(h, w, c), dtype=np.uint8, order='C')
+            rgb_surf.Download(frame)
+            frames.append(frame)
+
     return frames
 
 def get_frame(file_path, index):
@@ -71,7 +80,7 @@ def concatenate_frames(frames, clip_len):
 def model_interface(uploaded_video, activity):
     video_length = get_video_length(uploaded_video)
     indices = sample_uniform_frame_indices(CLIP_LEN, seg_len=video_length)
-    video = read_video_opencv(uploaded_video, indices)
+    video = read_video_nvcodec(uploaded_video, indices)
     concatenated_image = concatenate_frames(video, CLIP_LEN)
 
     activities_list = [activity, "other"]
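
For reference, below is a minimal CPU-only sketch of the same sample, decode, and classify pipeline. It is not the Space's code: the body of sample_uniform_frame_indices, the CLIP_LEN value, and the classify() wrapper are assumptions (none of them appear in this diff), and cv2.VideoCapture stands in for the PyNvCodec decoder that this commit switches to, so the sketch runs without an NVIDIA GPU.

# CPU-only sketch of the sample -> decode -> classify path around app.py.
# Assumptions (not taken from this diff): sample_uniform_frame_indices body,
# CLIP_LEN = 32, the classify() wrapper, and cv2 decoding in place of PyNvCodec.
import cv2
import numpy as np
import torch
from transformers import AutoProcessor, AutoModel

MODEL_NAME = "microsoft/xclip-base-patch16-zero-shot"
CLIP_LEN = 32  # assumed; the zero-shot X-CLIP checkpoint is commonly fed 32 frames

device = "cuda" if torch.cuda.is_available() else "cpu"
processor = AutoProcessor.from_pretrained(MODEL_NAME)
model = AutoModel.from_pretrained(MODEL_NAME).to(device)


def sample_uniform_frame_indices(clip_len, seg_len):
    # Plausible reconstruction: clip_len indices spread evenly over the video.
    if seg_len <= 0:
        return [0] * clip_len
    step = max(seg_len // clip_len, 1)
    return [min(i * step, seg_len - 1) for i in range(clip_len)]


def read_video_cpu(file_path, indices):
    # Same contract as read_video_nvcodec: decode sequentially, keep only the
    # requested indices, return RGB frames as uint8 numpy arrays.
    frames = []
    cap = cv2.VideoCapture(file_path)
    for i in range(max(indices) + 1):
        ok, frame = cap.read()
        if not ok:
            break
        if i in indices:
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frames.extend([rgb] * indices.count(i))  # honour repeated indices
    cap.release()
    return frames


def classify(file_path, activity):
    cap = cv2.VideoCapture(file_path)
    seg_len = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    cap.release()

    indices = sample_uniform_frame_indices(CLIP_LEN, seg_len=seg_len)
    video = read_video_cpu(file_path, indices)  # list of (H, W, 3) uint8 frames

    activities_list = [activity, "other"]
    inputs = processor(text=activities_list, videos=video,
                       return_tensors="pt", padding=True)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
    probs = outputs.logits_per_video.softmax(dim=-1)[0]
    return dict(zip(activities_list, probs.tolist()))

Swapping read_video_cpu for a GPU decoder only changes where frames are produced; the downstream X-CLIP call is the same either way.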