Hugging Face Spaces — Space status: Runtime error
Commit: "Update app.py" (Browse files)
File changed: app.py
@@ -60,13 +60,9 @@ def read_video(video) -> torch.Tensor:

Before (old side of the diff):

 60      """
 61      Reads a video file and converts it into a torch.Tensor with the shape [F, C, H, W].
 62      """
 63 -    video_tensor = []
 64      to_tensor_transform = transforms.ToTensor()
 65      if isinstance(video, str):
 66 -        reader = imageio.get_reader(video)   [content lost in extraction; reconstructed from `for frame in reader:` below and the new side's `imageio.get_reader(video)`]
 67 -        # Read all frames
 68 -        for frame in reader:
 69 -            video_tensor.append(to_tensor_transform(frame))
 70      else:  # video is a list of pil images
 71          video_tensor = torch.stack([to_tensor_transform(img) for img in video])
 72      return video_tensor
@@ -215,6 +211,7 @@ def generate_video(

Before (old side of the diff):

 215
 216      # Convert to tensor
 217      processed_video = read_video(processed_video)
 218
 219      progress(0.2, desc="Preparing generation parameters...")
 220
After (new side of the first hunk, `read_video`):

 60      """
 61      Reads a video file and converts it into a torch.Tensor with the shape [F, C, H, W].
 62      """
 63      to_tensor_transform = transforms.ToTensor()
 64      if isinstance(video, str):
 65 +        video_tensor = torch.stack([to_tensor_transform(img) for img in imageio.get_reader(video)])
 66      else:  # video is a list of pil images
 67          video_tensor = torch.stack([to_tensor_transform(img) for img in video])
 68      return video_tensor
After (new side of the second hunk, `generate_video`):

 211
 212      # Convert to tensor
 213      processed_video = read_video(processed_video)
 214 +    print(type(processed_video))
 215
 216      progress(0.2, desc="Preparing generation parameters...")
 217