Commit 9c8f470 · Parent(s): 82cc5d2
options/Video_model/Model.py
CHANGED
@@ -7,6 +7,7 @@ from .tdd_svd_scheduler import TDDSVDStochasticIterativeScheduler
 from .utils import load_lora_weights, save_video
 from typing import Optional
 from glob import glob
+import torchvision.transforms as transforms
 
 svd_path = 'stabilityai/stable-video-diffusion-img2vid-xt-1-1'
 lora_repo_path = 'RED-AIGC/TDD'
@@ -47,14 +48,9 @@ def Video(
     base_count = len(glob(os.path.join(output_folder, "*.mp4")))
     video_path = os.path.join(output_folder, f"{base_count:06d}.mp4")
 
-    if isinstance(image,
-
-
-        image = image.to(dtype=torch.float32)
-    elif isinstance(image, Image.Image):
-        # Convert PIL Image to a supported tensor
-        image = torch.tensor(image).float()
-
+    if isinstance(image, Image.Image):
+        transform = transforms.ToTensor()
+        image = transform(image).unsqueeze(0).to(device).float()
 
     with torch.autocast(device):
         frames = pipeline(
options/Video_model/__pycache__/Model.cpython-310.pyc
CHANGED
Binary files a/options/Video_model/__pycache__/Model.cpython-310.pyc and b/options/Video_model/__pycache__/Model.cpython-310.pyc differ