Commit 06f6199
Parent(s): f9b4d00
options/Video_model/Model.py
CHANGED
@@ -7,7 +7,6 @@ from .tdd_svd_scheduler import TDDSVDStochasticIterativeScheduler
 from .utils import load_lora_weights, save_video
 from typing import Optional
 from glob import glob
-import torchvision.transforms as transforms
 
 svd_path = 'stabilityai/stable-video-diffusion-img2vid-xt-1-1'
 lora_repo_path = 'RED-AIGC/TDD'
@@ -47,11 +46,9 @@ def Video(
     os.makedirs(output_folder, exist_ok=True)
     base_count = len(glob(os.path.join(output_folder, "*.mp4")))
     video_path = os.path.join(output_folder, f"{base_count:06d}.mp4")
+
 
-
-    image = transforms.ToTensor()(image).unsqueeze(0).to(device)
-
-    with torch.autocast(device,dtype=torch.float32):
+    with torch.autocast(device):
         frames = pipeline(
             image, height = height, width = width,
             num_inference_steps = num_inference_steps,
options/Video_model/__pycache__/Model.cpython-310.pyc
CHANGED
Binary files a/options/Video_model/__pycache__/Model.cpython-310.pyc and b/options/Video_model/__pycache__/Model.cpython-310.pyc differ
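
For reference, the Model.py change removes the manual torchvision ToTensor conversion (the input image is now handed to the pipeline directly) and drops the explicit dtype=torch.float32 from the autocast context. A minimal sketch of the post-commit generation path is below; it assumes `pipeline` is a diffusers StableVideoDiffusionPipeline already loaded with the TDD LoRA weights, the function name generate_video and the default argument values are illustrative, and diffusers' export_to_video stands in for the repository's own save_video helper, whose signature is not shown in the diff.

# Sketch only: mirrors the hunk above, not the full repository file.
import os
from glob import glob

import torch
from diffusers.utils import export_to_video


def generate_video(pipeline, image, device, height=576, width=1024,
                   num_inference_steps=4, output_folder="outputs"):
    # Pick the next free 000000.mp4-style name in the output folder.
    os.makedirs(output_folder, exist_ok=True)
    base_count = len(glob(os.path.join(output_folder, "*.mp4")))
    video_path = os.path.join(output_folder, f"{base_count:06d}.mp4")

    # Post-commit behaviour: the PIL image is passed to the pipeline as-is
    # (no ToTensor()/unsqueeze), and autocast uses the device's default dtype.
    with torch.autocast(device):
        frames = pipeline(
            image, height=height, width=width,
            num_inference_steps=num_inference_steps,
        ).frames[0]

    export_to_video(frames, video_path, fps=7)  # fps value is illustrative
    return video_path

Without the explicit dtype, torch.autocast falls back to its per-device default (float16 on CUDA), which appears to be the point of the change; if float32 autocast were still wanted, the dtype argument would have to be restated.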