Commit ff2f391 · 1 parent: ceb3bb4
options/Video_model/Model.py
CHANGED
@@ -12,10 +12,12 @@ svd_path = 'stabilityai/stable-video-diffusion-img2vid-xt-1-1'
 lora_repo_path = 'RED-AIGC/TDD'
 lora_weight_name = 'svd-xt-1-1_tdd_lora_weights.safetensors'
 
+device="cuda" if torch.cuda.is_available() else "cpu"
+
 # if torch.cuda.is_available():
 noise_scheduler = TDDSVDStochasticIterativeScheduler(num_train_timesteps = 250, sigma_min = 0.002, sigma_max = 700.0, sigma_data = 1.0,
                                                      s_noise = 1.0, rho = 7, clip_denoised = False)
-pipeline = StableVideoDiffusionPipeline.from_pretrained(svd_path, scheduler = noise_scheduler, torch_dtype = torch.float16, variant = "fp16").to(
+pipeline = StableVideoDiffusionPipeline.from_pretrained(svd_path, scheduler = noise_scheduler, torch_dtype = torch.float16, variant = "fp16").to(device)
 load_lora_weights(pipeline.unet, lora_repo_path, weight_name = lora_weight_name)
 
 # @spaces.GPU
@@ -44,7 +46,7 @@ def Video(
     base_count = len(glob(os.path.join(output_folder, "*.mp4")))
     video_path = os.path.join(output_folder, f"{base_count:06d}.mp4")
 
-    with torch.autocast(
+    with torch.autocast(device):
         frames = pipeline(
             image, height = height, width = width,
             num_inference_steps = num_inference_steps,
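For context, the pattern this commit lands on can be sketched in isolation: resolve the device string once, move the pipeline to it, and hand the same string to torch.autocast. The sketch below is a minimal reconstruction using stock diffusers only; the Space's project-local TDDSVDStochasticIterativeScheduler and load_lora_weights helper are omitted, and the input image path, step count, and output filename are illustrative placeholders rather than values taken from the repository.

# Minimal sketch of the device-handling pattern introduced by this commit.
# Assumes `torch` and `diffusers` are installed and the SVD weights are
# accessible; the Space's custom TDD scheduler and LoRA loading are left out.
import torch
from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import load_image, export_to_video

device = "cuda" if torch.cuda.is_available() else "cpu"

pipeline = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt-1-1",  # svd_path in Model.py
    torch_dtype=torch.float16,   # mirrors Model.py; CPU-only runs would want float32
    variant="fp16",
).to(device)

image = load_image("input.png").resize((1024, 576))       # placeholder input image

# torch.autocast takes a device *type* string ("cuda" or "cpu"), so the same
# value used for .to(device) above can be reused here.
with torch.autocast(device):
    frames = pipeline(image, num_inference_steps=25, decode_chunk_size=4).frames[0]

export_to_video(frames, "generated.mp4", fps=7)

The fix itself is narrower than the sketch: the previous revision left ".to(" and "with torch.autocast(" unfinished, which fails at import time and is a plausible cause of the Space's runtime error. Defining device up front and completing both calls with it resolves that while keeping the pipeline usable on machines without CUDA.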