ovi054 committed
Commit 6b75a12 · verified · 1 Parent(s): baeae45

Update app.py

Files changed (1): app.py (+8 -1)
app.py CHANGED
@@ -1,12 +1,15 @@
 import spaces
 import torch
 from diffusers import HunyuanVideoPipeline, HunyuanVideoTransformer3DModel
+from para_attn.first_block_cache.diffusers_adapters import apply_cache_on_pipe
 from PIL import Image
 import numpy as np
 import gradio as gr
 import os
 
 
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
 # model_id = "hunyuanvideo-community/HunyuanVideo"
 
 model_id = "FastVideo/FastHunyuan-diffusers"
@@ -26,7 +29,11 @@ def generate(prompt, width=832, height=832, num_inference_steps=30, lora_id=None
     if lora_id and lora_id.strip() != "":
         pipe.unload_lora_weights()
         pipe.load_lora_weights(lora_id.strip())
-    pipe.to("cuda")
+    apply_cache_on_pipe(
+        pipe,
+        # residual_diff_threshold=0.2,
+    )
+    # pipe.to("cuda")
     torch.cuda.empty_cache()
     try:
         output = pipe(
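In context, this commit drops the explicit pipe.to("cuda") call inside generate() and instead applies ParaAttention's First Block Cache to the HunyuanVideo pipeline, which reuses cached transformer output on denoising steps where the first block's residual barely changes. Below is a minimal sketch of how the updated pieces could fit together; the from_pretrained loading lines, dtypes, and the example prompt are illustrative assumptions, since they are not part of the hunks shown here.

# Minimal sketch, assuming the pipeline is constructed roughly like this
# elsewhere in app.py (the from_pretrained calls and dtypes below are
# illustrative assumptions, not taken from the diff).
import torch
from diffusers import HunyuanVideoPipeline, HunyuanVideoTransformer3DModel
from para_attn.first_block_cache.diffusers_adapters import apply_cache_on_pipe

device = "cuda" if torch.cuda.is_available() else "cpu"
model_id = "FastVideo/FastHunyuan-diffusers"

transformer = HunyuanVideoTransformer3DModel.from_pretrained(
    model_id, subfolder="transformer", torch_dtype=torch.bfloat16
)
pipe = HunyuanVideoPipeline.from_pretrained(
    model_id, transformer=transformer, torch_dtype=torch.float16
)
pipe.to(device)

# First Block Cache: skip recomputing the remaining transformer blocks on
# steps whose first-block residual is close to the cached one.
apply_cache_on_pipe(
    pipe,
    # residual_diff_threshold=0.2,  # higher values cache more steps (faster, lossier)
)

output = pipe(
    prompt="a cat walking on grass",  # placeholder prompt
    width=832,
    height=832,
    num_inference_steps=30,
).frames[0]

Calling apply_cache_on_pipe inside generate(), after any LoRA reload, presumably ensures the cache hooks wrap the transformer that will actually run, while commenting out pipe.to("cuda") and adding the device fallback likely keeps the script importable on CPU-only hosts.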