Manjushri committed
Commit e9eef29 · verified · 1 Parent(s): 7344c92

Update app.py

Files changed (1)
  1. app.py +3 -3
app.py CHANGED
@@ -18,7 +18,7 @@ device = 'cuda' if torch.cuda.is_available() else 'cpu'
 pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-video-diffusion-img2vid-xt-1-1", torch_dtype=torch.float16, variant="fp16")
 pipe = pipe.to(device)
 #pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
-
+pipe.enable_xformers_memory_efficient_attention()
 max_64_bit_int = 2**63 - 1
 
 def sample(
@@ -27,7 +27,7 @@ def sample(
     randomize_seed: bool = True,
     motion_bucket_id: int = 127,
     fps_id: int = 6,
-    version: str = "svd_xt",
+    version: str = "svd_xt_1-1",
     cond_aug: float = 0.02,
     decoding_t: int = 3, # Number of frames decoded at a time! This eats most VRAM. Reduce if necessary.
     device: str = "cpu",
@@ -101,4 +101,4 @@ with gr.Blocks() as demo:
 
 if __name__ == "__main__":
     demo.queue(max_size=20, api_open=False)
-    demo.launch(share=True, show_api=False)
+    demo.launch(show_api=False)
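
For reference, a minimal sketch of the pipeline setup as it would look after this commit. The model ID, dtype, and the new enable_xformers_memory_efficient_attention() call are taken from the diff; the rest of app.py (the sample() function and the Gradio UI) is omitted, and the snippet assumes torch, diffusers, and xformers are installed.

# Sketch of the post-commit pipeline setup; not the full app.py.
import torch
from diffusers import DiffusionPipeline

device = 'cuda' if torch.cuda.is_available() else 'cpu'

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt-1-1",
    torch_dtype=torch.float16,
    variant="fp16",
)
pipe = pipe.to(device)

# Added in this commit: xformers memory-efficient attention,
# which is intended to reduce VRAM usage during generation.
pipe.enable_xformers_memory_efficient_attention()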