Manjushri committed on
Commit
9c7a553
·
verified ·
1 Parent(s): 3c9e972

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -6
app.py CHANGED
@@ -13,17 +13,18 @@ login(token=token)
13
  device = 'cuda' if torch.cuda.is_available() else 'cpu'
14
  torch.cuda.max_memory_allocated(device=device)
15
  torch.cuda.empty_cache()
 
 
 
 
 
 
16
 
17
 
18
  def genie(src_image):
19
  torch.cuda.max_memory_allocated(device=device)
20
  torch.cuda.empty_cache()
21
- pipe = StableVideoDiffusionPipeline.from_pretrained("multimodalart/stable-video-diffusion", variant="fp16", use_safetensors=True)
22
- #pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
23
-
24
- pipe.enable_xformers_memory_efficient_attention()
25
- pipe = pipe.to(device)
26
- torch.cuda.empty_cache()
27
  frames = pipe(image=src_image).images[0]
28
  torch.cuda.empty_cache()
29
  return frames
 
13
  device = 'cuda' if torch.cuda.is_available() else 'cpu'
14
  torch.cuda.max_memory_allocated(device=device)
15
  torch.cuda.empty_cache()
16
+ pipe = StableVideoDiffusionPipeline.from_pretrained("multimodalart/stable-video-diffusion", variant="fp16", use_safetensors=True)
17
+ #pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
18
+
19
+ pipe.enable_xformers_memory_efficient_attention()
20
+ pipe = pipe.to(device)
21
+ torch.cuda.empty_cache()
22
 
23
 
24
  def genie(src_image):
25
  torch.cuda.max_memory_allocated(device=device)
26
  torch.cuda.empty_cache()
27
+
 
 
 
 
 
28
  frames = pipe(image=src_image).images[0]
29
  torch.cuda.empty_cache()
30
  return frames