cakemus committed on
Commit
e47fa4e
·
1 Parent(s): a3a35fb

revert 17 nov

Browse files
Files changed (1) hide show
  1. app.py +23 -2
app.py CHANGED
@@ -17,8 +17,12 @@ from huggingface_hub import hf_hub_download
17
 
18
  #gradio.helpers.CACHED_FOLDER = '/data/cache'
19
 
 
20
  pipe = StableVideoDiffusionPipeline.from_pretrained(
21
- "stabilityai/stable-video-diffusion-img2vid-xt", torch_dtype=torch.float16, variant="fp16"
 
 
 
22
  )
23
  pipe.to("cuda")
24
  #pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
@@ -43,7 +47,7 @@ def sample(
43
  if image.mode == "RGBA":
44
  image = image.convert("RGB")
45
 
46
- if(randomize_seed):
47
  seed = random.randint(0, max_64_bit_int)
48
  generator = torch.manual_seed(seed)
49
 
@@ -105,6 +109,23 @@ with gr.Blocks() as demo:
105
 
106
  image.upload(fn=resize_image, inputs=image, outputs=image, queue=False)
107
  generate_btn.click(fn=sample, inputs=[image, seed, randomize_seed, motion_bucket_id, fps_id], outputs=[video, seed], api_name="video")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
108
 
109
  if __name__ == "__main__":
110
  #demo.queue(max_size=20, api_open=False)
 
17
 
18
  #gradio.helpers.CACHED_FOLDER = '/data/cache'
19
 
20
+ # Load the pipeline with authentication token
21
  pipe = StableVideoDiffusionPipeline.from_pretrained(
22
+ "multimodalart/stable-video-diffusion",
23
+ torch_dtype=torch.float16,
24
+ variant="fp16",
25
+ use_auth_token=os.getenv("HUGGINGFACE_TOKEN") # Fetch the token from the environment variable
26
  )
27
  pipe.to("cuda")
28
  #pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
 
47
  if image.mode == "RGBA":
48
  image = image.convert("RGB")
49
 
50
+ if randomize_seed:
51
  seed = random.randint(0, max_64_bit_int)
52
  generator = torch.manual_seed(seed)
53
 
 
109
 
110
  image.upload(fn=resize_image, inputs=image, outputs=image, queue=False)
111
  generate_btn.click(fn=sample, inputs=[image, seed, randomize_seed, motion_bucket_id, fps_id], outputs=[video, seed], api_name="video")
112
+ gr.Examples(
113
+ examples=[
114
+ "images/blink_meme.png",
115
+ "images/confused2_meme.png",
116
+ "images/disaster_meme.png",
117
+ "images/distracted_meme.png",
118
+ "images/hide_meme.png",
119
+ "images/nazare_meme.png",
120
+ "images/success_meme.png",
121
+ "images/willy_meme.png",
122
+ "images/wink_meme.png"
123
+ ],
124
+ inputs=image,
125
+ outputs=[video, seed],
126
+ fn=sample,
127
+ cache_examples="lazy",
128
+ )
129
 
130
  if __name__ == "__main__":
131
  #demo.queue(max_size=20, api_open=False)