cakemus committed
Commit 2efae84 · Parent: 1da8dac
Files changed (1):
  1. app.py +7 -3
app.py CHANGED
@@ -17,8 +17,12 @@ from huggingface_hub import hf_hub_download
 
 #gradio.helpers.CACHED_FOLDER = '/data/cache'
 
+# Load the pipeline with authentication token
 pipe = StableVideoDiffusionPipeline.from_pretrained(
-    "multimodalart/stable-video-diffusion", torch_dtype=torch.float16, variant="fp16"
+    "multimodalart/stable-video-diffusion",
+    torch_dtype=torch.float16,
+    variant="fp16",
+    use_auth_token=os.getenv("HUGGINGFACE_TOKEN")  # Fetch the token from the environment variable
 )
 pipe.to("cuda")
 #pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
@@ -43,7 +47,7 @@ def sample(
     if image.mode == "RGBA":
         image = image.convert("RGB")
 
-    if(randomize_seed):
+    if randomize_seed:
         seed = random.randint(0, max_64_bit_int)
     generator = torch.manual_seed(seed)
 
@@ -125,4 +129,4 @@ with gr.Blocks() as demo:
 
 if __name__ == "__main__":
     #demo.queue(max_size=20, api_open=False)
-    demo.launch(share=True, show_api=False)
+    demo.launch(share=True, show_api=False)
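
For reference, a minimal, self-contained sketch of the new loading path, runnable outside the Space. It assumes the HUGGINGFACE_TOKEN environment variable is set (on Spaces, a secret with that name is exposed to the app as an environment variable), that app.py imports os alongside its other imports (not shown in this hunk), and it uses the same use_auth_token parameter the commit adds; the standalone imports below are assumptions about the rest of app.py, not part of this diff.

import os

import torch
from diffusers import StableVideoDiffusionPipeline

# Read the access token from the environment instead of hard-coding it.
hf_token = os.getenv("HUGGINGFACE_TOKEN")

# Load the image-to-video pipeline in fp16, passing the token so gated or
# private repos can be downloaded.
pipe = StableVideoDiffusionPipeline.from_pretrained(
    "multimodalart/stable-video-diffusion",
    torch_dtype=torch.float16,
    variant="fp16",
    use_auth_token=hf_token,  # same parameter the commit adds
)
pipe.to("cuda")

If HUGGINGFACE_TOKEN is unset, os.getenv returns None and the download falls back to any locally cached login, so public repos keep working without a token.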