Tonic committed
Commit dc2eed4 · verified · 1 Parent(s): 5c6a167

Update app.py

Files changed (1): app.py (+9 −4)
app.py CHANGED
@@ -12,6 +12,13 @@ import uuid
 import random
 from huggingface_hub import hf_hub_download
 
+title = '''# 👋🏻Welcome to Tonic's🌟🎥StableVideo XT-1-1
+🌟🎥StableVideo XT-1-1 (SVD) Image-to-Video is a latent diffusion model trained to generate short video clips from an image conditioning. Check out the [Community demo for Stable Video Diffusion](https://huggingface.co/spaces/multimodalart/stable-video-diffusion) - Img2Vid - XT ([model](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt-1-1), [paper](https://stability.ai/research/stable-video-diffusion-scaling-latent-video-diffusion-models-to-large-datasets), [stability's ui waitlist](https://stability.ai/contact))
+#### Research release ([_non-commercial_](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt/blob/main/LICENSE)): generate `4s` vid from a single image at (`25 frames` at `6 fps`). this demo uses [🧨 diffusers for low VRAM and fast generation](https://huggingface.co/docs/diffusers/main/en/using-diffusers/svd).
+Join us : 🌟TeamTonic🌟 is always making cool demos! Join our active builder's🛠️community 👻 [![Join us on Discord](https://img.shields.io/discord/1109943800132010065?label=Discord&logo=discord&style=flat-square)](https://discord.gg/GWpVpekp) On 🤗Huggingface: [TeamTonic](https://huggingface.co/TeamTonic) & [MultiTransformer](https://huggingface.co/MultiTransformer) On 🌐Github: [Tonic-AI](https://github.com/tonic-ai) & contribute to 🌟 [SciTonic](https://github.com/Tonic-AI/scitonic) 🤗Big thanks to Yuvi Sharma and all the folks at huggingface for the community grant 🤗
+'''
+
+
 pipe = StableVideoDiffusionPipeline.from_pretrained(
     "stabilityai/stable-video-diffusion-img2vid-xt-1-1", torch_dtype=torch.float16, variant="fp16"
 )
@@ -21,7 +28,7 @@ pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
 
 max_64_bit_int = 2**63 - 1
 
-@spaces.GPU
+@spaces.GPU(enable_queue=True)
 def sample(
     image: Image,
     seed: Optional[int] = 42,
@@ -83,9 +90,7 @@ def resize_image(image, output_size=(1024, 576)):
     return cropped_image
 
 with gr.Blocks() as demo:
-    gr.Markdown('''# Community demo for Stable Video Diffusion - Img2Vid - XT ([model](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt), [paper](https://stability.ai/research/stable-video-diffusion-scaling-latent-video-diffusion-models-to-large-datasets), [stability's ui waitlist](https://stability.ai/contact))
-    #### Research release ([_non-commercial_](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt/blob/main/LICENSE)): generate `4s` vid from a single image at (`25 frames` at `6 fps`). this demo uses [🧨 diffusers for low VRAM and fast generation](https://huggingface.co/docs/diffusers/main/en/using-diffusers/svd).
-    ''')
+    gr.Markdown(title)
     with gr.Row():
         with gr.Column():
             image = gr.Image(label="Upload your image", type="pil")
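Taken together, the commit moves the UI banner into a module-level `title` string rendered via `gr.Markdown(title)` and switches the decorator to `@spaces.GPU(enable_queue=True)`. A minimal sketch of how the updated pieces fit together — the imports and the `sample` body are assumptions here, not part of the diff; the real function runs SVD inference and exports the frames as a video:

```python
# Minimal sketch of the updated app.py wiring (assumed imports; sample() body elided).
from typing import Optional

import gradio as gr
import spaces
import torch
from diffusers import StableVideoDiffusionPipeline
from PIL import Image

# fp16 weights keep VRAM usage low enough for a single GPU.
pipe = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt-1-1",
    torch_dtype=torch.float16,
    variant="fp16",
)

title = "..."  # the Markdown banner defined at module level in the diff above

@spaces.GPU(enable_queue=True)  # per the diff: request a GPU per call, queue concurrent requests
def sample(image: Image.Image, seed: Optional[int] = 42):
    ...  # elided: generate 25 frames with pipe(...) and export them at 6 fps

with gr.Blocks() as demo:
    gr.Markdown(title)  # render the banner instead of the old inline string
    with gr.Row():
        with gr.Column():
            image = gr.Image(label="Upload your image", type="pil")
```

Defining `title` once at module level keeps the `gr.Blocks` layout code short and lets the banner text be edited without touching the UI wiring.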