Update app.py
app.py CHANGED
@@ -3,7 +3,7 @@ import torch
 import numpy as np
 import modin.pandas as pd
 from PIL import Image
-from diffusers import
+from diffusers import DiffusionPipeline
 from huggingface_hub import login
 import os
 from glob import glob
@@ -15,8 +15,9 @@ import random
 token = os.environ['HF_TOKEN']
 login(token=token)
 device = 'cuda' if torch.cuda.is_available() else 'cpu'
-pipe =
+pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-video-diffusion-img2vid-xt-1-1", torch_dtype=torch.float16, variant="fp16")
 pipe = pipe.to(device)
+#pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)

 max_64_bit_int = 2**63 - 1

@@ -26,11 +27,12 @@ def sample(
     randomize_seed: bool = True,
     motion_bucket_id: int = 127,
     fps_id: int = 6,
+    version: str = "svd_xt",
     cond_aug: float = 0.02,
     decoding_t: int = 3, # Number of frames decoded at a time! This eats most VRAM. Reduce if necessary.
-    device: str = "
-    output_folder: str = "outputs",
-
+    device: str = "cpu",
+    output_folder: str = "outputs",):
+
     if image.mode == "RGBA":
         image = image.convert("RGB")

@@ -48,7 +50,7 @@ def sample(

     return video_path, seed

-def resize_image(image, output_size=(1024, 578)):
+def resize_image(image, output_size=(768, 322)):
     # Calculate aspect ratios
     target_aspect = output_size[0] / output_size[1] # Aspect ratio of the desired size
     image_aspect = image.width / image.height # Aspect ratio of the original image
@@ -80,9 +82,9 @@ def resize_image(image, output_size=(1024, 578)):
     return cropped_image

 with gr.Blocks() as demo:
-    gr.Markdown('''# Community demo for Stable Video Diffusion - Img2Vid - XT ([model](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt), [paper](https://stability.ai/research/stable-video-diffusion-scaling-latent-video-diffusion-models-to-large-datasets), [stability's ui waitlist](https://stability.ai/contact))
+    #gr.Markdown('''# Community demo for Stable Video Diffusion - Img2Vid - XT ([model](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt), [paper](https://stability.ai/research/stable-video-diffusion-scaling-latent-video-diffusion-models-to-large-datasets), [stability's ui waitlist](https://stability.ai/contact))
     #### Research release ([_non-commercial_](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt/blob/main/LICENSE)): generate `4s` vid from a single image at (`25 frames` at `6 fps`). this demo uses [🧨 diffusers for low VRAM and fast generation](https://huggingface.co/docs/diffusers/main/en/using-diffusers/svd).
-    ''')
+    #''')
     with gr.Row():
         with gr.Column():
             image = gr.Image(label="Upload your image", type="pil")
@@ -95,9 +97,8 @@ with gr.Blocks() as demo:
             fps_id = gr.Slider(label="Frames per second", info="The length of your video in seconds will be 25/fps", value=6, minimum=5, maximum=30)

     image.upload(fn=resize_image, inputs=image, outputs=image, queue=False)
-    generate_btn.click(fn=sample, inputs=[image, seed, randomize_seed, motion_bucket_id, fps_id], outputs=[video, seed], api_name="video")
-
+    generate_btn.click(fn=sample, inputs=[image, seed, randomize_seed, motion_bucket_id, fps_id], outputs=[video, seed], api_name="video",)# inputs=image, outputs=[video, seed], fn=sample, cache_examples=True,)

 if __name__ == "__main__":
     demo.queue(max_size=20, api_open=False)
-    demo.launch(show_api=False)
+    demo.launch(share=True, show_api=False)
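Note on the pipeline change: the diff swaps the model load (new line 18) to DiffusionPipeline.from_pretrained on the stable-video-diffusion-img2vid-xt-1-1 checkpoint, but the body of sample(), where the pipeline is actually called, is not part of this change. Below is a minimal sketch of how such a pipeline is typically driven, following the public diffusers SVD example; the real sample() in this Space may differ, and the mapping of the app's cond_aug and decoding_t arguments onto noise_aug_strength and decode_chunk_size is an assumption, as are the input/output file names and the resolution.

import os
import random

import torch
from diffusers import DiffusionPipeline
from diffusers.utils import load_image, export_to_video

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt-1-1",
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")

# Seed handling mirroring max_64_bit_int / randomize_seed in app.py.
seed = random.randint(0, 2**63 - 1)
generator = torch.Generator("cuda").manual_seed(seed)

# Conditioning image (hypothetical path); SVD-XT's reference resolution is 1024x576.
image = load_image("input.png").convert("RGB").resize((1024, 576))

frames = pipe(
    image,
    fps=6,                    # fps_id in the app
    motion_bucket_id=127,     # motion_bucket_id in the app
    noise_aug_strength=0.02,  # assumed to correspond to cond_aug
    decode_chunk_size=3,      # assumed to correspond to decoding_t
    generator=generator,
).frames[0]

os.makedirs("outputs", exist_ok=True)
export_to_video(frames, os.path.join("outputs", "generated.mp4"), fps=6)

The xt-1-1 checkpoint is a gated model on the Hub, which is presumably why app.py calls login(token=os.environ['HF_TOKEN']) before from_pretrained.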
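Note on resize_image: only its default output_size changes, from (1024, 578) to (768, 322); the body hinted at by the aspect-ratio comments in the context lines is untouched and not shown. A sketch of a resize-then-center-crop helper consistent with those comments (the actual implementation in app.py may differ in details such as the resampling filter):

from PIL import Image

def resize_image(image: Image.Image, output_size=(768, 322)) -> Image.Image:
    # Calculate aspect ratios, as in the context lines above.
    target_aspect = output_size[0] / output_size[1]  # Aspect ratio of the desired size
    image_aspect = image.width / image.height        # Aspect ratio of the original image

    if image_aspect > target_aspect:
        # Source is wider than the target: match heights, then crop the sides.
        new_height = output_size[1]
        new_width = int(new_height * image_aspect)
    else:
        # Source is taller than the target: match widths, then crop top and bottom.
        new_width = output_size[0]
        new_height = int(new_width / image_aspect)

    resized = image.resize((new_width, new_height), Image.LANCZOS)

    # Center-crop to exactly output_size.
    left = (new_width - output_size[0]) // 2
    top = (new_height - output_size[1]) // 2
    return resized.crop((left, top, left + output_size[0], top + output_size[1]))

SVD-XT was trained to produce 25 frames at 1024x576, so the smaller default presumably trades output fidelity for memory.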
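Note on device placement: the new file keeps torch.compile commented out on line 20, and the (now commented-out) header text links to the diffusers SVD guide for low-VRAM generation. A sketch of the placement options that guide describes; Option 1 is the only one app.py actually uses:

import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt-1-1",
    torch_dtype=torch.float16,
    variant="fp16",
)

# Option 1 (what app.py does): keep the whole pipeline on one device.
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")

# Option 2: offload idle submodules to the CPU to reduce VRAM use
# (this manages device placement itself, so skip the explicit .to()).
# pipe.enable_model_cpu_offload()

# Option 3 (present but commented out in the diff): compile the UNet for
# faster repeated generations after a warm-up run.
# pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)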