cakemus committed
Commit 1da8dac · 1 Parent(s): ec9e5f3

stablediff test

Files changed (3)
  1. .gitignore +1 -0
  2. app.py +123 -24
  3. requirements.txt +6 -3
.gitignore ADDED
@@ -0,0 +1 @@
+ __pycache__/
app.py CHANGED
@@ -1,29 +1,128 @@
  import gradio as gr
- from transformers import pipeline
- from spaces import GPU # Import the GPU decorator for ZeroGPU
-
- # Use @GPU to allocate a GPU when running the video generation function
- @GPU
- def generate_video(prompt, image):
-     # Load the Kandinsky video model
-     video_model = pipeline("video-generation", model="ai-forever/KandinskyVideo_1_1", device=0)
-
-     # Generate video based on the prompt and image
-     video = video_model(prompt=prompt, init_image=image)
-     return video # Ensure this returns the video file or frames
-
- # Create the Gradio interface with text and image inputs
- interface = gr.Interface(
-     fn=generate_video,
-     inputs=[
-         gr.Textbox(label="Enter your prompt here"),
-         gr.Image(label="Upload an initial image") # Image upload input
      ],
-     outputs="video",
-     title="AI Video Generator",
-     description="This app generates a short video based on your input prompt and initial image using Kandinsky 1.1.",
-     theme="dark"
- )

  if __name__ == "__main__":
-     interface.launch()
 
 
  import gradio as gr
+ import spaces
+ #import gradio.helpers
+ import torch
+ import os
+ from glob import glob
+ from pathlib import Path
+ from typing import Optional
+
+ from diffusers import StableVideoDiffusionPipeline
+ from diffusers.utils import load_image, export_to_video
+ from PIL import Image
+
+ import uuid
+ import random
+ from huggingface_hub import hf_hub_download
+
+ #gradio.helpers.CACHED_FOLDER = '/data/cache'
+
+ pipe = StableVideoDiffusionPipeline.from_pretrained(
+     "multimodalart/stable-video-diffusion", torch_dtype=torch.float16, variant="fp16"
+ )
+ pipe.to("cuda")
+ #pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+ #pipe.vae = torch.compile(pipe.vae, mode="reduce-overhead", fullgraph=True)
+
+ max_64_bit_int = 2**63 - 1
+
+ @spaces.GPU(duration=120)
+ def sample(
+     image: Image,
+     seed: Optional[int] = 42,
+     randomize_seed: bool = True,
+     motion_bucket_id: int = 127,
+     fps_id: int = 6,
+     version: str = "svd_xt",
+     cond_aug: float = 0.02,
+     decoding_t: int = 3, # Number of frames decoded at a time! This eats most VRAM. Reduce if necessary.
+     device: str = "cuda",
+     output_folder: str = "outputs",
+     progress=gr.Progress(track_tqdm=True)
+ ):
+     if image.mode == "RGBA":
+         image = image.convert("RGB")
+
+     if(randomize_seed):
+         seed = random.randint(0, max_64_bit_int)
+     generator = torch.manual_seed(seed)

+     os.makedirs(output_folder, exist_ok=True)
+     base_count = len(glob(os.path.join(output_folder, "*.mp4")))
+     video_path = os.path.join(output_folder, f"{base_count:06d}.mp4")
+
+     frames = pipe(image, decode_chunk_size=decoding_t, generator=generator, motion_bucket_id=motion_bucket_id, noise_aug_strength=0.1, num_frames=25).frames[0]
+     export_to_video(frames, video_path, fps=fps_id)
+     torch.manual_seed(seed)
+
+     return video_path, seed
+
+ def resize_image(image, output_size=(1024, 576)):
+     # Calculate aspect ratios
+     target_aspect = output_size[0] / output_size[1] # Aspect ratio of the desired size
+     image_aspect = image.width / image.height # Aspect ratio of the original image
+
+     # Resize then crop if the original image is larger
+     if image_aspect > target_aspect:
+         # Resize the image to match the target height, maintaining aspect ratio
+         new_height = output_size[1]
+         new_width = int(new_height * image_aspect)
+         resized_image = image.resize((new_width, new_height), Image.LANCZOS)
+         # Calculate coordinates for cropping
+         left = (new_width - output_size[0]) / 2
+         top = 0
+         right = (new_width + output_size[0]) / 2
+         bottom = output_size[1]
+     else:
+         # Resize the image to match the target width, maintaining aspect ratio
+         new_width = output_size[0]
+         new_height = int(new_width / image_aspect)
+         resized_image = image.resize((new_width, new_height), Image.LANCZOS)
+         # Calculate coordinates for cropping
+         left = 0
+         top = (new_height - output_size[1]) / 2
+         right = output_size[0]
+         bottom = (new_height + output_size[1]) / 2
+
+     # Crop the image
+     cropped_image = resized_image.crop((left, top, right, bottom))
+     return cropped_image
+
+ with gr.Blocks() as demo:
+     gr.Markdown('''# Community demo for Stable Video Diffusion - Img2Vid - XT ([model](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt), [paper](https://stability.ai/research/stable-video-diffusion-scaling-latent-video-diffusion-models-to-large-datasets), [stability's ui waitlist](https://stability.ai/contact))
+ #### Research release ([_non-commercial_](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt/blob/main/LICENSE)): generate a `4s` video from a single image (`25 frames` at `6 fps`). This demo uses [🧨 diffusers for low VRAM and fast generation](https://huggingface.co/docs/diffusers/main/en/using-diffusers/svd).
+ ''')
+     with gr.Row():
+         with gr.Column():
+             image = gr.Image(label="Upload your image", type="pil")
+             generate_btn = gr.Button("Generate")
+         video = gr.Video()
+     with gr.Accordion("Advanced options", open=False):
+         seed = gr.Slider(label="Seed", value=42, randomize=True, minimum=0, maximum=max_64_bit_int, step=1)
+         randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+         motion_bucket_id = gr.Slider(label="Motion bucket id", info="Controls how much motion to add/remove from the image", value=127, minimum=1, maximum=255)
+         fps_id = gr.Slider(label="Frames per second", info="The length of your video in seconds will be 25/fps", value=6, minimum=5, maximum=30)
+
+     image.upload(fn=resize_image, inputs=image, outputs=image, queue=False)
+     generate_btn.click(fn=sample, inputs=[image, seed, randomize_seed, motion_bucket_id, fps_id], outputs=[video, seed], api_name="video")
+     gr.Examples(
+         examples=[
+             "images/blink_meme.png",
+             "images/confused2_meme.png",
+             "images/disaster_meme.png",
+             "images/distracted_meme.png",
+             "images/hide_meme.png",
+             "images/nazare_meme.png",
+             "images/success_meme.png",
+             "images/willy_meme.png",
+             "images/wink_meme.png"
          ],
+         inputs=image,
+         outputs=[video, seed],
+         fn=sample,
+         cache_examples="lazy",
+     )

  if __name__ == "__main__":
+     #demo.queue(max_size=20, api_open=False)
+     demo.launch(share=True, show_api=False)
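
Since the click handler above is registered with api_name="video", the updated Space can in principle also be driven programmatically. Below is a minimal sketch using gradio_client; the Space id is a hypothetical placeholder (this diff does not name the repo), the file-upload convention varies between gradio_client versions, and app.py launches with show_api=False, so the endpoint is not advertised in the UI.

from gradio_client import Client

# Hypothetical Space id, for illustration only -- substitute the repo this commit belongs to.
client = Client("cakemus/stable-video-diffusion-test")

# Arguments mirror the inputs wired to generate_btn.click(...) in app.py:
# image, seed, randomize_seed, motion_bucket_id, fps_id.
video_path, used_seed = client.predict(
    "input.png",   # local path to the conditioning image (newer clients may expect handle_file("input.png"))
    42,            # seed
    True,          # randomize_seed
    127,           # motion_bucket_id
    6,             # fps_id
    api_name="/video",
)
print(video_path, used_seed)
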
requirements.txt CHANGED
@@ -1,4 +1,7 @@
- gradio
  transformers
- torch
- Pillow

+ https://gradio-builds.s3.amazonaws.com/756e3431d65172df986a7e335dce8136206a293a/gradio-4.7.1-py3-none-any.whl
+ git+https://github.com/huggingface/diffusers.git
  transformers
+ accelerate
+ safetensors
+ opencv-python
+ uuid
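
A quick way to confirm that the updated dependency set resolves is an import smoke test. This is a minimal sketch, assuming the requirements above are installed in the active environment; note that torch is no longer pinned here and is expected to arrive through the other packages or the Space's base image.

# Import check for the dependencies touched by this commit.
import gradio
import diffusers
import transformers
import accelerate
import safetensors
import cv2  # provided by opencv-python

print("gradio", gradio.__version__)        # pinned 4.7.1 wheel from requirements.txt
print("diffusers", diffusers.__version__)  # installed from the git main branch
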