cakemus committed on
Commit b950d1e · 1 Parent(s): b67e239
Files changed (1)
  1. app.py +94 -94
app.py CHANGED
@@ -1,128 +1,128 @@
  import gradio as gr
  import torch
  import os
  from glob import glob
  from typing import Optional
  from diffusers import StableVideoDiffusionPipeline
- from diffusers.utils import export_to_video
  from PIL import Image
  import random
- from moviepy import VideoFileClip, concatenate_videoclips

- # Load the Stable Video Diffusion Pipeline
  pipe = StableVideoDiffusionPipeline.from_pretrained(
-     "stabilityai/stable-video-diffusion-img2vid-xt",
-     torch_dtype=torch.float16,
-     variant="fp16"
  )
  pipe.to("cuda")

- # Maximum seed value
  max_64_bit_int = 2**63 - 1

- # Resize and crop image to desired resolution
  def resize_image(image, output_size=(1024, 576)):
-     target_aspect = output_size[0] / output_size[1]
-     image_aspect = image.width / image.height

      if image_aspect > target_aspect:
          new_height = output_size[1]
          new_width = int(new_height * image_aspect)
          resized_image = image.resize((new_width, new_height), Image.LANCZOS)
          left = (new_width - output_size[0]) / 2
          right = (new_width + output_size[0]) / 2
-         top, bottom = 0, output_size[1]
      else:
          new_width = output_size[0]
          new_height = int(new_width / image_aspect)
          resized_image = image.resize((new_width, new_height), Image.LANCZOS)
-         left, right = 0, output_size[0]
          top = (new_height - output_size[1]) / 2
          bottom = (new_height + output_size[1]) / 2

-     return resized_image.crop((left, top, right, bottom))
-
- # Combine multiple video snippets into a single video
- def combine_videos(video_paths, output_path="outputs/final_long_video.mp4"):
-     os.makedirs("outputs", exist_ok=True)
-     clips = [VideoFileClip(vp) for vp in video_paths]
-     final_clip = concatenate_videoclips(clips, method="compose")
-     final_clip.write_videofile(output_path, codec="libx264", fps=clips[0].fps, audio=False)
-     return output_path
-
- # Generate a video snippet from an input image
- def generate_snippet(
-     init_image: Image, seed: int, motion_bucket_id: int, fps_id: int, decoding_t: int, output_folder: str
- ):
-     generator = torch.manual_seed(seed)
-     os.makedirs(output_folder, exist_ok=True)
-     base_count = len(glob(os.path.join(output_folder, "*.mp4")))
-     video_path = os.path.join(output_folder, f"{base_count:06d}.mp4")
-
-     result = pipe(
-         init_image,
-         decode_chunk_size=decoding_t,
-         generator=generator,
-         motion_bucket_id=motion_bucket_id,
-         noise_aug_strength=0.1,
-         num_frames=25
-     )
-     frames = result.frames[0]
-     export_to_video(frames, video_path, fps=fps_id)
-
-     return frames[-1], video_path
-
- # Generate a long video composed of 5 short snippets
- def sample_long(
-     image: Image,
-     seed: Optional[int] = 42,
-     randomize_seed: bool = True,
-     motion_bucket_id: int = 127,
-     fps_id: int = 6,
-     decoding_t: int = 3,
-     output_folder: str = "outputs"
- ):
-     if image.mode == "RGBA":
-         image = image.convert("RGB")
-     if randomize_seed:
-         seed = random.randint(0, max_64_bit_int)
-
-     snippet_paths = []
-     current_image = image
-     for _ in range(5):
-         current_image, snippet_path = generate_snippet(
-             init_image=current_image,
-             seed=seed,
-             motion_bucket_id=motion_bucket_id,
-             fps_id=fps_id,
-             decoding_t=decoding_t,
-             output_folder=output_folder
-         )
-         snippet_paths.append(snippet_path)
-
-     return combine_videos(snippet_paths), seed

- # Build the Gradio interface
  with gr.Blocks() as demo:
-     gr.Markdown("### Stable Video Diffusion - Generate a Long Video")
-
-     with gr.Row():
-         with gr.Column():
-             image = gr.Image(label="Upload an image", type="pil")
-             generate_btn = gr.Button("Generate Long Video")
-         video_output = gr.Video()
-
-     with gr.Accordion("Advanced Options", open=False):
-         seed = gr.Slider(0, max_64_bit_int, value=42, step=1, label="Seed")
-         randomize_seed = gr.Checkbox(value=True, label="Randomize Seed")
-         motion_bucket_id = gr.Slider(1, 255, value=127, step=1, label="Motion Bucket ID")
-         fps_id = gr.Slider(5, 30, value=6, step=1, label="Frames Per Second")
-
-     generate_btn.click(
-         sample_long,
-         inputs=[image, seed, randomize_seed, motion_bucket_id, fps_id],
-         outputs=[video_output, seed]
-     )

  if __name__ == "__main__":
-     demo.launch(share=True)
  import gradio as gr
+ import spaces
+ #import gradio.helpers
  import torch
  import os
  from glob import glob
+ from pathlib import Path
  from typing import Optional
+
  from diffusers import StableVideoDiffusionPipeline
+ from diffusers.utils import load_image, export_to_video
  from PIL import Image
+
+ import uuid
  import random
+ from huggingface_hub import hf_hub_download
+
+ #gradio.helpers.CACHED_FOLDER = '/data/cache'

  pipe = StableVideoDiffusionPipeline.from_pretrained(
+     "stabilityai/stable-video-diffusion-img2vid-xt", torch_dtype=torch.float16, variant="fp16"
  )
  pipe.to("cuda")
+ #pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+ #pipe.vae = torch.compile(pipe.vae, mode="reduce-overhead", fullgraph=True)

  max_64_bit_int = 2**63 - 1

+ @spaces.GPU(duration=120)
+ def sample(
+     image: Image,
+     seed: Optional[int] = 42,
+     randomize_seed: bool = True,
+     motion_bucket_id: int = 127,
+     fps_id: int = 6,
+     version: str = "svd_xt",
+     cond_aug: float = 0.02,
+     decoding_t: int = 3,  # Number of frames decoded at a time! This eats most VRAM. Reduce if necessary.
+     device: str = "cuda",
+     output_folder: str = "outputs",
+     progress=gr.Progress(track_tqdm=True)
+ ):
+     if image.mode == "RGBA":
+         image = image.convert("RGB")
+
+     if(randomize_seed):
+         seed = random.randint(0, max_64_bit_int)
+     generator = torch.manual_seed(seed)
+
+     os.makedirs(output_folder, exist_ok=True)
+     base_count = len(glob(os.path.join(output_folder, "*.mp4")))
+     video_path = os.path.join(output_folder, f"{base_count:06d}.mp4")
+
+     frames = pipe(image, decode_chunk_size=decoding_t, generator=generator, motion_bucket_id=motion_bucket_id, noise_aug_strength=0.1, num_frames=25).frames[0]
+     export_to_video(frames, video_path, fps=fps_id)
+     torch.manual_seed(seed)
+
+     return video_path, seed
+
  def resize_image(image, output_size=(1024, 576)):
+     # Calculate aspect ratios
+     target_aspect = output_size[0] / output_size[1]  # Aspect ratio of the desired size
+     image_aspect = image.width / image.height  # Aspect ratio of the original image

+     # Resize then crop if the original image is larger
      if image_aspect > target_aspect:
+         # Resize the image to match the target height, maintaining aspect ratio
          new_height = output_size[1]
          new_width = int(new_height * image_aspect)
          resized_image = image.resize((new_width, new_height), Image.LANCZOS)
+         # Calculate coordinates for cropping
          left = (new_width - output_size[0]) / 2
+         top = 0
          right = (new_width + output_size[0]) / 2
+         bottom = output_size[1]
      else:
+         # Resize the image to match the target width, maintaining aspect ratio
          new_width = output_size[0]
          new_height = int(new_width / image_aspect)
          resized_image = image.resize((new_width, new_height), Image.LANCZOS)
+         # Calculate coordinates for cropping
+         left = 0
          top = (new_height - output_size[1]) / 2
+         right = output_size[0]
          bottom = (new_height + output_size[1]) / 2

+         # Crop the image
+     cropped_image = resized_image.crop((left, top, right, bottom))
+     return cropped_image

  with gr.Blocks() as demo:
+     gr.Markdown('''# Community demo for Stable Video Diffusion - Img2Vid - XT ([model](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt), [paper](https://stability.ai/research/stable-video-diffusion-scaling-latent-video-diffusion-models-to-large-datasets), [stability's ui waitlist](https://stability.ai/contact))
+ #### Research release ([_non-commercial_](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt/blob/main/LICENSE)): generate a `4s` video from a single image (`25 frames` at `6 fps`). This demo uses [🧨 diffusers for low VRAM and fast generation](https://huggingface.co/docs/diffusers/main/en/using-diffusers/svd).
+ ''')
+     with gr.Row():
+         with gr.Column():
+             image = gr.Image(label="Upload your image", type="pil")
+             generate_btn = gr.Button("Generate")
+         video = gr.Video()
+     with gr.Accordion("Advanced options", open=False):
+         seed = gr.Slider(label="Seed", value=42, randomize=True, minimum=0, maximum=max_64_bit_int, step=1)
+         randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+         motion_bucket_id = gr.Slider(label="Motion bucket id", info="Controls how much motion to add/remove from the image", value=127, minimum=1, maximum=255)
+         fps_id = gr.Slider(label="Frames per second", info="The length of your video in seconds will be 25/fps", value=6, minimum=5, maximum=30)
+
+     image.upload(fn=resize_image, inputs=image, outputs=image, queue=False)
+     generate_btn.click(fn=sample, inputs=[image, seed, randomize_seed, motion_bucket_id, fps_id], outputs=[video, seed], api_name="video")
+     gr.Examples(
+         examples=[
+             "images/blink_meme.png",
+             "images/confused2_meme.png",
+             "images/disaster_meme.png",
+             "images/distracted_meme.png",
+             "images/hide_meme.png",
+             "images/nazare_meme.png",
+             "images/success_meme.png",
+             "images/willy_meme.png",
+             "images/wink_meme.png"
+         ],
+         inputs=image,
+         outputs=[video, seed],
+         fn=sample,
+         cache_examples="lazy",
+     )

  if __name__ == "__main__":
+     #demo.queue(max_size=20, api_open=False)
+     demo.launch(share=True, show_api=False)
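
For reference, a minimal standalone sketch of the image-to-video call that the updated app.py wraps. The model ID, sampling arguments, and export settings mirror the diff above; the input image path and output filename are illustrative placeholders, not files in this repo.

import torch
from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import load_image, export_to_video

pipe = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt", torch_dtype=torch.float16, variant="fp16"
)
pipe.to("cuda")

# Placeholder input; app.py resizes/crops uploads to 1024x576 before sampling.
image = load_image("example.png").resize((1024, 576))

generator = torch.manual_seed(42)
frames = pipe(
    image,
    decode_chunk_size=3,      # frames decoded at a time; lower this if VRAM is tight
    generator=generator,
    motion_bucket_id=127,     # higher values add more motion
    noise_aug_strength=0.1,
    num_frames=25,
).frames[0]
export_to_video(frames, "example.mp4", fps=6)  # 25 frames at 6 fps is roughly 4 s of video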