cakemus committed on
Commit c31fb3d · 1 Parent(s): 2b4238b
Files changed (1)
  1. app.py +39 -123
app.py CHANGED
@@ -1,38 +1,27 @@
 import gradio as gr
-import spaces
-#import gradio.helpers
 import torch
 import os
 from glob import glob
-from pathlib import Path
 from typing import Optional
-
 from diffusers import StableVideoDiffusionPipeline
-from diffusers.utils import load_image, export_to_video
+from diffusers.utils import export_to_video
 from PIL import Image
-
-import uuid
 import random
-from huggingface_hub import hf_hub_download
-
-from moviepy import VideoFileClip, concatenate_videoclips
-
-#gradio.helpers.CACHED_FOLDER = '/data/cache'
+from moviepy.editor import VideoFileClip, concatenate_videoclips
 
+# Load the Stable Video Diffusion Pipeline
 pipe = StableVideoDiffusionPipeline.from_pretrained(
-    "stabilityai/stable-video-diffusion-img2vid-xt", torch_dtype=torch.float16, variant="fp16"
+    "stabilityai/stable-video-diffusion-img2vid-xt",
+    torch_dtype=torch.float16,
+    variant="fp16"
 )
 pipe.to("cuda")
-#pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
-#pipe.vae = torch.compile(pipe.vae, mode="reduce-overhead", fullgraph=True)
 
+# Maximum seed value
 max_64_bit_int = 2**63 - 1
 
+# Resize and crop image to desired resolution
 def resize_image(image, output_size=(1024, 576)):
-    """
-    Resizes/crops the image to match a target resolution without
-    distorting aspect ratio.
-    """
     target_aspect = output_size[0] / output_size[1]
     image_aspect = image.width / image.height
 
@@ -41,52 +30,35 @@ def resize_image(image, output_size=(1024, 576)):
         new_width = int(new_height * image_aspect)
         resized_image = image.resize((new_width, new_height), Image.LANCZOS)
         left = (new_width - output_size[0]) / 2
-        top = 0
         right = (new_width + output_size[0]) / 2
-        bottom = output_size[1]
+        top, bottom = 0, output_size[1]
     else:
         new_width = output_size[0]
         new_height = int(new_width / image_aspect)
         resized_image = image.resize((new_width, new_height), Image.LANCZOS)
-        left = 0
+        left, right = 0, output_size[0]
         top = (new_height - output_size[1]) / 2
-        right = output_size[0]
         bottom = (new_height + output_size[1]) / 2
 
-    cropped_image = resized_image.crop((left, top, right, bottom))
-    return cropped_image
+    return resized_image.crop((left, top, right, bottom))
 
-# NEW CODE HERE:
+# Combine multiple video snippets into a single video
 def combine_videos(video_paths, output_path="outputs/final_long_video.mp4"):
-    """
-    Concatenate a list of MP4 videos into one MP4.
-    """
+    os.makedirs("outputs", exist_ok=True)
     clips = [VideoFileClip(vp) for vp in video_paths]
     final_clip = concatenate_videoclips(clips, method="compose")
     final_clip.write_videofile(output_path, codec="libx264", fps=clips[0].fps, audio=False)
     return output_path
 
-# NEW CODE HERE:
-# We create a helper function that returns both the frames and the snippet path
+# Generate a video snippet from an input image
 def generate_snippet(
-    init_image: Image,
-    seed: int,
-    motion_bucket_id: int,
-    fps_id: int,
-    decoding_t: int = 3,
-    device: str = "cuda",
-    output_folder: str = "outputs"
+    init_image: Image, seed: int, motion_bucket_id: int, fps_id: int, decoding_t: int, output_folder: str
 ):
-    """
-    Generate a short snippet from `init_image` using the pipeline.
-    Returns: (frames, video_path)
-    """
     generator = torch.manual_seed(seed)
     os.makedirs(output_folder, exist_ok=True)
     base_count = len(glob(os.path.join(output_folder, "*.mp4")))
    video_path = os.path.join(output_folder, f"{base_count:06d}.mp4")
 
-    # Generate frames
     result = pipe(
         init_image,
         decode_chunk_size=decoding_t,
@@ -95,118 +67,62 @@ def generate_snippet(
         noise_aug_strength=0.1,
         num_frames=25
     )
-    frames = result.frames[0]  # a list of PIL images
-
-    # Save snippet
+    frames = result.frames[0]
     export_to_video(frames, video_path, fps=fps_id)
 
-    return frames, video_path
+    return frames[-1], video_path
 
-@spaces.GPU(duration=120)
+# Generate a long video composed of 5 short snippets
 def sample_long(
     image: Image,
     seed: Optional[int] = 42,
     randomize_seed: bool = True,
     motion_bucket_id: int = 127,
     fps_id: int = 6,
-    cond_aug: float = 0.02,
-    decoding_t: int = 3,  # Number of frames decoded at a time! This can be lowered if VRAM is an issue.
-    device: str = "cuda",
-    output_folder: str = "outputs",
-    progress=gr.Progress(track_tqdm=True)
+    decoding_t: int = 3,
+    output_folder: str = "outputs"
 ):
-    """
-    Generate 5 snippets in a row. Each new snippet starts from the last frame of the previous snippet.
-    Return the path to the final, concatenated MP4.
-    """
     if image.mode == "RGBA":
         image = image.convert("RGB")
-
     if randomize_seed:
         seed = random.randint(0, max_64_bit_int)
-    torch.manual_seed(seed)
 
     snippet_paths = []
     current_image = image
-
-    for i in range(5):
-        frames, snippet_path = generate_snippet(
+    for _ in range(5):
+        current_image, snippet_path = generate_snippet(
             init_image=current_image,
             seed=seed,
             motion_bucket_id=motion_bucket_id,
             fps_id=fps_id,
             decoding_t=decoding_t,
-            device=device,
             output_folder=output_folder
         )
         snippet_paths.append(snippet_path)
 
-        # Get the last frame for the next snippet
-        last_frame = frames[-1]  # PIL image
-        current_image = last_frame
-
-        # Optional: re-seed each time if you like randomness in every snippet
-        # Otherwise, keep the same seed for a more cohesive “style”
-        # If you want random seeds each snippet, uncomment:
-        # seed = random.randint(0, max_64_bit_int)
-
-    # Concatenate all snippets
-    final_video_path = os.path.join(output_folder, "final_long_video.mp4")
-    final_video_path = combine_videos(snippet_paths, output_path=final_video_path)
-
-    return final_video_path, seed
-
+    return combine_videos(snippet_paths), seed
 
+# Build the Gradio interface
 with gr.Blocks() as demo:
-    gr.Markdown('''# Community demo for Stable Video Diffusion - Img2Vid - XT
-([model](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt),
-[paper](https://stability.ai/research/stable-video-diffusion-scaling-latent-video-diffusion-models-to-large-datasets),
-[stability's ui waitlist](https://stability.ai/contact))
-#### Research release ([_non-commercial_](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt/blob/main/LICENSE)):
-Generate a longer video by chaining together multiple short snippets.
-''')
-
+    gr.Markdown("### Stable Video Diffusion - Generate a Long Video")
+
     with gr.Row():
         with gr.Column():
-            image = gr.Image(label="Upload your image", type="pil")
-            generate_btn = gr.Button("Generate Long Video (5 snippets)")
-            video = gr.Video()
-
-        with gr.Accordion("Advanced options", open=False):
-            seed = gr.Slider(
-                label="Seed",
-                value=42,
-                randomize=True,
-                minimum=0,
-                maximum=max_64_bit_int,
-                step=1
-            )
-            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-            motion_bucket_id = gr.Slider(
-                label="Motion bucket id",
-                info="Controls how much motion to add/remove from the image",
-                value=127,
-                minimum=1,
-                maximum=255
-            )
-            fps_id = gr.Slider(
-                label="Frames per second",
-                info="The length of your video in seconds will be 25/fps",
-                value=6,
-                minimum=5,
-                maximum=30
-            )
-
-    # Automatically resize on image upload
-    image.upload(fn=resize_image, inputs=image, outputs=image, queue=False)
+            image = gr.Image(label="Upload an image", type="pil")
+            generate_btn = gr.Button("Generate Long Video")
+            video_output = gr.Video()
+
+        with gr.Accordion("Advanced Options", open=False):
+            seed = gr.Slider(0, max_64_bit_int, value=42, step=1, label="Seed")
+            randomize_seed = gr.Checkbox(value=True, label="Randomize Seed")
+            motion_bucket_id = gr.Slider(1, 255, value=127, step=1, label="Motion Bucket ID")
+            fps_id = gr.Slider(5, 30, value=6, step=1, label="Frames Per Second")
 
-    # NEW: Generate a *long* video composed of 5 short snippets
     generate_btn.click(
-        fn=sample_long,
-        inputs=[image, seed, randomize_seed, motion_bucket_id, fps_id],
-        outputs=[video, seed],
-        api_name="video"
+        sample_long,
+        inputs=[image, seed, randomize_seed, motion_bucket_id, fps_id],
+        outputs=[video_output, seed]
     )
 
 if __name__ == "__main__":
-    demo.launch(share=True, show_api=False)
+    demo.launch(share=True)
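
For reference, the refactor makes the snippet chain usable outside the Gradio UI: `generate_snippet` now returns the last frame directly, and `sample_long` threads it into the next call before concatenating the five MP4s. A minimal sketch of how the new module could be exercised programmatically, under stated assumptions (app.py importable from the working directory, a CUDA GPU available, and "input.jpg" a hypothetical placeholder path that is not part of this commit):

# Minimal sketch, not part of the commit. Assumes a CUDA GPU is present;
# "input.jpg" is a hypothetical placeholder path.
from PIL import Image
import app  # importing app.py loads the SVD pipeline onto the GPU

img = Image.open("input.jpg")
img = app.resize_image(img)  # crop/resize to the model's 1024x576
final_path, used_seed = app.sample_long(img, seed=42, randomize_seed=False)
print(f"Wrote {final_path} with seed {used_seed}")

The `if __name__ == "__main__":` guard keeps `demo.launch()` from running on import, so this kind of reuse only pays the one-time cost of loading the pipeline.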