Update app.py
app.py
CHANGED
@@ -26,48 +26,33 @@ pipe.to("cuda")
 pipe_upsample.to("cuda")
 pipe.vae.enable_tiling()
 
-def prepare_image_condition(image, size=(
+def prepare_image_condition(image, size=(480, 480), background=(0, 0, 0)):
     image = ImageOps.contain(image, size)
     canvas = Image.new("RGB", size, background)
     offset = ((size[0] - image.width) // 2, (size[1] - image.height) // 2)
     canvas.paste(image, offset)
     return canvas
 
-def round_to_nearest_resolution(height, width, ratio):
-    return height - (height % ratio), width - (width % ratio)
-
 @spaces.GPU(duration=180)
 def generate_video(prompt, image_url):
     generator = torch.Generator("cuda").manual_seed(42)
-
-    #
+
+    # Load & prepare image
     image = None
     if image_url:
         raw_image = Image.open(BytesIO(requests.get(image_url).content)).convert("RGB")
         image = prepare_image_condition(raw_image)
-
-    #
-    base_width, base_height =
-
-
-
-        int(base_width * downscale),
-        int(base_height * downscale),
-        ratio=pipe.vae_spatial_compression_ratio
-    )
-    # Upscaled dimensions must also be VAE-aligned
-    w_up, h_up = round_to_nearest_resolution(
-        base_width,
-        base_height,
-        ratio=pipe.vae_spatial_compression_ratio
-    )
-
-    # Step 1: Generate latents
+
+    # Set target resolutions
+    base_width, base_height = 480, 480  # final size (must be divisible by 16)
+    down_width, down_height = 320, 320  # for latent generation (must also be divisible by 16)
+
+    # Step 1: Generate latents at lower resolution
     latents = pipe(
         prompt=prompt,
         image=image,
-        width=
-        height=
+        width=down_width,
+        height=down_height,
         num_frames=60,
         num_inference_steps=7,
         output_type="latent",
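Note on this hunk: the old revision derived its sizes through round_to_nearest_resolution and pipe.vae_spatial_compression_ratio so that both passes stayed aligned to the VAE grid; the new revision hard-codes 480x480 and 320x320 instead. If those constants ever change, the removed helper's check still applies. A minimal, self-contained sketch — the ratio value 32 is an assumption based on LTX-Video's VAE (the in-diff comments say "divisible by 16"; both sizes satisfy either):

# Sketch of the alignment rule the removed helper enforced. RATIO = 32 is
# an assumed value for pipe.vae_spatial_compression_ratio on LTX-Video;
# the commit's own comments only say "divisible by 16".
def round_to_nearest_resolution(height, width, ratio):
    return height - (height % ratio), width - (width % ratio)

RATIO = 32
assert round_to_nearest_resolution(480, 480, RATIO) == (480, 480)  # final size aligned
assert round_to_nearest_resolution(320, 320, RATIO) == (320, 320)  # latent size aligned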
@@ -76,61 +61,98 @@ def generate_video(prompt, image_url):
         decode_noise_scale=0.025,
         generator=generator
     ).frames
-
+
     torch.cuda.empty_cache()
     gc.collect()
-
-    # Step 2: Upscale
-
-
+
+    # Step 2: Upscale latents
+    upscaled_latents = pipe_upsample(latents=latents, output_type="latent").frames
+
     torch.cuda.empty_cache()
     gc.collect()
-
-    # Step 3: Decode to frames
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        generator=generator
-    ).frames[0]
-
+
+    # Step 3: Decode upscaled latents to frames
+    # Use the VAE decoder directly instead of the full pipeline
+    frames = pipe.vae.decode(upscaled_latents).sample
+    frames = (frames / 2 + 0.5).clamp(0, 1)  # Normalize to [0, 1]
+    frames = (frames * 255).to(torch.uint8)  # Convert to uint8
+
+    # Convert tensor to PIL Images
+    pil_frames = []
+    for i in range(frames.shape[2]):  # num_frames dimension
+        frame = frames[0, :, i, :, :].permute(1, 2, 0).cpu().numpy()
+        pil_frames.append(Image.fromarray(frame))
+
+    torch.cuda.empty_cache()
+    gc.collect()
+
     # Step 4: Export video
     video_path = "output.mp4"
-    export_to_video(
-
+    export_to_video(pil_frames, video_path, fps=24)
+
     # Step 5: TTS
     tts = gTTS(text=prompt, lang='en')
     tts.save("voice.mp3")
     AudioSegment.from_mp3("voice.mp3").export("voice.wav", format="wav")
-
-    # Step 6: Subtitles
+
+    # Step 6: Subtitles
     model = whisper.load_model("base", device="cpu")
     result = model.transcribe("voice.wav", task="transcribe", language="en")
+
+    # Generate SRT subtitles manually since result["srt"] might not be available
+    srt_content = ""
+    for i, segment in enumerate(result["segments"]):
+        start_time = format_time(segment["start"])
+        end_time = format_time(segment["end"])
+        text = segment["text"].strip()
+        srt_content += f"{i + 1}\n{start_time} --> {end_time}\n{text}\n\n"
+
     with open("subtitles.srt", "w", encoding="utf-8") as f:
-        f.write(
-
+        f.write(srt_content)
+
     # Step 7: Merge video + audio + subtitles
     final_output = "final_with_audio.mp4"
-
-
-
-
-
-
-
-
-
-
+    try:
+        (
+            ffmpeg
+            .input(video_path)
+            .output(
+                final_output,
+                vf="subtitles=subtitles.srt",
+                **{"c:v": "libx264", "c:a": "aac"},
+                loglevel="error"
+            )
+            .run(overwrite_output=True)
+        )
+
+        # Add audio track
+        (
+            ffmpeg
+            .input(final_output)
+            .input("voice.wav")
+            .output(
+                "final_complete.mp4",
+                **{"c:v": "copy", "c:a": "aac"},
+                shortest=None,
+                loglevel="error"
+            )
+            .run(overwrite_output=True)
+        )
+
+        return "final_complete.mp4"
+
+    except Exception as e:
+        print(f"FFmpeg error: {e}")
+        # Fallback: return video without audio/subtitles
+        return video_path
+
+def format_time(seconds):
+    """Convert seconds to SRT time format"""
+    hours = int(seconds // 3600)
+    minutes = int((seconds % 3600) // 60)
+    secs = int(seconds % 60)
+    millisecs = int((seconds % 1) * 1000)
+    return f"{hours:02d}:{minutes:02d}:{secs:02d},{millisecs:03d}"
 
 # Gradio UI
 demo = gr.Interface(
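Note on the new Step 3: pipe.vae.decode(upscaled_latents).sample bypasses the pipeline's own decoding path, which denormalizes the latents with the VAE's per-channel statistics (and, on checkpoints with timestep-conditioned decoding, applies the conditioning that decode_noise_scale in the Step 1 call configures) before invoking the decoder, so raw frames may come back color-shifted. The removed revision appears to have decoded through the pipeline itself — its tail, ).frames[0] with generator=generator, is still visible in the hunk. A sketch of that route; every keyword value besides prompt and generator is an assumption, since the removed code's arguments were truncated out of the page:

# Sketch only: decode by running the pipeline with the upscaled latents as
# the starting point, so denormalization and decode conditioning apply.
# kwarg values are assumptions modeled on the surviving Step 1 call; the
# actual removed arguments are not recoverable from this page.
frames = pipe(
    prompt=prompt,
    latents=upscaled_latents,
    num_frames=60,
    num_inference_steps=7,
    decode_noise_scale=0.025,
    output_type="pil",
    generator=generator,
).frames[0]
pil_frames = frames  # list of PIL images, same role as in the new Step 3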
@@ -141,7 +163,7 @@ demo = gr.Interface(
     ],
     outputs=gr.Video(label="Generated Video"),
     title="🎬 LTX AI Video Generator",
-    description="AI-powered video with voiceover and subtitles.
+    description="AI-powered video with voiceover and subtitles. Now outputs at 480x480 resolution."
 )
 
-demo.launch()
+demo.launch()
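One likely runtime failure in the new Step 7: ffmpeg.input(final_output).input("voice.wav") calls .input() on a stream, but in ffmpeg-python input() exists only at module level, so this raises AttributeError and the except branch silently returns the subtitled-but-silent first-pass video. The library's documented pattern is to create both inputs separately and hand them to output() together. A sketch using the same filenames and flags as the diff (shortest=None emits the bare -shortest flag, since ffmpeg-python drops values of None):

import ffmpeg

# Sketch of the two-input merge using ffmpeg-python's documented pattern.
video = ffmpeg.input("final_with_audio.mp4")  # subtitled video from the first pass
audio = ffmpeg.input("voice.wav")             # gTTS narration track
(
    ffmpeg
    .output(
        video, audio, "final_complete.mp4",
        **{"c:v": "copy", "c:a": "aac"},  # copy video stream, encode audio to AAC
        shortest=None,                    # stop at the shorter of the two inputs
        loglevel="error",
    )
    .run(overwrite_output=True)
)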
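Two small notes on the additions at the bottom. format_time is referenced inside generate_video before its definition; that is fine in Python, since the name is resolved when the function runs, after the whole module has loaded. Its rounding behavior is also easy to spot-check — values below were verified by hand against its integer/modulo arithmetic, assuming the function from this commit is in scope:

# Quick spot-check of the SRT timestamp helper added in this commit.
assert format_time(3.5) == "00:00:03,500"
assert format_time(3661.25) == "01:01:01,250"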