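"""LTX AI Video Generator (Hugging Face Space, ZeroGPU runtime).

Generates a short clip from a text prompt with LTX-Video, optionally
conditioned on an image URL, then adds a gTTS voiceover and
Whisper-generated SRT subtitles, and muxes everything with ffmpeg.
"""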

import gc
from io import BytesIO

import ffmpeg
import gradio as gr
import requests
import spaces
import torch
import whisper
from diffusers import LTXConditionPipeline, LTXLatentUpsamplePipeline
from diffusers.utils import export_to_video
from gtts import gTTS
from PIL import Image, ImageOps
from pydub import AudioSegment

# Load the distilled LTX-Video pipeline and its spatial latent upsampler;
# the upsampler reuses the base pipeline's VAE
ltx_model_id = "Lightricks/LTX-Video-0.9.7-distilled"
upscaler_model_id = "Lightricks/ltxv-spatial-upscaler-0.9.7"
pipe = LTXConditionPipeline.from_pretrained(ltx_model_id, torch_dtype=torch.float16)
pipe_upsample = LTXLatentUpsamplePipeline.from_pretrained(
    upscaler_model_id, vae=pipe.vae, torch_dtype=torch.float16
)
pipe.to("cuda")
pipe_upsample.to("cuda")
pipe.vae.enable_tiling()  # tiled VAE decode to reduce peak VRAM

def prepare_image_condition(image, size=(512, 512), background=(0, 0, 0)):
    """Letterbox `image` onto a fixed-size canvas, preserving aspect ratio."""
    image = ImageOps.contain(image, size)
    canvas = Image.new("RGB", size, background)
    offset = ((size[0] - image.width) // 2, (size[1] - image.height) // 2)
    canvas.paste(image, offset)
    return canvas

def round_to_nearest_resolution(height, width, ratio):
    """Round both dimensions down to the nearest multiple of `ratio`."""
    return height - (height % ratio), width - (width % ratio)
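
def srt_time(t):
    """Format seconds as an SRT timestamp (HH:MM:SS,mmm).

    Helper added here because whisper's transcribe() returns timed
    segments rather than ready-made SRT text.
    """
    h, rem = divmod(int(t * 1000), 3_600_000)
    m, rem = divmod(rem, 60_000)
    s, ms = divmod(rem, 1000)
    return f"{h:02d}:{m:02d}:{s:02d},{ms:03d}"

# Generation follows the LTX-Video two-stage recipe: denoise at a reduced
# resolution, upsample the latents 2x, then run a few more denoising steps
# at the upscaled size before decoding. The @spaces.GPU decorator requests
# a ZeroGPU slot per call; the 120 s duration is an assumption, tune it to
# the Space's actual runtime.
@spaces.GPU(duration=120)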
def generate_video(prompt, image_url):
    generator = torch.Generator("cuda").manual_seed(42)

    # Optional image conditioning, letterboxed to preserve aspect ratio
    image = None
    if image_url:
        raw_image = Image.open(BytesIO(requests.get(image_url).content)).convert("RGB")
        image = prepare_image_condition(raw_image)

    # Stage-1 dimensions: ~2/3 of the target size, rounded down to the
    # VAE's spatial stride (note the helper takes height first)
    base_width, base_height = 512, 512
    downscale = 2 / 3
    h_d, w_d = round_to_nearest_resolution(
        int(base_height * downscale),
        int(base_width * downscale),
        pipe.vae_spatial_compression_ratio,
    )
    # Step 1: generate latents at the reduced resolution
    latents = pipe(
        prompt=prompt,
        image=image,
        width=w_d,
        height=h_d,
        num_frames=57,  # LTX expects num_frames = 8k + 1; 57 is the closest valid value to the original 60
        num_inference_steps=7,
        output_type="latent",
        guidance_scale=1.0,
        decode_timestep=0.05,
        decode_noise_scale=0.025,
        generator=generator,
    ).frames
    torch.cuda.empty_cache()
    gc.collect()
    # Step 2: upscale the latents 2x with the spatial upsampler
    upscaled = pipe_upsample(latents=latents, output_type="latent").frames
    torch.cuda.empty_cache()
    gc.collect()
    # Step 3: denoise at the upscaled resolution and decode to frames.
    # Width/height must match the upscaled latents (2x the stage-1 size),
    # not the original 512x512 target.
    frames = pipe(
        prompt=prompt,
        image=image,
        latents=upscaled,
        width=w_d * 2,
        height=h_d * 2,
        num_frames=57,
        num_inference_steps=10,
        output_type="pil",
        guidance_scale=1.0,
        decode_timestep=0.05,
        decode_noise_scale=0.025,
        image_cond_noise_scale=0.025,
        denoise_strength=0.3,
        generator=generator,
    ).frames[0]
    # Step 4: export frames to an mp4
    video_path = "output.mp4"
    export_to_video(frames, video_path, fps=24)

    # Step 5: synthesize a voiceover from the prompt, plus a wav copy for whisper
    tts = gTTS(text=prompt, lang="en")
    tts.save("voice.mp3")
    AudioSegment.from_mp3("voice.mp3").export("voice.wav", format="wav")
    # Step 6: transcribe the voiceover on CPU and build the SRT file
    # (whisper returns timed segments; there is no ready-made "srt" key)
    model = whisper.load_model("base", device="cpu")
    result = model.transcribe("voice.wav", task="transcribe", language="en")
    with open("subtitles.srt", "w", encoding="utf-8") as f:
        for i, seg in enumerate(result["segments"], start=1):
            f.write(f"{i}\n{srt_time(seg['start'])} --> {srt_time(seg['end'])}\n{seg['text'].strip()}\n\n")
    # Step 7: burn in subtitles and mux the voiceover. The audio must be a
    # second input stream, and the subtitles filter forces a re-encode, so
    # the video cannot be stream-copied.
    final_output = "final_with_audio.mp4"
    video_in = ffmpeg.input(video_path)
    audio_in = ffmpeg.input("voice.mp3")
    ffmpeg.output(
        video_in.video.filter("subtitles", "subtitles.srt"),
        audio_in.audio,
        final_output,
        vcodec="libx264",
        acodec="aac",
        shortest=None,
        loglevel="error",
    ).run(overwrite_output=True)
    return final_output

# Gradio UI
demo = gr.Interface(
    fn=generate_video,
    inputs=[
        gr.Textbox(label="Prompt", placeholder="Describe your scene..."),
        gr.Textbox(label="Optional Image URL (e.g. Pexels)", placeholder="https://..."),
    ],
    outputs=gr.Video(label="Generated Video"),
    title="🎬 LTX AI Video Generator",
    description="AI-powered video with voiceover and subtitles. Supports ZeroGPU (PyTorch) runtime.",
)

demo.launch()
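
# Assumed dependencies (not pinned in the original): gradio, spaces, torch,
# diffusers, transformers, accelerate, gtts, pydub, openai-whisper,
# ffmpeg-python, and requests, plus an ffmpeg binary on PATH (needed by
# pydub, whisper, and the final mux step).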