Spaces:
Sleeping
Sleeping
import gradio as gr | |
import torch | |
import os | |
import gc | |
import numpy as np | |
import tempfile | |
from typing import Optional, Tuple | |
import time | |
# ZeroGPU import - bu çok önemli! | |
import spaces | |
# Check if running in ZeroGPU environment | |
IS_ZERO_GPU = os.environ.get("SPACES_ZERO_GPU") == "true" | |
IS_SPACES = os.environ.get("SPACE_ID") is not None | |
def load_model(): | |
"""Load LTX-Video model - this will run on ZeroGPU when decorated""" | |
try: | |
from diffusers import LTXVideoPipeline | |
print("🔄 Loading LTX-Video model...") | |
pipe = LTXVideoPipeline.from_pretrained( | |
"Lightricks/LTX-Video", | |
torch_dtype=torch.bfloat16, | |
use_safetensors=True, | |
) | |
# ZeroGPU optimizations | |
if IS_ZERO_GPU: | |
pipe = pipe.to("cuda") | |
# Enable memory optimizations | |
pipe.enable_vae_slicing() | |
pipe.enable_vae_tiling() | |
if hasattr(pipe, 'enable_memory_efficient_attention'): | |
pipe.enable_memory_efficient_attention() | |
print("✅ Model loaded successfully!") | |
return pipe | |
except Exception as e: | |
print(f"❌ Model loading failed: {e}") | |
return None | |
# Global model variable - will be loaded when needed | |
MODEL = None | |
# ZeroGPU decorator - 2 dakika GPU kullanımı | |
def generate_video( | |
prompt: str, | |
negative_prompt: str = "", | |
num_frames: int = 25, | |
height: int = 512, | |
width: int = 512, | |
num_inference_steps: int = 20, | |
guidance_scale: float = 7.5, | |
seed: int = -1 | |
) -> Tuple[Optional[str], str]: | |
"""Generate video using LTX-Video with ZeroGPU""" | |
global MODEL | |
# Load model if not already loaded | |
if MODEL is None: | |
MODEL = load_model() | |
if MODEL is None: | |
return None, "❌ Model loading failed. Please try again." | |
# Input validation | |
if not prompt.strip(): | |
return None, "❌ Please enter a valid prompt." | |
if len(prompt) > 300: | |
return None, "❌ Prompt too long. Please keep it under 300 characters." | |
# ZeroGPU optimizations - limit parameters for stability | |
num_frames = min(num_frames, 25) # Max 25 frames | |
num_inference_steps = min(num_inference_steps, 25) # Max 25 steps | |
height = min(height, 768) # Max 768px | |
width = min(width, 768) # Max 768px | |
try: | |
# Clear CUDA cache | |
torch.cuda.empty_cache() | |
gc.collect() | |
# Set seed for reproducibility | |
generator = None | |
if seed == -1: | |
seed = np.random.randint(0, 2**32 - 1) | |
generator = torch.Generator(device="cuda").manual_seed(seed) | |
print(f"🎬 Generating video: {prompt}") | |
start_time = time.time() | |
# Generate video | |
with torch.autocast("cuda", dtype=torch.bfloat16): | |
result = MODEL( | |
prompt=prompt, | |
negative_prompt=negative_prompt if negative_prompt else None, | |
num_frames=num_frames, | |
height=height, | |
width=width, | |
num_inference_steps=num_inference_steps, | |
guidance_scale=guidance_scale, | |
generator=generator, | |
) | |
end_time = time.time() | |
generation_time = end_time - start_time | |
# Export video | |
video_frames = result.frames[0] | |
with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmp_file: | |
# Export to video file | |
from diffusers.utils import export_to_video | |
export_to_video(video_frames, tmp_file.name, fps=8) | |
video_path = tmp_file.name | |
# Clear memory | |
torch.cuda.empty_cache() | |
gc.collect() | |
success_msg = f""" | |
✅ Video generated successfully with ZeroGPU! | |
📝 Prompt: {prompt} | |
🎬 Frames: {num_frames} | |
📐 Resolution: {width}x{height} | |
⚙️ Inference Steps: {num_inference_steps} | |
🎯 Guidance Scale: {guidance_scale} | |
🎲 Seed: {seed} | |
⏱️ Generation Time: {generation_time:.1f}s | |
🖥️ ZeroGPU: {'✅' if IS_ZERO_GPU else '❌'} | |
""" | |
return video_path, success_msg | |
except torch.cuda.OutOfMemoryError: | |
torch.cuda.empty_cache() | |
gc.collect() | |
return None, "❌ GPU memory exceeded. Try reducing frames, resolution, or inference steps." | |
except Exception as e: | |
torch.cuda.empty_cache() | |
gc.collect() | |
return None, f"❌ Generation failed: {str(e)}" | |
def get_system_info(): | |
"""Get system information""" | |
gpu_info = "Not available" | |
if torch.cuda.is_available(): | |
gpu_info = f"{torch.cuda.get_device_name(0)} ({torch.cuda.get_device_properties(0).total_memory / 1024**3:.1f} GB)" | |
return f""" | |
## 🖥️ System Information | |
**Environment:** | |
- ZeroGPU: {'✅ Active' if IS_ZERO_GPU else '❌ Not detected'} | |
- Hugging Face Spaces: {'✅' if IS_SPACES else '❌'} | |
- CUDA Available: {'✅' if torch.cuda.is_available() else '❌'} | |
- GPU: {gpu_info} | |
- PyTorch: {torch.__version__} | |
**Model Status:** | |
- LTX-Video: {'✅ Loaded' if MODEL is not None else '⏳ Will load on first use'} | |
**ZeroGPU Benefits:** | |
- ✅ Free GPU access | |
- ✅ A100 40GB GPU | |
- ✅ Automatic resource management | |
- ⏱️ 120 second timeout per generation | |
""" | |
# Create Gradio interface | |
with gr.Blocks(title="LTX-Video with ZeroGPU", theme=gr.themes.Soft()) as demo: | |
gr.Markdown(""" | |
# 🚀 LTX-Video Generator (ZeroGPU Powered) | |
Generate high-quality videos from text using Lightricks' LTX-Video model, powered by **ZeroGPU**! | |
⚡ **Free GPU access** - No need to upgrade your Space hardware! | |
""") | |
if IS_ZERO_GPU: | |
gr.Markdown("✅ **ZeroGPU Active** - You have free access to A100 GPU!") | |
else: | |
gr.Markdown("⚠️ **ZeroGPU not detected** - Make sure you've enabled ZeroGPU in your Space settings.") | |
with gr.Tab("🎥 Generate Video"): | |
with gr.Row(): | |
with gr.Column(scale=1): | |
prompt_input = gr.Textbox( | |
label="📝 Video Prompt", | |
placeholder="A serene mountain lake reflecting the aurora borealis...", | |
lines=3, | |
max_lines=5 | |
) | |
negative_prompt_input = gr.Textbox( | |
label="🚫 Negative Prompt (Optional)", | |
placeholder="blurry, low quality, distorted, text, watermark...", | |
lines=2 | |
) | |
with gr.Accordion("🔧 Advanced Settings", open=False): | |
with gr.Row(): | |
num_frames = gr.Slider( | |
minimum=8, | |
maximum=25, # Limited for ZeroGPU | |
value=16, | |
step=1, | |
label="🎬 Number of Frames" | |
) | |
num_steps = gr.Slider( | |
minimum=10, | |
maximum=25, # Limited for ZeroGPU | |
value=20, | |
step=1, | |
label="⚙️ Inference Steps" | |
) | |
with gr.Row(): | |
width = gr.Dropdown( | |
choices=[256, 512, 768], # Limited for ZeroGPU | |
value=512, | |
label="📐 Width" | |
) | |
height = gr.Dropdown( | |
choices=[256, 512, 768], # Limited for ZeroGPU | |
value=512, | |
label="📏 Height" | |
) | |
with gr.Row(): | |
guidance_scale = gr.Slider( | |
minimum=1.0, | |
maximum=15.0, | |
value=7.5, | |
step=0.5, | |
label="🎯 Guidance Scale" | |
) | |
seed = gr.Number( | |
label="🎲 Seed (-1 for random)", | |
value=-1, | |
precision=0 | |
) | |
generate_btn = gr.Button("🚀 Generate Video with ZeroGPU", variant="primary", size="lg") | |
gr.Markdown(""" | |
**⏱️ Note:** Each generation uses 2 minutes of ZeroGPU time. | |
""") | |
with gr.Column(scale=1): | |
video_output = gr.Video( | |
label="🎥 Generated Video", | |
height=400 | |
) | |
result_text = gr.Textbox( | |
label="📋 Generation Info", | |
lines=8, | |
show_copy_button=True | |
) | |
# Event handler | |
generate_btn.click( | |
fn=generate_video, | |
inputs=[ | |
prompt_input, negative_prompt_input, num_frames, | |
height, width, num_steps, guidance_scale, seed | |
], | |
outputs=[video_output, result_text] | |
) | |
# Example prompts | |
gr.Examples( | |
examples=[ | |
["A majestic eagle soaring over snow-capped mountains", "blurry, low quality", 16, 512, 512, 20, 7.5, 42], | |
["Ocean waves gently lapping on a tropical beach at sunset", "", 20, 512, 512, 20, 8.0, 123], | |
["A steaming cup of coffee on a rainy window sill", "text, watermark", 16, 512, 512, 15, 7.0, 456], | |
["Cherry blossoms falling in a peaceful Japanese garden", "", 20, 768, 512, 20, 7.5, 789] | |
], | |
inputs=[prompt_input, negative_prompt_input, num_frames, height, width, num_steps, guidance_scale, seed] | |
) | |
with gr.Tab("ℹ️ System Info"): | |
info_btn = gr.Button("🔍 Check System Status", variant="secondary") | |
system_output = gr.Markdown() | |
info_btn.click(fn=get_system_info, outputs=system_output) | |
demo.load(fn=get_system_info, outputs=system_output) | |
with gr.Tab("📚 ZeroGPU Guide"): | |
gr.Markdown(""" | |
## 🚀 ZeroGPU Nedir? | |
**ZeroGPU**, Hugging Face'in ücretsiz GPU hizmetidir: | |
### ✅ Avantajları: | |
- **Ücretsiz A100 GPU** erişimi | |
- **40GB GPU belleği** | |
- Otomatik kaynak yönetimi | |
- CPU Basic Space'te bile çalışır | |
### ⚙️ Nasıl Etkinleştirilir: | |
1. Space Settings → Advanced → ZeroGPU etkinleştir | |
2. `requirements.txt`'e `spaces` ekle | |
3. Kodda `@spaces.GPU()` decorator kullan | |
### 📊 Limitler: | |
- Fonksiyon başına max 120 saniye | |
- Eşzamanlı kullanım sınırı | |
- Yoğun zamanlarda kuyruk | |
### 💡 İpuçları: | |
- Küçük parametrelerle başlayın | |
- İlk çalıştırma model yükleme nedeniyle uzun sürebilir | |
- Hata alırsanız birkaç saniye bekleyip tekrar deneyin | |
""") | |
# Launch the app | |
if __name__ == "__main__": | |
demo.queue(max_size=10) # ZeroGPU için queue gerekli | |
demo.launch( | |
share=False, | |
server_name="0.0.0.0", | |
server_port=7860, | |
show_error=True | |
) |