ozilion committed on
Commit 1431767 · verified · 1 Parent(s): 001d74d

Update app.py

Files changed (1)
  1. app.py +242 -186
app.py CHANGED
@@ -6,51 +6,122 @@ import numpy as np
 import tempfile
 from typing import Optional, Tuple
 import time
+import subprocess
+import sys
 
-# ZeroGPU import - this is very important!
-import spaces
+# ZeroGPU import
+try:
+    import spaces
+    SPACES_AVAILABLE = True
+    print("✅ Spaces library loaded successfully")
+except ImportError:
+    print("⚠️ Spaces library not available")
+    SPACES_AVAILABLE = False
+    # Create dummy decorator
+    def spaces_gpu_decorator(duration=60):
+        def decorator(func):
+            return func
+        return decorator
+    spaces = type('spaces', (), {'GPU': spaces_gpu_decorator})()
 
-# Check if running in ZeroGPU environment
+# Environment checks
 IS_ZERO_GPU = os.environ.get("SPACES_ZERO_GPU") == "true"
 IS_SPACES = os.environ.get("SPACE_ID") is not None
 
-def load_model():
-    """Load LTX-Video model - this will run on ZeroGPU when decorated"""
+print(f"Environment: ZeroGPU={IS_ZERO_GPU}, Spaces={IS_SPACES}")
+
+def check_and_install_requirements():
+    """Check and install missing requirements"""
+    try:
+        import diffusers
+        print(f"✅ Diffusers version: {diffusers.__version__}")
+        return True
+    except ImportError:
+        print("❌ Diffusers not found, attempting to install...")
+        try:
+            subprocess.check_call([sys.executable, "-m", "pip", "install", "diffusers[torch]>=0.30.0"])
+            subprocess.check_call([sys.executable, "-m", "pip", "install", "transformers>=4.35.0"])
+            subprocess.check_call([sys.executable, "-m", "pip", "install", "accelerate"])
+            import diffusers
+            print(f"✅ Diffusers installed successfully: {diffusers.__version__}")
+            return True
+        except Exception as e:
+            print(f"❌ Failed to install diffusers: {e}")
+            return False
+
+def load_model_safe():
+    """Safely load the LTX-Video model with comprehensive error handling"""
+
+    # First, ensure requirements are installed
+    if not check_and_install_requirements():
+        return None, "Failed to install required packages"
+
     try:
+        print("🔄 Attempting to load LTX-Video model...")
+
+        # Import after installation
         from diffusers import LTXVideoPipeline
+        import torch
 
-        print("🔄 Loading LTX-Video model...")
+        model_id = "Lightricks/LTX-Video"
 
+        # Check available memory
+        if torch.cuda.is_available():
+            gpu_memory = torch.cuda.get_device_properties(0).total_memory / (1024**3)
+            print(f"📊 Available GPU memory: {gpu_memory:.1f} GB")
+
+        # Load with conservative settings
+        print("📥 Loading pipeline...")
         pipe = LTXVideoPipeline.from_pretrained(
-            "Lightricks/LTX-Video",
+            model_id,
             torch_dtype=torch.bfloat16,
             use_safetensors=True,
+            variant="fp16"
         )
 
-        # ZeroGPU optimizations
-        if IS_ZERO_GPU:
+        # Move to GPU if available
+        if torch.cuda.is_available():
             pipe = pipe.to("cuda")
-            # Enable memory optimizations
+            print("🚀 Model moved to GPU")
+
+        # Enable optimizations
+        try:
             pipe.enable_vae_slicing()
             pipe.enable_vae_tiling()
-            if hasattr(pipe, 'enable_memory_efficient_attention'):
-                pipe.enable_memory_efficient_attention()
+            print("⚡ Memory optimizations enabled")
+        except Exception as e:
+            print(f"⚠️ Some optimizations failed: {e}")
 
         print("✅ Model loaded successfully!")
-        return pipe
+        return pipe, None
+
+    except ImportError as e:
+        error_msg = f"Import error: {e}. Please check if diffusers is properly installed."
+        print(f"❌ {error_msg}")
+        return None, error_msg
 
     except Exception as e:
-        print(f"❌ Model loading failed: {e}")
-        return None
+        error_msg = f"Model loading failed: {str(e)}"
+        print(f"❌ {error_msg}")
+        return None, error_msg
 
-# Global model variable - will be loaded when needed
+# Global model variable
 MODEL = None
+MODEL_ERROR = None
 
-@spaces.GPU(duration=120)  # ZeroGPU decorator - 2 minutes of GPU usage
+def initialize_model():
+    """Initialize model on first use"""
+    global MODEL, MODEL_ERROR
+    if MODEL is None and MODEL_ERROR is None:
+        print("🚀 Initializing model for first use...")
+        MODEL, MODEL_ERROR = load_model_safe()
+    return MODEL is not None
+
+@spaces.GPU(duration=120) if SPACES_AVAILABLE else lambda x: x
 def generate_video(
     prompt: str,
     negative_prompt: str = "",
-    num_frames: int = 25,
+    num_frames: int = 16,
     height: int = 512,
     width: int = 512,
     num_inference_steps: int = 20,
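A note on the new decorator line: the fallback branch already installs a no-op `spaces.GPU`, so the conditional form `@spaces.GPU(duration=120) if SPACES_AVAILABLE else lambda x: x` (valid only with the relaxed decorator grammar of PEP 614, Python 3.9+) is not strictly needed. A minimal sketch of the simpler, unconditional variant; the stand-in class below is illustrative and not part of the commit:

```python
# Sketch: same behaviour as the committed fallback, but the decorator is applied unconditionally.
try:
    import spaces  # real ZeroGPU decorator on Hugging Face Spaces
except ImportError:
    class spaces:  # illustrative stand-in exposing the same GPU(duration=...) call shape
        @staticmethod
        def GPU(duration=60):
            def decorator(func):
                return func  # no-op outside Spaces
            return decorator

@spaces.GPU(duration=120)
def generate_video(prompt: str, *args, **kwargs):
    ...
```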
@@ -59,48 +130,46 @@ def generate_video(
 ) -> Tuple[Optional[str], str]:
     """Generate video using LTX-Video with ZeroGPU"""
 
-    global MODEL
-
-    # Load model if not already loaded
-    if MODEL is None:
-        MODEL = load_model()
+    global MODEL, MODEL_ERROR
 
-    if MODEL is None:
-        return None, "❌ Model loading failed. Please try again."
+    # Initialize model if needed
+    if not initialize_model():
+        error_msg = f"❌ Model initialization failed: {MODEL_ERROR or 'Unknown error'}"
+        return None, error_msg
 
     # Input validation
     if not prompt.strip():
         return None, "❌ Please enter a valid prompt."
 
-    if len(prompt) > 300:
-        return None, "❌ Prompt too long. Please keep it under 300 characters."
+    if len(prompt) > 200:
+        return None, "❌ Prompt too long. Please keep it under 200 characters."
 
-    # ZeroGPU optimizations - limit parameters for stability
-    num_frames = min(num_frames, 25)  # Max 25 frames
-    num_inference_steps = min(num_inference_steps, 25)  # Max 25 steps
-    height = min(height, 768)  # Max 768px
-    width = min(width, 768)  # Max 768px
+    # Limit parameters for stability
+    num_frames = min(max(num_frames, 8), 24)
+    num_inference_steps = min(max(num_inference_steps, 10), 25)
+    height = min(max(height, 256), 768)
+    width = min(max(width, 256), 768)
 
     try:
-        # Clear CUDA cache
-        torch.cuda.empty_cache()
+        # Clear memory
+        if torch.cuda.is_available():
+            torch.cuda.empty_cache()
         gc.collect()
 
-        # Set seed for reproducibility
-        generator = None
+        # Set seed
         if seed == -1:
             seed = np.random.randint(0, 2**32 - 1)
 
-        generator = torch.Generator(device="cuda").manual_seed(seed)
+        generator = torch.Generator(device="cuda" if torch.cuda.is_available() else "cpu").manual_seed(seed)
 
-        print(f"🎬 Generating video: {prompt}")
+        print(f"🎬 Generating: '{prompt[:50]}...'")
         start_time = time.time()
 
         # Generate video
-        with torch.autocast("cuda", dtype=torch.bfloat16):
+        with torch.autocast("cuda" if torch.cuda.is_available() else "cpu", dtype=torch.bfloat16):
             result = MODEL(
                 prompt=prompt,
-                negative_prompt=negative_prompt if negative_prompt else None,
+                negative_prompt=negative_prompt if negative_prompt.strip() else None,
                 num_frames=num_frames,
                 height=height,
                 width=width,
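For quick local testing, `generate_video` returns a `(video_path or None, status_message)` tuple, so it can be driven outside the Gradio UI. A hedged usage sketch; the keyword names for the last two arguments follow the order of the Gradio `inputs` list and are assumed to match the signature:

```python
# Usage sketch (not part of the commit): call the generator directly and inspect the result.
video_path, info = generate_video(
    prompt="Ocean waves at sunset, cinematic view",
    negative_prompt="blurry",
    num_frames=16,
    height=512,
    width=512,
    num_inference_steps=20,
    guidance_scale=7.5,  # assumed keyword, mirroring the Gradio inputs order
    seed=42,             # assumed keyword; -1 would pick a random seed
)
print(info)
if video_path:
    print(f"Video written to {video_path}")
```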
@@ -112,228 +181,215 @@
         end_time = time.time()
         generation_time = end_time - start_time
 
-        # Export video
+        # Save video
         video_frames = result.frames[0]
 
         with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmp_file:
-            # Export to video file
-            from diffusers.utils import export_to_video
-            export_to_video(video_frames, tmp_file.name, fps=8)
-            video_path = tmp_file.name
+            try:
+                from diffusers.utils import export_to_video
+                export_to_video(video_frames, tmp_file.name, fps=8)
+                video_path = tmp_file.name
+            except Exception as e:
+                # Fallback: save as individual frames if export fails
+                print(f"⚠️ Video export failed, trying alternative: {e}")
+                return None, f"❌ Video export failed: {str(e)}"
 
         # Clear memory
-        torch.cuda.empty_cache()
+        if torch.cuda.is_available():
+            torch.cuda.empty_cache()
         gc.collect()
 
-        success_msg = f"""
-        ✅ Video generated successfully with ZeroGPU!
-
-        📝 Prompt: {prompt}
-        🎬 Frames: {num_frames}
-        📏 Resolution: {width}x{height}
-        ⚙️ Inference Steps: {num_inference_steps}
-        🎯 Guidance Scale: {guidance_scale}
-        🎲 Seed: {seed}
-        ⏱️ Generation Time: {generation_time:.1f}s
-        🖥️ ZeroGPU: {'✅' if IS_ZERO_GPU else '❌'}
-        """
+        success_msg = f"""✅ Video generated successfully!
+
+📝 **Prompt:** {prompt}
+🎬 **Frames:** {num_frames}
+📏 **Resolution:** {width}x{height}
+⚙️ **Inference Steps:** {num_inference_steps}
+🎯 **Guidance Scale:** {guidance_scale}
+🎲 **Seed:** {seed}
+⏱️ **Generation Time:** {generation_time:.1f}s
+🖥️ **Device:** {'CUDA' if torch.cuda.is_available() else 'CPU'}
+⚡ **ZeroGPU:** {'✅' if IS_ZERO_GPU else '❌'}"""
 
         return video_path, success_msg
 
     except torch.cuda.OutOfMemoryError:
-        torch.cuda.empty_cache()
+        if torch.cuda.is_available():
+            torch.cuda.empty_cache()
         gc.collect()
-        return None, "❌ GPU memory exceeded. Try reducing frames, resolution, or inference steps."
+        return None, "❌ GPU memory exceeded. Try reducing frames/resolution or try again in a moment."
 
     except Exception as e:
-        torch.cuda.empty_cache()
+        if torch.cuda.is_available():
+            torch.cuda.empty_cache()
         gc.collect()
         return None, f"❌ Generation failed: {str(e)}"
 
 def get_system_info():
-    """Get system information"""
-    gpu_info = "Not available"
+    """Get comprehensive system information"""
+
+    # Check package versions
+    package_info = {}
+    try:
+        import diffusers
+        package_info['diffusers'] = diffusers.__version__
+    except ImportError:
+        package_info['diffusers'] = '❌ Not installed'
+
+    try:
+        import transformers
+        package_info['transformers'] = transformers.__version__
+    except ImportError:
+        package_info['transformers'] = '❌ Not installed'
+
+    # GPU info
+    gpu_info = "❌ Not available"
+    gpu_memory = 0
     if torch.cuda.is_available():
-        gpu_info = f"{torch.cuda.get_device_name(0)} ({torch.cuda.get_device_properties(0).total_memory / 1024**3:.1f} GB)"
+        try:
+            gpu_info = torch.cuda.get_device_name(0)
+            gpu_memory = torch.cuda.get_device_properties(0).total_memory / (1024**3)
+        except:
+            gpu_info = "✅ Available (details unavailable)"
 
-    return f"""
-    ## 🖥️ System Information
+    return f"""## 🖥️ System Information
+
+**Environment:**
+- 🚀 ZeroGPU: {'✅ Active' if IS_ZERO_GPU else '❌ Not detected'}
+- 🏠 HF Spaces: {'✅' if IS_SPACES else '❌'}
+- 🔥 CUDA: {'✅' if torch.cuda.is_available() else '❌'}
+- 🖥️ GPU: {gpu_info} ({gpu_memory:.1f} GB)
+
+**Packages:**
+- PyTorch: {torch.__version__}
+- Diffusers: {package_info.get('diffusers', 'Unknown')}
+- Transformers: {package_info.get('transformers', 'Unknown')}
+- Spaces: {'✅' if SPACES_AVAILABLE else '❌'}
+
+**Model Status:**
+- LTX-Video: {'✅ Loaded' if MODEL is not None else '⏳ Will load on first use' if MODEL_ERROR is None else f'❌ Error: {MODEL_ERROR}'}
+
+**Tips:**
+{'🎯 Ready to generate!' if MODEL is not None else '⚡ First generation will take longer due to model loading'}"""
+
+def test_dependencies():
+    """Test if all dependencies are working"""
+    results = []
 
-    **Environment:**
-    - ZeroGPU: {'✅ Active' if IS_ZERO_GPU else '❌ Not detected'}
-    - Hugging Face Spaces: {'✅' if IS_SPACES else '❌'}
-    - CUDA Available: {'✅' if torch.cuda.is_available() else '❌'}
-    - GPU: {gpu_info}
-    - PyTorch: {torch.__version__}
+    # Test torch
+    try:
+        import torch
+        results.append(f"✅ PyTorch {torch.__version__}")
+        if torch.cuda.is_available():
+            results.append(f"✅ CUDA {torch.version.cuda}")
+        else:
+            results.append("⚠️ CUDA not available")
+    except Exception as e:
+        results.append(f"❌ PyTorch: {e}")
 
-    **Model Status:**
-    - LTX-Video: {'✅ Loaded' if MODEL is not None else '⏳ Will load on first use'}
+    # Test diffusers
+    try:
+        import diffusers
+        results.append(f"✅ Diffusers {diffusers.__version__}")
+    except Exception as e:
+        results.append(f"❌ Diffusers: {e}")
 
-    **ZeroGPU Benefits:**
-    - ✅ Free GPU access
-    - ✅ A100 40GB GPU
-    - ✅ Automatic resource management
-    - ⏱️ 120 second timeout per generation
-    """
+    # Test transformers
+    try:
+        import transformers
+        results.append(f"✅ Transformers {transformers.__version__}")
+    except Exception as e:
+        results.append(f"❌ Transformers: {e}")
+
+    return "\n".join(results)
 
 # Create Gradio interface
-with gr.Blocks(title="LTX-Video with ZeroGPU", theme=gr.themes.Soft()) as demo:
+with gr.Blocks(title="LTX-Video ZeroGPU", theme=gr.themes.Soft()) as demo:
 
     gr.Markdown("""
-    # 🚀 LTX-Video Generator (ZeroGPU Powered)
+    # 🚀 LTX-Video Generator (ZeroGPU)
 
-    Generate high-quality videos from text using Lightricks' LTX-Video model, powered by **ZeroGPU**!
-
-    ⚡ **Free GPU access** - No need to upgrade your Space hardware!
+    Generate high-quality videos from text using **Lightricks LTX-Video** model with **ZeroGPU**!
     """)
 
-    if IS_ZERO_GPU:
-        gr.Markdown("✅ **ZeroGPU Active** - You have free access to A100 GPU!")
-    else:
-        gr.Markdown("⚠️ **ZeroGPU not detected** - Make sure you've enabled ZeroGPU in your Space settings.")
+    # Status indicator
+    with gr.Row():
+        gr.Markdown(f"""
+        **Status:** {'🟢 ZeroGPU Active' if IS_ZERO_GPU else '🟡 CPU Mode'} |
+        **Environment:** {'HF Spaces' if IS_SPACES else 'Local'}
+        """)
 
     with gr.Tab("🎥 Generate Video"):
         with gr.Row():
             with gr.Column(scale=1):
                 prompt_input = gr.Textbox(
                     label="📝 Video Prompt",
-                    placeholder="A serene mountain lake reflecting the aurora borealis...",
+                    placeholder="A majestic eagle soaring through mountain peaks...",
                     lines=3,
                     max_lines=5
                 )
 
                 negative_prompt_input = gr.Textbox(
                     label="🚫 Negative Prompt (Optional)",
-                    placeholder="blurry, low quality, distorted, text, watermark...",
+                    placeholder="blurry, low quality, distorted...",
                     lines=2
                 )
 
-                with gr.Accordion("🔧 Advanced Settings", open=False):
+                with gr.Accordion("⚙️ Settings", open=True):
                     with gr.Row():
-                        num_frames = gr.Slider(
-                            minimum=8,
-                            maximum=25,  # Limited for ZeroGPU
-                            value=16,
-                            step=1,
-                            label="🎬 Number of Frames"
-                        )
-
-                        num_steps = gr.Slider(
-                            minimum=10,
-                            maximum=25,  # Limited for ZeroGPU
-                            value=20,
-                            step=1,
-                            label="⚙️ Inference Steps"
-                        )
+                        num_frames = gr.Slider(8, 24, value=16, step=1, label="🎬 Frames")
+                        num_steps = gr.Slider(10, 25, value=20, step=1, label="🔄 Steps")
 
                     with gr.Row():
-                        width = gr.Dropdown(
-                            choices=[256, 512, 768],  # Limited for ZeroGPU
-                            value=512,
-                            label="📏 Width"
-                        )
-
-                        height = gr.Dropdown(
-                            choices=[256, 512, 768],  # Limited for ZeroGPU
-                            value=512,
-                            label="📏 Height"
-                        )
+                        width = gr.Dropdown([256, 512, 768], value=512, label="📏 Width")
+                        height = gr.Dropdown([256, 512, 768], value=512, label="📏 Height")
 
                     with gr.Row():
-                        guidance_scale = gr.Slider(
-                            minimum=1.0,
-                            maximum=15.0,
-                            value=7.5,
-                            step=0.5,
-                            label="🎯 Guidance Scale"
-                        )
-
-                        seed = gr.Number(
-                            label="🎲 Seed (-1 for random)",
-                            value=-1,
-                            precision=0
-                        )
+                        guidance_scale = gr.Slider(1.0, 12.0, value=7.5, step=0.5, label="🎯 Guidance")
+                        seed = gr.Number(value=-1, precision=0, label="🎲 Seed (-1=random)")
 
-                generate_btn = gr.Button("🚀 Generate Video with ZeroGPU", variant="primary", size="lg")
-
-                gr.Markdown("""
-                **⏱️ Note:** Each generation uses 2 minutes of ZeroGPU time.
-                """)
+                generate_btn = gr.Button("🚀 Generate Video", variant="primary", size="lg")
 
             with gr.Column(scale=1):
-                video_output = gr.Video(
-                    label="🎥 Generated Video",
-                    height=400
-                )
-
-                result_text = gr.Textbox(
-                    label="📋 Generation Info",
-                    lines=8,
-                    show_copy_button=True
-                )
+                video_output = gr.Video(label="🎥 Generated Video", height=400)
+                result_text = gr.Textbox(label="📋 Results", lines=6, show_copy_button=True)
 
-        # Event handler
+        # Event handlers
         generate_btn.click(
             fn=generate_video,
-            inputs=[
-                prompt_input, negative_prompt_input, num_frames,
-                height, width, num_steps, guidance_scale, seed
-            ],
+            inputs=[prompt_input, negative_prompt_input, num_frames, height, width, num_steps, guidance_scale, seed],
             outputs=[video_output, result_text]
         )
 
-        # Example prompts
         gr.Examples(
             examples=[
-                ["A majestic eagle soaring over snow-capped mountains", "blurry, low quality", 16, 512, 512, 20, 7.5, 42],
-                ["Ocean waves gently lapping on a tropical beach at sunset", "", 20, 512, 512, 20, 8.0, 123],
-                ["A steaming cup of coffee on a rainy window sill", "text, watermark", 16, 512, 512, 15, 7.0, 456],
-                ["Cherry blossoms falling in a peaceful Japanese garden", "", 20, 768, 512, 20, 7.5, 789]
+                ["A peaceful cat sleeping in a sunny garden", "", 16, 512, 512, 20, 7.5, 42],
+                ["Ocean waves at sunset, cinematic view", "blurry", 20, 512, 512, 20, 8.0, 123],
+                ["A hummingbird hovering near red flowers", "", 16, 512, 512, 15, 7.0, 456]
             ],
             inputs=[prompt_input, negative_prompt_input, num_frames, height, width, num_steps, guidance_scale, seed]
         )
 
     with gr.Tab("ℹ️ System Info"):
-        info_btn = gr.Button("🔍 Check System Status", variant="secondary")
+        info_btn = gr.Button("🔍 Check System", variant="secondary")
         system_output = gr.Markdown()
 
         info_btn.click(fn=get_system_info, outputs=system_output)
        demo.load(fn=get_system_info, outputs=system_output)
 
-    with gr.Tab("📚 ZeroGPU Guide"):
-        gr.Markdown("""
-        ## 🚀 What is ZeroGPU?
+    with gr.Tab("🔧 Debug"):
+        test_btn = gr.Button("🧪 Test Dependencies")
+        test_output = gr.Textbox(label="Test Results", lines=10)
 
-        **ZeroGPU** is Hugging Face's free GPU service:
-
-        ### ✅ Advantages:
-        - **Free A100 GPU** access
-        - **40GB of GPU memory**
-        - Automatic resource management
-        - Works even on a CPU Basic Space
-
-        ### ⚙️ How to enable it:
-        1. Space Settings → Advanced → enable ZeroGPU
-        2. Add `spaces` to `requirements.txt`
-        3. Use the `@spaces.GPU()` decorator in the code
-
-        ### 📊 Limits:
-        - Max 120 seconds per function call
-        - Concurrent-usage limits
-        - Queues at busy times
-
-        ### 💡 Tips:
-        - Start with small parameters
-        - The first run can take longer because of model loading
-        - If you get an error, wait a few seconds and try again
-        """)
+        test_btn.click(fn=test_dependencies, outputs=test_output)
 
-# Launch the app
+# Launch
 if __name__ == "__main__":
-    demo.queue(max_size=10)  # queue required for ZeroGPU
+    demo.queue(max_size=5)
     demo.launch(
         share=False,
-        server_name="0.0.0.0",
+        server_name="0.0.0.0",
         server_port=7860,
         show_error=True
     )
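One gap in the new export path above: the `except` branch is commented as a fallback that saves individual frames, but it currently only returns an error message. A minimal sketch of such a fallback, assuming `video_frames` is a list of PIL images (the usual return type of diffusers video pipelines); the helper name and file pattern are illustrative:

```python
import os
import tempfile

def save_frames_fallback(video_frames):
    """Write each frame as a PNG so the result stays inspectable when MP4 export fails."""
    out_dir = tempfile.mkdtemp(prefix="ltx_frames_")
    for i, frame in enumerate(video_frames):
        # PIL.Image.save infers the format from the .png extension
        frame.save(os.path.join(out_dir, f"frame_{i:04d}.png"))
    return out_dir
```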
 
6
  import tempfile
7
  from typing import Optional, Tuple
8
  import time
9
+ import subprocess
10
+ import sys
11
 
12
+ # ZeroGPU import
13
+ try:
14
+ import spaces
15
+ SPACES_AVAILABLE = True
16
+ print("βœ… Spaces library loaded successfully")
17
+ except ImportError:
18
+ print("⚠️ Spaces library not available")
19
+ SPACES_AVAILABLE = False
20
+ # Create dummy decorator
21
+ def spaces_gpu_decorator(duration=60):
22
+ def decorator(func):
23
+ return func
24
+ return decorator
25
+ spaces = type('spaces', (), {'GPU': spaces_gpu_decorator})()
26
 
27
+ # Environment checks
28
  IS_ZERO_GPU = os.environ.get("SPACES_ZERO_GPU") == "true"
29
  IS_SPACES = os.environ.get("SPACE_ID") is not None
30
 
31
+ print(f"Environment: ZeroGPU={IS_ZERO_GPU}, Spaces={IS_SPACES}")
32
+
33
+ def check_and_install_requirements():
34
+ """Check and install missing requirements"""
35
+ try:
36
+ import diffusers
37
+ print(f"βœ… Diffusers version: {diffusers.__version__}")
38
+ return True
39
+ except ImportError:
40
+ print("❌ Diffusers not found, attempting to install...")
41
+ try:
42
+ subprocess.check_call([sys.executable, "-m", "pip", "install", "diffusers[torch]>=0.30.0"])
43
+ subprocess.check_call([sys.executable, "-m", "pip", "install", "transformers>=4.35.0"])
44
+ subprocess.check_call([sys.executable, "-m", "pip", "install", "accelerate"])
45
+ import diffusers
46
+ print(f"βœ… Diffusers installed successfully: {diffusers.__version__}")
47
+ return True
48
+ except Exception as e:
49
+ print(f"❌ Failed to install diffusers: {e}")
50
+ return False
51
+
52
+ def load_model_safe():
53
+ """Safely load the LTX-Video model with comprehensive error handling"""
54
+
55
+ # First, ensure requirements are installed
56
+ if not check_and_install_requirements():
57
+ return None, "Failed to install required packages"
58
+
59
  try:
60
+ print("πŸ”„ Attempting to load LTX-Video model...")
61
+
62
+ # Import after installation
63
  from diffusers import LTXVideoPipeline
64
+ import torch
65
 
66
+ model_id = "Lightricks/LTX-Video"
67
 
68
+ # Check available memory
69
+ if torch.cuda.is_available():
70
+ gpu_memory = torch.cuda.get_device_properties(0).total_memory / (1024**3)
71
+ print(f"πŸ“Š Available GPU memory: {gpu_memory:.1f} GB")
72
+
73
+ # Load with conservative settings
74
+ print("πŸ“₯ Loading pipeline...")
75
  pipe = LTXVideoPipeline.from_pretrained(
76
+ model_id,
77
  torch_dtype=torch.bfloat16,
78
  use_safetensors=True,
79
+ variant="fp16"
80
  )
81
 
82
+ # Move to GPU if available
83
+ if torch.cuda.is_available():
84
  pipe = pipe.to("cuda")
85
+ print("πŸš€ Model moved to GPU")
86
+
87
+ # Enable optimizations
88
+ try:
89
  pipe.enable_vae_slicing()
90
  pipe.enable_vae_tiling()
91
+ print("⚑ Memory optimizations enabled")
92
+ except Exception as e:
93
+ print(f"⚠️ Some optimizations failed: {e}")
94
 
95
  print("βœ… Model loaded successfully!")
96
+ return pipe, None
97
+
98
+ except ImportError as e:
99
+ error_msg = f"Import error: {e}. Please check if diffusers is properly installed."
100
+ print(f"❌ {error_msg}")
101
+ return None, error_msg
102
 
103
  except Exception as e:
104
+ error_msg = f"Model loading failed: {str(e)}"
105
+ print(f"❌ {error_msg}")
106
+ return None, error_msg
107
 
108
+ # Global model variable
109
  MODEL = None
110
+ MODEL_ERROR = None
111
 
112
+ def initialize_model():
113
+ """Initialize model on first use"""
114
+ global MODEL, MODEL_ERROR
115
+ if MODEL is None and MODEL_ERROR is None:
116
+ print("πŸš€ Initializing model for first use...")
117
+ MODEL, MODEL_ERROR = load_model_safe()
118
+ return MODEL is not None
119
+
120
+ @spaces.GPU(duration=120) if SPACES_AVAILABLE else lambda x: x
121
  def generate_video(
122
  prompt: str,
123
  negative_prompt: str = "",
124
+ num_frames: int = 16,
125
  height: int = 512,
126
  width: int = 512,
127
  num_inference_steps: int = 20,
 
130
  ) -> Tuple[Optional[str], str]:
131
  """Generate video using LTX-Video with ZeroGPU"""
132
 
133
+ global MODEL, MODEL_ERROR
 
 
 
 
134
 
135
+ # Initialize model if needed
136
+ if not initialize_model():
137
+ error_msg = f"❌ Model initialization failed: {MODEL_ERROR or 'Unknown error'}"
138
+ return None, error_msg
139
 
140
  # Input validation
141
  if not prompt.strip():
142
  return None, "❌ Please enter a valid prompt."
143
 
144
+ if len(prompt) > 200:
145
+ return None, "❌ Prompt too long. Please keep it under 200 characters."
146
 
147
+ # Limit parameters for stability
148
+ num_frames = min(max(num_frames, 8), 24)
149
+ num_inference_steps = min(max(num_inference_steps, 10), 25)
150
+ height = min(max(height, 256), 768)
151
+ width = min(max(width, 256), 768)
152
 
153
  try:
154
+ # Clear memory
155
+ if torch.cuda.is_available():
156
+ torch.cuda.empty_cache()
157
  gc.collect()
158
 
159
+ # Set seed
 
160
  if seed == -1:
161
  seed = np.random.randint(0, 2**32 - 1)
162
 
163
+ generator = torch.Generator(device="cuda" if torch.cuda.is_available() else "cpu").manual_seed(seed)
164
 
165
+ print(f"🎬 Generating: '{prompt[:50]}...'")
166
  start_time = time.time()
167
 
168
  # Generate video
169
+ with torch.autocast("cuda" if torch.cuda.is_available() else "cpu", dtype=torch.bfloat16):
170
  result = MODEL(
171
  prompt=prompt,
172
+ negative_prompt=negative_prompt if negative_prompt.strip() else None,
173
  num_frames=num_frames,
174
  height=height,
175
  width=width,
 
181
  end_time = time.time()
182
  generation_time = end_time - start_time
183
 
184
+ # Save video
185
  video_frames = result.frames[0]
186
 
187
  with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmp_file:
188
+ try:
189
+ from diffusers.utils import export_to_video
190
+ export_to_video(video_frames, tmp_file.name, fps=8)
191
+ video_path = tmp_file.name
192
+ except Exception as e:
193
+ # Fallback: save as individual frames if export fails
194
+ print(f"⚠️ Video export failed, trying alternative: {e}")
195
+ return None, f"❌ Video export failed: {str(e)}"
196
 
197
  # Clear memory
198
+ if torch.cuda.is_available():
199
+ torch.cuda.empty_cache()
200
  gc.collect()
201
 
202
+ success_msg = f"""βœ… Video generated successfully!
203
+
204
+ πŸ“ **Prompt:** {prompt}
205
+ 🎬 **Frames:** {num_frames}
206
+ πŸ“ **Resolution:** {width}x{height}
207
+ βš™οΈ **Inference Steps:** {num_inference_steps}
208
+ 🎯 **Guidance Scale:** {guidance_scale}
209
+ 🎲 **Seed:** {seed}
210
+ ⏱️ **Generation Time:** {generation_time:.1f}s
211
+ πŸ–₯️ **Device:** {'CUDA' if torch.cuda.is_available() else 'CPU'}
212
+ ⚑ **ZeroGPU:** {'βœ…' if IS_ZERO_GPU else '❌'}"""
 
213
 
214
  return video_path, success_msg
215
 
216
  except torch.cuda.OutOfMemoryError:
217
+ if torch.cuda.is_available():
218
+ torch.cuda.empty_cache()
219
  gc.collect()
220
+ return None, "❌ GPU memory exceeded. Try reducing frames/resolution or try again in a moment."
221
 
222
  except Exception as e:
223
+ if torch.cuda.is_available():
224
+ torch.cuda.empty_cache()
225
  gc.collect()
226
  return None, f"❌ Generation failed: {str(e)}"
227
 
228
  def get_system_info():
229
+ """Get comprehensive system information"""
230
+
231
+ # Check package versions
232
+ package_info = {}
233
+ try:
234
+ import diffusers
235
+ package_info['diffusers'] = diffusers.__version__
236
+ except ImportError:
237
+ package_info['diffusers'] = '❌ Not installed'
238
+
239
+ try:
240
+ import transformers
241
+ package_info['transformers'] = transformers.__version__
242
+ except ImportError:
243
+ package_info['transformers'] = '❌ Not installed'
244
+
245
+ # GPU info
246
+ gpu_info = "❌ Not available"
247
+ gpu_memory = 0
248
  if torch.cuda.is_available():
249
+ try:
250
+ gpu_info = torch.cuda.get_device_name(0)
251
+ gpu_memory = torch.cuda.get_device_properties(0).total_memory / (1024**3)
252
+ except:
253
+ gpu_info = "βœ… Available (details unavailable)"
254
 
255
+ return f"""## πŸ–₯️ System Information
256
+
257
+ **Environment:**
258
+ - πŸš€ ZeroGPU: {'βœ… Active' if IS_ZERO_GPU else '❌ Not detected'}
259
+ - 🏠 HF Spaces: {'βœ…' if IS_SPACES else '❌'}
260
+ - πŸ”₯ CUDA: {'βœ…' if torch.cuda.is_available() else '❌'}
261
+ - πŸ–₯️ GPU: {gpu_info} ({gpu_memory:.1f} GB)
262
+
263
+ **Packages:**
264
+ - PyTorch: {torch.__version__}
265
+ - Diffusers: {package_info.get('diffusers', 'Unknown')}
266
+ - Transformers: {package_info.get('transformers', 'Unknown')}
267
+ - Spaces: {'βœ…' if SPACES_AVAILABLE else '❌'}
268
+
269
+ **Model Status:**
270
+ - LTX-Video: {'βœ… Loaded' if MODEL is not None else '⏳ Will load on first use' if MODEL_ERROR is None else f'❌ Error: {MODEL_ERROR}'}
271
+
272
+ **Tips:**
273
+ {'🎯 Ready to generate!' if MODEL is not None else '⚑ First generation will take longer due to model loading'}"""
274
+
275
+ def test_dependencies():
276
+ """Test if all dependencies are working"""
277
+ results = []
278
 
279
+ # Test torch
280
+ try:
281
+ import torch
282
+ results.append(f"βœ… PyTorch {torch.__version__}")
283
+ if torch.cuda.is_available():
284
+ results.append(f"βœ… CUDA {torch.version.cuda}")
285
+ else:
286
+ results.append("⚠️ CUDA not available")
287
+ except Exception as e:
288
+ results.append(f"❌ PyTorch: {e}")
289
 
290
+ # Test diffusers
291
+ try:
292
+ import diffusers
293
+ results.append(f"βœ… Diffusers {diffusers.__version__}")
294
+ except Exception as e:
295
+ results.append(f"❌ Diffusers: {e}")
296
 
297
+ # Test transformers
298
+ try:
299
+ import transformers
300
+ results.append(f"βœ… Transformers {transformers.__version__}")
301
+ except Exception as e:
302
+ results.append(f"❌ Transformers: {e}")
303
+
304
+ return "\n".join(results)
305
 
306
  # Create Gradio interface
307
+ with gr.Blocks(title="LTX-Video ZeroGPU", theme=gr.themes.Soft()) as demo:
308
 
309
  gr.Markdown("""
310
+ # πŸš€ LTX-Video Generator (ZeroGPU)
311
 
312
+ Generate high-quality videos from text using **Lightricks LTX-Video** model with **ZeroGPU**!
 
 
313
  """)
314
 
315
+ # Status indicator
316
+ with gr.Row():
317
+ gr.Markdown(f"""
318
+ **Status:** {'🟒 ZeroGPU Active' if IS_ZERO_GPU else '🟑 CPU Mode'} |
319
+ **Environment:** {'HF Spaces' if IS_SPACES else 'Local'}
320
+ """)
321
 
322
  with gr.Tab("πŸŽ₯ Generate Video"):
323
  with gr.Row():
324
  with gr.Column(scale=1):
325
  prompt_input = gr.Textbox(
326
  label="πŸ“ Video Prompt",
327
+ placeholder="A majestic eagle soaring through mountain peaks...",
328
  lines=3,
329
  max_lines=5
330
  )
331
 
332
  negative_prompt_input = gr.Textbox(
333
  label="🚫 Negative Prompt (Optional)",
334
+ placeholder="blurry, low quality, distorted...",
335
  lines=2
336
  )
337
 
338
+ with gr.Accordion("βš™οΈ Settings", open=True):
339
  with gr.Row():
340
+ num_frames = gr.Slider(8, 24, value=16, step=1, label="🎬 Frames")
341
+ num_steps = gr.Slider(10, 25, value=20, step=1, label="πŸ”„ Steps")
 
 
 
 
 
 
 
 
 
 
 
 
 
342
 
343
  with gr.Row():
344
+ width = gr.Dropdown([256, 512, 768], value=512, label="πŸ“ Width")
345
+ height = gr.Dropdown([256, 512, 768], value=512, label="πŸ“ Height")
 
 
 
 
 
 
 
 
 
346
 
347
  with gr.Row():
348
+ guidance_scale = gr.Slider(1.0, 12.0, value=7.5, step=0.5, label="🎯 Guidance")
349
+ seed = gr.Number(value=-1, precision=0, label="🎲 Seed (-1=random)")
 
 
 
 
 
 
 
 
 
 
 
350
 
351
+ generate_btn = gr.Button("πŸš€ Generate Video", variant="primary", size="lg")
 
 
 
 
352
 
353
  with gr.Column(scale=1):
354
+ video_output = gr.Video(label="πŸŽ₯ Generated Video", height=400)
355
+ result_text = gr.Textbox(label="πŸ“‹ Results", lines=6, show_copy_button=True)
 
 
 
 
 
 
 
 
356
 
357
+ # Event handlers
358
  generate_btn.click(
359
  fn=generate_video,
360
+ inputs=[prompt_input, negative_prompt_input, num_frames, height, width, num_steps, guidance_scale, seed],
 
 
 
361
  outputs=[video_output, result_text]
362
  )
363
 
364
+ # Examples
365
  gr.Examples(
366
  examples=[
367
+ ["A peaceful cat sleeping in a sunny garden", "", 16, 512, 512, 20, 7.5, 42],
368
+ ["Ocean waves at sunset, cinematic view", "blurry", 20, 512, 512, 20, 8.0, 123],
369
+ ["A hummingbird hovering near red flowers", "", 16, 512, 512, 15, 7.0, 456]
 
370
  ],
371
  inputs=[prompt_input, negative_prompt_input, num_frames, height, width, num_steps, guidance_scale, seed]
372
  )
373
 
374
  with gr.Tab("ℹ️ System Info"):
375
+ info_btn = gr.Button("πŸ” Check System", variant="secondary")
376
  system_output = gr.Markdown()
377
 
378
  info_btn.click(fn=get_system_info, outputs=system_output)
379
  demo.load(fn=get_system_info, outputs=system_output)
380
 
381
+ with gr.Tab("πŸ”§ Debug"):
382
+ test_btn = gr.Button("πŸ§ͺ Test Dependencies")
383
+ test_output = gr.Textbox(label="Test Results", lines=10)
384
 
385
+ test_btn.click(fn=test_dependencies, outputs=test_output)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
386
 
387
+ # Launch
388
  if __name__ == "__main__":
389
+ demo.queue(max_size=5)
390
  demo.launch(
391
  share=False,
392
+ server_name="0.0.0.0",
393
  server_port=7860,
394
  show_error=True
395
  )