nftnik committed on
Commit fd4cd12 · verified · 1 Parent(s): 634839d

Update app.py

Files changed (1)
  1. app.py +76 -179
app.py CHANGED
@@ -3,7 +3,6 @@ import random
 import torch
 import numpy as np
 import gradio as gr
-import spaces
 from diffusers import FluxPipeline
 from translatepy import Translator
 
@@ -27,77 +26,64 @@ class Config:
     ENABLE_SEQUENTIAL_CPU_OFFLOAD = True
     ENABLE_ATTENTION_SLICING = "max"
 
-
 # -----------------------------------------------------------------------------
 # FluxGenerator class to handle image generation
 # -----------------------------------------------------------------------------
 class FluxGenerator:
     def __init__(self):
-        # Environment setup
         os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
         self.translator = Translator()
         self.device = self._get_optimal_device()
         print(f"Using {self.device.upper()}")
-
-        # Initialize pipeline
+
         self.pipe = None
         self._initialize_pipeline()
-
+
     def _get_optimal_device(self):
         """Determine the optimal device based on available resources"""
         if torch.cuda.is_available():
-            # Check GPU memory
            try:
                 gpu_memory = torch.cuda.get_device_properties(0).total_memory
                 if gpu_memory > 10 * 1024 * 1024 * 1024:  # More than 10GB
                     return "cuda"
                 else:
-                    print("Limited GPU memory detected, using CPU with GPU acceleration")
-                    return "cuda"  # Still use CUDA but will apply memory optimizations
+                    print("Limited GPU memory detected. Will still use CUDA with memory optimizations.")
+                    return "cuda"
            except:
                 print("Error checking GPU memory, falling back to CPU")
                 return "cpu"
         else:
             return "cpu"
-
+
     def _initialize_pipeline(self):
         """Initialize the Flux pipeline with memory optimizations"""
         try:
             print("Loading Flux model...")
-            # Use more memory-efficient settings
             pipe_kwargs = {
                 "torch_dtype": torch.bfloat16 if self.device == "cuda" else torch.float32,
             }
-
-            # Initialize the pipeline
-            self.pipe = FluxPipeline.from_pretrained(
-                Config.MODEL_ID,
-                **pipe_kwargs
-            )
-
+            self.pipe = FluxPipeline.from_pretrained(Config.MODEL_ID, **pipe_kwargs)
+
             # Apply memory optimizations
             if Config.ENABLE_MEMORY_EFFICIENT_ATTENTION and self.device == "cuda":
                 print("Enabling memory efficient attention")
                 self.pipe.enable_xformers_memory_efficient_attention()
-
+
             if Config.ENABLE_ATTENTION_SLICING:
                 print("Enabling attention slicing")
                 self.pipe.enable_attention_slicing(Config.ENABLE_ATTENTION_SLICING)
-
+
             if Config.ENABLE_SEQUENTIAL_CPU_OFFLOAD and self.device == "cuda":
                 print("Enabling sequential CPU offload")
                 self.pipe.enable_sequential_cpu_offload()
             else:
-                # Only move to device if not using CPU offload
-                self.pipe = self.pipe.to(self.device)
-
-            # Load default LoRA
+                # Only move to device if not offloading
+                self.pipe.to(self.device)
+
             print(f"Loading default LoRA: {Config.DEFAULT_LORA}")
             self.pipe.load_lora_weights(Config.DEFAULT_LORA, weight_name=Config.DEFAULT_WEIGHT_NAME)
-
+
             print("Model initialization complete")
-            return self.pipe
-
         except Exception as e:
             error_msg = f"Error initializing pipeline: {str(e)}"
             print(error_msg)
@@ -106,18 +92,17 @@ class FluxGenerator:
     def load_lora(self, lora_path):
         """Load a new LoRA model"""
         try:
-            print(f"Unloading previous LoRA weights...")
+            print("Unloading previous LoRA weights...")
             self.pipe.unload_lora_weights()
-
+
             if not lora_path:
-                print("No LoRA path provided, skipping LoRA loading")
+                print("No LoRA path provided, skipping LoRA loading.")
                 return gr.update(value="")
-
+
             print(f"Loading LoRA from {lora_path}...")
             self.pipe.load_lora_weights(lora_path)
-            print("LoRA loaded successfully")
+            print("LoRA loaded successfully.")
             return gr.update(label="LoRA Loaded Successfully")
-
         except Exception as e:
             error_msg = f"Failed to load LoRA from {lora_path}: {str(e)}"
             print(error_msg)
@@ -129,50 +114,43 @@ class FluxGenerator:
         try:
             print("Clearing CUDA memory cache...")
             torch.cuda.empty_cache()
-            if hasattr(torch.cuda, 'amp') and hasattr(torch.cuda.amp, 'autocast'):
+            if hasattr(torch.cuda, "amp") and hasattr(torch.cuda.amp, "autocast"):
                 torch.cuda.amp.clear_autocast_cache()
         except Exception as e:
             print(f"Warning: Failed to clear CUDA memory: {str(e)}")
 
-    @spaces.GPU()
     def generate(self, prompt, lora_word, lora_scale=Config.DEFAULT_LORA_SCALE,
                  width=Config.DEFAULT_WIDTH, height=Config.DEFAULT_HEIGHT,
-                 guidance_scale=Config.DEFAULT_GUIDANCE_SCALE, steps=Config.DEFAULT_STEPS,
-                 seed=-1, num_images=1):
-        """Generate images from a prompt with memory optimizations"""
+                 guidance_scale=Config.DEFAULT_GUIDANCE_SCALE,
+                 steps=Config.DEFAULT_STEPS, seed=-1, num_images=1):
+        """Generate images from a prompt with memory optimizations."""
         try:
             print(f"Generating image for prompt: '{prompt}'")
-
-            # Clear memory before generation
             self._clear_memory()
-
-            # Ensure we're using the right device
+
             if not Config.ENABLE_SEQUENTIAL_CPU_OFFLOAD:
                 print(f"Moving model to {self.device}")
                 self.pipe.to(self.device)
-
-            # Handle seed
+
             seed = random.randint(0, Config.MAX_SEED) if seed == -1 else int(seed)
             print(f"Using seed: {seed}")
             generator = torch.Generator(device=self.device).manual_seed(seed)
-
-            # Translate prompt if not in English
+
             print("Translating prompt if needed...")
             prompt_english = str(self.translator.translate(prompt, "English"))
             full_prompt = f"{prompt_english} {lora_word}"
             print(f"Full prompt: '{full_prompt}'")
-
-            # Lower resolution if on limited memory
-            if self.device == "cuda" and torch.cuda.get_device_properties(0).total_memory < 8 * 1024 * 1024 * 1024:
+
+            # If GPU memory is less than 8GB, scale resolution
+            if (self.device == "cuda" and
+                    torch.cuda.get_device_properties(0).total_memory < 8 * 1024 * 1024 * 1024):
                 original_width, original_height = width, height
-                # Scale down to 85% if memory is tight
                 width = int(width * 0.85)
                 height = int(height * 0.85)
-                print(f"Limited memory detected. Scaling down resolution from {original_width}x{original_height} to {width}x{height}")
-
-            # Generate with autocast for memory efficiency
+                print(f"Memory is tight. Scaled resolution from {original_width}x{original_height} to {width}x{height}")
+
             print(f"Starting generation with {steps} steps, guidance scale {guidance_scale}")
-            with torch.cuda.amp.autocast(enabled=self.device == "cuda"):
+            with torch.autocast("cuda", enabled=(self.device == "cuda")):
                 result = self.pipe(
                     prompt=full_prompt,
                     height=height,
@@ -184,15 +162,12 @@ class FluxGenerator:
                     generator=generator,
                     joint_attention_kwargs={"scale": lora_scale},
                 )
-
-            print("Generation complete, returning images")
-            self._clear_memory()  # Clear memory after generation
+            print("Generation complete, returning images.")
+            self._clear_memory()
             return result.images, seed
-
         except Exception as e:
             error_msg = f"Image generation failed: {str(e)}"
             print(error_msg)
-            # Clear memory after error
             self._clear_memory()
             raise gr.Error(error_msg)
 
@@ -209,135 +184,68 @@ class FluxUI:
             ["full-body shot, ohwx blue alien, wearing black techwear with a high collar, black cyber sneakers, running through a neon-lit cyberpunk alley at night.", "ohwx", 0.9],
             ["ohwx blue alien, wearing black techwear with a high collar, sitting inside a sleek, high-tech VR capsule, immersed in an augmented reality experience.", "ohwx", 0.9]
         ]
-
+
     def build(self):
-        """Build and return the Gradio interface"""
        with gr.Blocks(css=Config.CSS) as demo:
             gr.HTML("<h1><center>BR METAVERSO - Avatar Generator</center></h1>")
-
-            # Status indicator
-            processing_status = gr.Markdown("**🟢 Ready**", visible=True)
-
+
+            status_markdown = gr.Markdown("**🟢 Ready**", visible=True)
+
             with gr.Row():
                 with gr.Column(scale=4):
                     gallery = gr.Gallery(label="Flux Generated Image", columns=1, preview=True, height=600)
                     prompt_input = gr.Textbox(
                         label="Enter Your Prompt",
                         lines=2,
-                        placeholder="Enter prompt for your avatar..."
+                        placeholder="Type your avatar description..."
                     )
                     generate_btn = gr.Button(value="Generate", variant="primary")
-
+
             with gr.Accordion("Advanced Options", open=True):
                 with gr.Row():
                     with gr.Column():
-                        width_slider = gr.Slider(
-                            label="Width",
-                            minimum=512,
-                            maximum=1920,
-                            step=8,
-                            value=Config.DEFAULT_WIDTH
-                        )
-                        height_slider = gr.Slider(
-                            label="Height",
-                            minimum=512,
-                            maximum=1920,
-                            step=8,
-                            value=Config.DEFAULT_HEIGHT
-                        )
+                        width_slider = gr.Slider(label="Width", minimum=512, maximum=1920, step=8, value=Config.DEFAULT_WIDTH)
+                        height_slider = gr.Slider(label="Height", minimum=512, maximum=1920, step=8, value=Config.DEFAULT_HEIGHT)
                     with gr.Column():
-                        guidance_slider = gr.Slider(
-                            label="Guidance Scale",
-                            minimum=3.5,
-                            maximum=7,
-                            step=0.1,
-                            value=Config.DEFAULT_GUIDANCE_SCALE
-                        )
-                        steps_slider = gr.Slider(
-                            label="Steps",
-                            minimum=1,
-                            maximum=100,
-                            step=1,
-                            value=Config.DEFAULT_STEPS
-                        )
-
+                        guidance_slider = gr.Slider(label="Guidance Scale", minimum=3.5, maximum=7, step=0.1, value=Config.DEFAULT_GUIDANCE_SCALE)
+                        steps_slider = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=Config.DEFAULT_STEPS)
+
                 with gr.Row():
                     with gr.Column():
-                        seed_slider = gr.Slider(
-                            label="Seed (-1 for random)",
-                            minimum=-1,
-                            maximum=Config.MAX_SEED,
-                            step=1,
-                            value=-1
-                        )
-                        nums_slider = gr.Slider(
-                            label="Image Count",
-                            minimum=1,
-                            maximum=2,
-                            step=1,
-                            value=1
-                        )
+                        seed_slider = gr.Slider(label="Seed (-1 random)", minimum=-1, maximum=Config.MAX_SEED, step=1, value=-1)
+                        nums_slider = gr.Slider(label="Image Count", minimum=1, maximum=2, step=1, value=1)
                     with gr.Column():
-                        lora_scale_slider = gr.Slider(
-                            label="LoRA Scale",
-                            minimum=0.1,
-                            maximum=2.0,
-                            step=0.1,
-                            value=Config.DEFAULT_LORA_SCALE
-                        )
-
+                        lora_scale_slider = gr.Slider(label="LoRA Scale", minimum=0.1, maximum=2.0, step=0.1, value=Config.DEFAULT_LORA_SCALE)
+
                 with gr.Row():
                     with gr.Column():
-                        lora_add_text = gr.Textbox(
-                            label="Flux LoRA Path",
-                            lines=1,
-                            value=Config.DEFAULT_LORA
-                        )
+                        lora_add_text = gr.Textbox(label="Flux LoRA Path", lines=1, value=Config.DEFAULT_LORA)
                     with gr.Column():
-                        lora_word_text = gr.Textbox(
-                            label="Flux LoRA Trigger Word",
-                            lines=1,
-                            value=Config.DEFAULT_TRIGGER_WORD
-                        )
-
+                        lora_word_text = gr.Textbox(label="Flux LoRA Trigger Word", lines=1, value=Config.DEFAULT_TRIGGER_WORD)
+
                 load_lora_btn = gr.Button(value="Load Custom LoRA", variant="secondary")
-
-            # Memory optimization checkbox
-            with gr.Row():
-                memory_efficient = gr.Checkbox(
-                    label="Enable Memory Optimizations",
-                    value=True,
-                    info="Reduces memory usage but may increase generation time"
-                )
-
-            # Examples section
+
+            # Examples
             gr.Examples(
                 examples=self.example_prompts,
                 inputs=[prompt_input, lora_word_text, lora_scale_slider],
+                outputs=[],
                 cache_examples=False,
                 examples_per_page=4
             )
-
-            # Wire up the event handlers
-            # Status update functions
+
+            # Helper functions for UI status
             def update_status_processing():
                 return "**⏳ Processing...**"
-
+
             def update_status_done():
                 return "**✅ Done!**"
-
-            def update_memory_settings(enable_memory_opt):
-                global Config
-                Config.ENABLE_MEMORY_EFFICIENT_ATTENTION = enable_memory_opt
-                Config.ENABLE_SEQUENTIAL_CPU_OFFLOAD = enable_memory_opt
-                Config.ENABLE_ATTENTION_SLICING = "max" if enable_memory_opt else None
-                return gr.update()
-
-            # Generate button click workflow
+
+            # Workflow for generate
             generate_btn.click(
-                fn=update_status_processing,
-                inputs=[],
-                outputs=[processing_status]
+                fn=update_status_processing,
+                inputs=[],
+                outputs=[status_markdown]
             ).then(
                 fn=self.generator.generate,
                 inputs=[
@@ -349,45 +257,34 @@ class FluxUI:
             ).then(
                 fn=update_status_done,
                 inputs=[],
-                outputs=[processing_status]
+                outputs=[status_markdown]
             )
-
-            # Load LoRA button click workflow
+
+            # Load LoRA
             load_lora_btn.click(
                 fn=self.generator.load_lora,
                 inputs=[lora_add_text],
                 outputs=[lora_add_text]
             )
-
-            # Memory optimization checkbox event
-            memory_efficient.change(
-                fn=update_memory_settings,
-                inputs=[memory_efficient],
-                outputs=[]
-            )
-
-            return demo
 
+        return demo
 
 # -----------------------------------------------------------------------------
-# Main application
+# Main entry point
 # -----------------------------------------------------------------------------
 def main():
     try:
-        # Create a generator with memory optimizations
         generator = FluxGenerator()
-
-        # Build and launch UI
         ui = FluxUI(generator)
         demo = ui.build()
-
-        # Launch with low cache size to prevent memory issues
-        demo.queue(max_size=1).launch(share=False)
-
+        # Launch with default queue
+        demo.queue().launch()
     except Exception as e:
         print(f"Application startup failed: {str(e)}")
-        # Show error in UI if possible
        with gr.Blocks() as error_demo:
-            gr.Markdown(f"# Error Starting Application\n\n{str(e)}\n\nPlease check the logs for more details.")
-            gr.Markdown("This might be due to memory limitations or GPU compatibility issues.")
-            error_demo.launch()
+            gr.Markdown(f"# Error Starting Application\n\n{str(e)}")
+            gr.Markdown("Check logs for more details.")
+        error_demo.launch()
+
+if __name__ == "__main__":
+    main()
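
For reference, a minimal sketch (not part of this commit) of how the updated FluxGenerator could be exercised outside the Gradio UI. The import path, prompt text, and explicit argument values below are assumptions for illustration; the generate() signature and its (images, seed) return value follow the diff above.

# Illustrative sketch only; import path and argument values are assumed.
from app import FluxGenerator

generator = FluxGenerator()          # loads the Flux pipeline and the default LoRA
images, seed = generator.generate(
    prompt="ohwx blue alien in black techwear",   # translated to English internally
    lora_word="ohwx",                             # trigger word appended to the prompt
    lora_scale=0.9,
    width=1024,
    height=1024,
    guidance_scale=3.5,
    steps=28,
    seed=-1,                                      # -1 selects a random seed
    num_images=1,
)
images[0].save(f"avatar_{seed}.png")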