sagar007 committed
Commit f216857 · verified · 1 Parent(s): fff1212

Update app.py

Files changed (1)
  1. app.py +53 -92
app.py CHANGED
@@ -18,9 +18,7 @@ try:
 
     # Try installing espeak with proper package manager commands
     try:
-        # Update package list first
         subprocess.run(['apt-get', 'update'], check=True)
-        # Try installing espeak first (more widely available)
         subprocess.run(['apt-get', 'install', '-y', 'espeak'], check=True)
     except subprocess.CalledProcessError:
         print("Warning: Could not install espeak. Attempting espeak-ng...")
@@ -33,7 +31,6 @@ except Exception as e:
     print(f"Warning: Initial setup error: {str(e)}")
     print("Continuing with limited functionality...")
 
-
 # --- Initialization (Do this ONCE) ---
 
 model_name = "deepseek-ai/DeepSeek-R1-Distill-Llama-8B"
@@ -66,7 +63,7 @@ try:
     from models import build_model # type: ignore
     from kokoro import generate # type: ignore
 
-    device = 'cuda' if torch.cuda.is_available() else 'cpu' # Correct device handling
+    device = 'cuda' if torch.cuda.is_available() else 'cpu'
     TTS_MODEL = build_model('Kokoro-82M/kokoro-v0_19.pth', device)
 
     # Load default voice
@@ -83,8 +80,6 @@ except Exception as e:
     print(f"Warning: Could not initialize Kokoro TTS: {str(e)}")
    TTS_ENABLED = False
 
-
-
 def get_web_results(query: str, max_results: int = 5) -> List[Dict[str, str]]:
     """Get web search results using DuckDuckGo"""
     try:
@@ -100,27 +95,19 @@ def get_web_results(query: str, max_results: int = 5) -> List[Dict[str, str]]:
         print(f"Error in web search: {e}")
         return []
 
-
 def format_prompt(query: str, context: List[Dict[str, str]]) -> str:
     """Format the prompt with web context"""
     current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
     context_lines = '\n'.join([f'- [{res["title"]}]: {res["snippet"]}' for res in context])
     return f"""You are an intelligent search assistant. Answer the user's query using the provided web context.
-
 Current Time: {current_time}
-
 Important: For election-related queries, please distinguish clearly between different election years and types (presidential vs. non-presidential). Only use information from the provided web context.
-
 Query: {query}
-
 Web Context:
 {context_lines}
-
 Provide a detailed answer in markdown format. Include relevant information from sources and cite them using [1], [2], etc. If the query is about elections, clearly specify which year and type of election you're discussing.
-
 Answer:"""
 
-
 def format_sources(web_results: List[Dict[str, str]]) -> str:
     """Format sources with more details"""
     if not web_results:
@@ -143,7 +130,6 @@ def format_sources(web_results: List[Dict[str, str]]) -> str:
     sources_html += "</div>"
     return sources_html
 
-
 @spaces.GPU(duration=30)
 def generate_answer(prompt: str) -> str:
     """Generate answer using the DeepSeek model"""
@@ -168,47 +154,59 @@ def generate_answer(prompt: str) -> str:
     )
     return tokenizer.decode(outputs[0], skip_special_tokens=True)
 
-
-
 @spaces.GPU(duration=30)
-def generate_speech_with_gpu(text: str, voice_name: str = 'af', tts_model = TTS_MODEL, voicepack = VOICEPACK) -> Tuple[int, np.ndarray] | None:
+def generate_speech_with_gpu(text: str, voice_name: str = 'af', tts_model=TTS_MODEL, voicepack=VOICEPACK) -> Tuple[int, np.ndarray] | None:
     """Generate speech from text using Kokoro TTS model."""
     if not TTS_ENABLED or tts_model is None:
         print("TTS is not enabled or model is not loaded.")
         return None
 
     try:
-        # Load voicepack if it hasn't been loaded or if a different voice is requested
-        if voice_name != 'af' or voicepack is None :
-            device = 'cuda' if torch.cuda.is_available() else 'cpu'
-            voicepack = torch.load(f'Kokoro-82M/voices/{voice_name}.pt', map_location=device, weights_only=True)
+        device = 'cuda' if torch.cuda.is_available() else 'cpu'
+
+        # Handle voicepack loading
+        voice_file = f'Kokoro-82M/voices/{voice_name}.pt'
+        if voice_name == 'af' and voicepack is not None:
+            # Use the pre-loaded default voicepack
+            pass
+        elif os.path.exists(voice_file):
+            # Load the selected voicepack if it exists
+            voicepack = torch.load(voice_file, map_location=device, weights_only=True)
+        else:
+            # Fall back to default 'af' if selected voicepack is missing
+            print(f"Voicepack {voice_name}.pt not found. Falling back to default 'af'.")
+            voice_file = 'Kokoro-82M/voices/af.pt'
+            if os.path.exists(voice_file):
+                voicepack = torch.load(voice_file, map_location=device, weights_only=True)
+            else:
+                print("Default voicepack 'af.pt' not found. Cannot generate audio.")
+                return None
 
         # Clean the text
         clean_text = ' '.join([line for line in text.split('\n') if not line.startswith('#')])
         clean_text = clean_text.replace('[', '').replace(']', '').replace('*', '')
 
-        # Split long text into chunks (improved logic)
+        # Split long text into chunks
         max_chars = 1000
         chunks = []
         if len(clean_text) > max_chars:
             sentences = clean_text.split('.')
             current_chunk = ""
             for sentence in sentences:
-                if len(current_chunk) + len(sentence) + 1 < max_chars: # +1 for the dot
+                if len(current_chunk) + len(sentence) + 1 < max_chars:
                     current_chunk += sentence + "."
                 else:
                     chunks.append(current_chunk.strip())
                     current_chunk = sentence + "."
-            if current_chunk: # Add the last chunk
+            if current_chunk:
                 chunks.append(current_chunk.strip())
         else:
             chunks = [clean_text]
 
-
         # Generate audio for each chunk
         audio_chunks = []
         for chunk in chunks:
-            if chunk.strip(): # Only process non-empty chunks
+            if chunk.strip():
                 chunk_audio, _ = generate(tts_model, chunk, voicepack, lang='a')
                 if isinstance(chunk_audio, torch.Tensor):
                     chunk_audio = chunk_audio.cpu().numpy()
@@ -223,12 +221,8 @@ def generate_speech_with_gpu(text: str, voice_name: str = 'af', tts_model = TTS_
 
     except Exception as e:
         print(f"Error generating speech: {str(e)}")
-        import traceback
-        traceback.print_exc()
         return None
 
-
-
 def process_query(query: str, history: List[List[str]], selected_voice: str = 'af') -> Dict[str, Any]:
     """Process user query with streaming effect"""
     try:
@@ -242,11 +236,11 @@ def process_query(query: str, history: List[List[str]], selected_voice: str = 'a
         current_history = history + [[query, "*Searching...*"]]
 
         yield {
-            answer_output: gr.Markdown("*Searching & Thinking...*"),
-            sources_output: gr.HTML(sources_html),
-            search_btn: gr.Button("Searching...", interactive=False),
-            chat_history_display: current_history,
-            audio_output: None
+            'answer_output': gr.Markdown("*Searching & Thinking...*"),
+            'sources_output': gr.HTML(sources_html),
+            'search_btn': gr.Button("Searching...", interactive=False),
+            'chat_history_display': current_history,
+            'audio_output': None
         }
 
         # Generate answer
@@ -254,47 +248,47 @@ def process_query(query: str, history: List[List[str]], selected_voice: str = 'a
         answer = generate_answer(prompt)
         final_answer = answer.split("Answer:")[-1].strip()
 
-        # Update history *before* TTS (important for correct display)
+        # Update history before TTS
         updated_history = history + [[query, final_answer]]
 
-
         # Generate speech from the answer (only if enabled)
         if TTS_ENABLED:
-            yield { # Intermediate update before TTS
-                answer_output: gr.Markdown(final_answer),
-                sources_output: gr.HTML(sources_html),
-                search_btn: gr.Button("Generating audio...", interactive=False),
-                chat_history_display: updated_history,
-                audio_output: None
+            yield {
+                'answer_output': gr.Markdown(final_answer),
+                'sources_output': gr.HTML(sources_html),
+                'search_btn': gr.Button("Generating audio...", interactive=False),
+                'chat_history_display': updated_history,
+                'audio_output': None
             }
             try:
                 audio = generate_speech_with_gpu(final_answer, selected_voice)
+                if audio is None:
+                    final_answer += "\n\n*Audio generation failed. The voicepack may be missing or incompatible.*"
             except Exception as e:
-                print(f"Error during TTS: {e}")
+                final_answer += f"\n\n*Error generating audio: {str(e)}*"
                 audio = None
         else:
+            final_answer += "\n\n*TTS is disabled. Audio not available.*"
            audio = None
 
-
-
        yield {
-            answer_output: gr.Markdown(final_answer),
-            sources_output: gr.HTML(sources_html),
-            search_btn: gr.Button("Search", interactive=True),
-            chat_history_display: updated_history,
-            audio_output: audio if audio is not None else gr.Audio(value=None) # Ensure valid audio output
+            'answer_output': gr.Markdown(final_answer),
+            'sources_output': gr.HTML(sources_html),
+            'search_btn': gr.Button("Search", interactive=True),
+            'chat_history_display': updated_history,
+            'audio_output': audio if audio is not None else gr.Audio(value=None)
        }
 
    except Exception as e:
        error_message = str(e)
        if "GPU quota" in error_message:
-            error_message = "⚠️ GPU quota exceeded. Please try again later when the daily quota resets."
+            error_message = "⚠️ GPU quota exceeded. Please try again later when the daily quota resets."
        yield {
-            answer_output: gr.Markdown(f"Error: {error_message}"),
-            sources_output: gr.HTML(sources_html), #Still show sources on error
-            search_btn: gr.Button("Search", interactive=True),
-            chat_history_display: history + [[query, f"*Error: {error_message}*"]],
-            audio_output: None
+            'answer_output': gr.Markdown(f"Error: {error_message}"),
+            'sources_output': gr.HTML(sources_html),
+            'search_btn': gr.Button("Search", interactive=True),
+            'chat_history_display': history + [[query, f"*Error: {error_message}*"]],
+            'audio_output': None
        }
 
 # Update the CSS for better contrast and readability
@@ -303,7 +297,6 @@ css = """
     max-width: 1200px !important;
     background-color: #f7f7f8 !important;
 }
-
 #header {
     text-align: center;
     margin-bottom: 2rem;
@@ -312,17 +305,14 @@ css = """
     border-radius: 12px;
     color: white;
 }
-
 #header h1 {
     color: white;
     font-size: 2.5rem;
     margin-bottom: 0.5rem;
 }
-
 #header h3 {
     color: #a8a9ab;
 }
-
 .search-container {
     background: #1a1b1e;
     border-radius: 12px;
@@ -330,40 +320,31 @@ css = """
     padding: 1rem;
     margin-bottom: 1rem;
 }
-
 .search-box {
     padding: 1rem;
     background: #2c2d30;
     border-radius: 8px;
     margin-bottom: 1rem;
 }
-
-/* Style the input textbox */
 .search-box input[type="text"] {
     background: #3a3b3e !important;
     border: 1px solid #4a4b4e !important;
     color: white !important;
     border-radius: 8px !important;
 }
-
 .search-box input[type="text"]::placeholder {
     color: #a8a9ab !important;
 }
-
-/* Style the search button */
 .search-box button {
     background: #2563eb !important;
     border: none !important;
 }
-
-/* Results area styling */
 .results-container {
     background: #2c2d30;
     border-radius: 8px;
     padding: 1rem;
     margin-top: 1rem;
 }
-
 .answer-box {
     background: #3a3b3e;
     border-radius: 8px;
@@ -371,19 +352,16 @@ css = """
     color: white;
     margin-bottom: 1rem;
 }
-
 .answer-box p {
     color: #e5e7eb;
     line-height: 1.6;
 }
-
 .sources-container {
     margin-top: 1rem;
     background: #2c2d30;
     border-radius: 8px;
     padding: 1rem;
 }
-
 .source-item {
     display: flex;
     padding: 12px;
@@ -392,21 +370,17 @@ css = """
     border-radius: 8px;
     transition: all 0.2s;
 }
-
 .source-item:hover {
     background: #4a4b4e;
 }
-
 .source-number {
     font-weight: bold;
     margin-right: 12px;
     color: #60a5fa;
 }
-
 .source-content {
     flex: 1;
 }
-
 .source-title {
     color: #60a5fa;
     font-weight: 500;
@@ -414,19 +388,16 @@ css = """
     display: block;
     margin-bottom: 4px;
 }
-
 .source-date {
     color: #a8a9ab;
     font-size: 0.9em;
     margin-left: 8px;
 }
-
 .source-snippet {
     color: #e5e7eb;
     font-size: 0.9em;
     line-height: 1.4;
 }
-
 .chat-history {
     max-height: 400px;
     overflow-y: auto;
@@ -435,47 +406,37 @@ css = """
     border-radius: 8px;
     margin-top: 1rem;
 }
-
 .examples-container {
     background: #2c2d30;
     border-radius: 8px;
     padding: 1rem;
     margin-top: 1rem;
 }
-
 .examples-container button {
     background: #3a3b3e !important;
     border: 1px solid #4a4b4e !important;
     color: #e5e7eb !important;
 }
-
-/* Markdown content styling */
 .markdown-content {
     color: #e5e7eb !important;
 }
-
 .markdown-content h1, .markdown-content h2, .markdown-content h3 {
     color: white !important;
 }
-
 .markdown-content a {
     color: #60a5fa !important;
 }
-
-/* Accordion styling */
 .accordion {
     background: #2c2d30 !important;
     border-radius: 8px !important;
     margin-top: 1rem !important;
 }
-
 .voice-selector {
     margin-top: 1rem;
     background: #2c2d30;
     border-radius: 8px;
     padding: 0.5rem;
 }
-
 .voice-selector select {
     background: #3a3b3e !important;
     color: white !important;
@@ -547,4 +508,4 @@ with gr.Blocks(title="AI Search Assistant", css=css, theme="dark") as demo:
     )
 
 if __name__ == "__main__":
-    demo.launch(share=True)
+    demo.launch(share=True)
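
For reference, the sentence-based chunking this commit introduces inside generate_speech_with_gpu can be exercised on its own. The sketch below is a minimal standalone version, not part of app.py: the helper name chunk_text and the sample string are assumptions, while max_chars mirrors the 1000-character limit used in the diff.

# Minimal standalone sketch of the sentence-based chunking added in this commit.
# chunk_text is a hypothetical helper name; max_chars mirrors the diff's 1000-char limit.
def chunk_text(clean_text: str, max_chars: int = 1000) -> list:
    """Split text into chunks of at most max_chars, breaking at sentence boundaries."""
    if len(clean_text) <= max_chars:
        return [clean_text]
    chunks = []
    current_chunk = ""
    for sentence in clean_text.split('.'):
        if len(current_chunk) + len(sentence) + 1 < max_chars:  # +1 for the trailing dot
            current_chunk += sentence + "."
        else:
            chunks.append(current_chunk.strip())
            current_chunk = sentence + "."
    if current_chunk:
        chunks.append(current_chunk.strip())
    return chunks

if __name__ == "__main__":
    sample = "This is one sentence. " * 120  # roughly 2,600 characters
    pieces = chunk_text(sample)
    print(len(pieces), [len(p) for p in pieces])  # each piece stays under 1000 characters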