Update app.py
app.py
CHANGED
@@ -340,35 +340,6 @@ chain_neo4j = (
 
 
 
-# def bot(history, choice, tts_choice, retrieval_mode, model_choice):
-#     if not history:
-#         return history
-
-#     # Select the model
-#     selected_model = chat_model if model_choice == "GPT-4o" else phi_pipe
-
-#     response, addresses = generate_answer(history[-1][0], choice, retrieval_mode, selected_model)
-#     history[-1][1] = ""
-
-#     with concurrent.futures.ThreadPoolExecutor() as executor:
-#         if tts_choice == "Alpha":
-#             audio_future = executor.submit(generate_audio_elevenlabs, response)
-#         elif tts_choice == "Beta":
-#             audio_future = executor.submit(generate_audio_parler_tts, response)
-#         # elif tts_choice == "Gamma":
-#         #     audio_future = executor.submit(generate_audio_mars5, response)
-
-#         for character in response:
-#             history[-1][1] += character
-#             time.sleep(0.05)
-#             yield history, None
-
-#         audio_path = audio_future.result()
-#         yield history, audio_path
-
-#     history.append([response, None])
-
-
 def bot(history, choice, tts_choice, retrieval_mode, model_choice):
     if not history:
         return history
@@ -377,21 +348,50 @@ def bot(history, choice, tts_choice, retrieval_mode, model_choice):
     selected_model = chat_model if model_choice == "GPT-4o" else phi_pipe
 
     response, addresses = generate_answer(history[-1][0], choice, retrieval_mode, selected_model)
-    history[-1][1] = response  # Set the full response in the history immediately
+    history[-1][1] = ""
 
     with concurrent.futures.ThreadPoolExecutor() as executor:
         if tts_choice == "Alpha":
-            audio_stream = generate_audio_elevenlabs(response)
+            audio_future = executor.submit(generate_audio_elevenlabs, response)
         elif tts_choice == "Beta":
-            audio_stream = generate_audio_parler_tts(response)
+            audio_future = executor.submit(generate_audio_parler_tts, response)
+        # elif tts_choice == "Gamma":
+        #     audio_future = executor.submit(generate_audio_mars5, response)
+
+        for character in response:
+            history[-1][1] += character
+            time.sleep(0.05)
+            yield history, None
 
-        # Stream audio chunks in real-time without interrupting text output
-        for audio_chunk_path in audio_stream:
-            yield history, audio_chunk_path  # Yield audio chunks as they are generated
+        audio_path = audio_future.result()
+        yield history, audio_path
 
     history.append([response, None])
 
 
+# def bot(history, choice, tts_choice, retrieval_mode, model_choice):
+#     if not history:
+#         return history
+
+#     # Select the model
+#     selected_model = chat_model if model_choice == "GPT-4o" else phi_pipe
+
+#     response, addresses = generate_answer(history[-1][0], choice, retrieval_mode, selected_model)
+#     history[-1][1] = response  # Set the full response in the history immediately
+
+#     with concurrent.futures.ThreadPoolExecutor() as executor:
+#         if tts_choice == "Alpha":
+#             audio_stream = generate_audio_elevenlabs(response)
+#         elif tts_choice == "Beta":
+#             audio_stream = generate_audio_parler_tts(response)
+
+#         # Stream audio chunks in real-time without interrupting text output
+#         for audio_chunk_path in audio_stream:
+#             yield history, audio_chunk_path  # Yield audio chunks as they are generated
+
+#     history.append([response, None])
+
+
 
 
 
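For reference, a minimal self-contained sketch of the concurrency pattern the updated bot() relies on: the TTS call is submitted to a concurrent.futures.ThreadPoolExecutor, the text response is streamed character by character while audio renders in the background, and the audio path is yielded once the future resolves. The fake_tts helper and the hard-coded response string below are stand-ins for the app's generate_audio_elevenlabs / generate_audio_parler_tts calls and generate_answer output, not part of the actual Space.

# Sketch only: fake_tts and the fixed response are placeholders for the real TTS and RAG calls.
import concurrent.futures
import time


def fake_tts(text: str) -> str:
    """Stand-in for generate_audio_elevenlabs / generate_audio_parler_tts."""
    time.sleep(0.5)                 # pretend the TTS engine is rendering
    return "/tmp/audio.mp3"         # path the real functions would return


def stream_bot(history):
    """Yield (history, audio_path) pairs the way the updated bot() does."""
    response = "Hello from the assistant."   # placeholder for generate_answer()
    history[-1][1] = ""

    with concurrent.futures.ThreadPoolExecutor() as executor:
        audio_future = executor.submit(fake_tts, response)   # TTS runs in the background

        for character in response:             # stream text one character at a time
            history[-1][1] += character
            time.sleep(0.05)
            yield history, None                # chat updates arrive before the audio

        yield history, audio_future.result()   # finally hand back the audio path


if __name__ == "__main__":
    chat = [["Hi", None]]
    for hist, audio in stream_bot(chat):
        pass
    print(chat[-1][1], audio)   # full response text plus the (fake) audio path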