Spaces:
Paused
Paused
Update app.py
Browse files
app.py
CHANGED
@@ -447,25 +447,23 @@ import concurrent.futures
|
|
447 |
import asyncio
|
448 |
|
449 |
async def bot(history, choice, tts_choice, retrieval_mode, model_choice):
|
450 |
-
# Initialize an empty response
|
451 |
response = ""
|
452 |
|
453 |
-
# Start generating
|
454 |
-
|
455 |
audio_future = None
|
456 |
|
457 |
-
|
458 |
-
|
459 |
-
chunk = await text_future
|
460 |
response += chunk
|
461 |
history[-1][1] += chunk
|
462 |
yield history, None # Stream the text output as it's generated
|
463 |
|
464 |
-
# Start generating Parler TTS if selected
|
465 |
if tts_choice == "Beta" and audio_future is None:
|
466 |
audio_future = asyncio.create_task(generate_audio_parler_tts(response, callback=lambda audio_chunk: yield_audio(audio_chunk)))
|
467 |
|
468 |
-
# Wait for the audio to finish streaming
|
469 |
if audio_future is not None:
|
470 |
await audio_future
|
471 |
|
@@ -475,11 +473,13 @@ def yield_audio(audio_chunk):
|
|
475 |
write_wav(temp_audio_path, 16000, audio_chunk.astype(np.float32))
|
476 |
return temp_audio_path
|
477 |
|
|
|
478 |
async def generate_text(history, choice, retrieval_mode, model_choice):
|
479 |
# Simulate text generation chunk by chunk
|
480 |
-
|
|
|
481 |
await asyncio.sleep(0.05) # Simulate time delay between character generation
|
482 |
-
yield char
|
483 |
|
484 |
|
485 |
|
|
|
447 |
import asyncio


async def bot(history, choice, tts_choice, retrieval_mode, model_choice):
    """Stream a generated text response into ``history``, optionally starting
    Parler-TTS audio generation once text begins arriving.

    Args:
        history: Chat history; the last entry's second element is appended to
            in place as chunks arrive.
        choice: Passed through to ``generate_text``.
        tts_choice: When ``"Beta"``, a Parler-TTS task is started.
        retrieval_mode: Passed through to ``generate_text``.
        model_choice: Passed through to ``generate_text``.

    Yields:
        ``(history, None)`` after each text chunk so the UI can render the
        text incrementally.
    """
    response = ""

    # Start generating text asynchronously
    text_gen = generate_text(history, choice, retrieval_mode, model_choice)
    audio_future = None

    # Iterate over the text generator, accumulating the full response
    async for chunk in text_gen:
        response += chunk
        history[-1][1] += chunk
        yield history, None  # Stream the text output as it's generated

        # Start generating Parler TTS if selected and not started already.
        # NOTE(review): the task is created after the FIRST chunk, so
        # `response` is still partial at that point — confirm intentional.
        if tts_choice == "Beta" and audio_future is None:
            # Pass yield_audio directly; the original wrapped it in a
            # redundant `lambda audio_chunk: yield_audio(audio_chunk)`.
            audio_future = asyncio.create_task(
                generate_audio_parler_tts(response, callback=yield_audio)
            )

    # Wait for the audio to finish streaming if it was started
    if audio_future is not None:
        await audio_future
|
|
|
473 |
write_wav(temp_audio_path, 16000, audio_chunk.astype(np.float32))
|
474 |
return temp_audio_path
|
475 |
|
476 |
+
# Text generator as an async generator
|
477 |
# Text generator as an async generator
async def generate_text(history, choice, retrieval_mode, model_choice):
    """Asynchronously yield a canned response one character at a time.

    The parameters are accepted for interface compatibility with the caller
    but are not consulted by this simulated generator.

    Yields:
        Single characters of the placeholder response, with a short delay
        between each to mimic streaming generation.
    """
    simulated_reply = "Generating text response..."
    for character in simulated_reply:
        # Simulated per-character generation latency
        await asyncio.sleep(0.05)
        yield character
483 |
|
484 |
|
485 |
|