Update app.py
app.py CHANGED
@@ -274,27 +274,9 @@ def initialize_agent_with_prompt(prompt_template):
     return agent
 
 
-# def generate_answer(message, choice):
-#     logging.debug(f"generate_answer called with prompt_choice: {choice}")
-
-#     if choice == "Details":
-#         agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_1)
-#     elif choice == "Conversational":
-#         agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_2)
-#     else:
-#         logging.error(f"Invalid prompt_choice: {choice}. Defaulting to 'Conversational'")
-#         agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_2)
-#     response = agent(message)
-
-#     addresses = extract_addresses(response['output'])
-#     return response['output'], addresses
-
 def generate_answer(message, choice):
     logging.debug(f"generate_answer called with prompt_choice: {choice}")
 
-    # Reset conversational memory
-    conversational_memory.clear()
-
     if choice == "Details":
         agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_1)
     elif choice == "Conversational":
@@ -309,6 +291,8 @@ def generate_answer(message, choice):
 
 
 
+
+
 def bot(history, choice, tts_choice, state):
     if not history:
         return history
@@ -495,7 +479,21 @@ base_audio_drive = "/data/audio"
 #     return stream, full_text, result
 
 
+# def transcribe_function(stream, new_chunk):
+#     sr, y = new_chunk[0], new_chunk[1]
+#     y = y.astype(np.float32) / np.max(np.abs(y))
+#     if stream is not None:
+#         stream = np.concatenate([stream, y])
+#     else:
+#         stream = y
+#     result = pipe_asr({"array": stream, "sampling_rate": sr}, return_timestamps=False)
+#     full_text = result.get("text", "")
+#     return stream, full_text  # Return the transcribed text
+
 def transcribe_function(stream, new_chunk):
+    # Reset conversational memory
+    conversational_memory.clear()
+
     sr, y = new_chunk[0], new_chunk[1]
     y = y.astype(np.float32) / np.max(np.abs(y))
     if stream is not None:
@@ -504,7 +502,7 @@ def transcribe_function(stream, new_chunk):
         stream = y
     result = pipe_asr({"array": stream, "sampling_rate": sr}, return_timestamps=False)
     full_text = result.get("text", "")
-    return stream, full_text
+    return stream, full_text
 
 def update_map_with_response(history):
     if not history:
@@ -711,9 +709,7 @@ with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
     clear_button.click(fn=clear_textbox, inputs=None, outputs=chat_input)
 
     audio_input = gr.Audio(sources=["microphone"], streaming=True, type='numpy')
-    audio_input.stream(transcribe_function, inputs=[state, audio_input], outputs=[state, chat_input], api_name="voice_query_to_text")
-
-
+    audio_input.stream(transcribe_function, inputs=[state, audio_input], outputs=[state, chat_input], api_name="voice_query_to_text")
 
 
 
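The commit removes the conversational_memory.clear() call from generate_answer and instead calls it at the top of transcribe_function. The diff does not show how conversational_memory is constructed; the following is a minimal sketch that assumes a LangChain ConversationBufferWindowMemory (an assumption, not confirmed by the diff) purely to illustrate what clear() resets.

# Sketch only: the real app.py memory configuration is not shown in this diff.
from langchain.memory import ConversationBufferWindowMemory

# Assumed setup -- memory_key and window size are illustrative values.
conversational_memory = ConversationBufferWindowMemory(
    memory_key="chat_history",
    k=10,
    return_messages=True,
)

# Store one exchange, then wipe it; clear() simply empties the stored history.
conversational_memory.save_context({"input": "Hi"}, {"output": "Hello!"})
print(conversational_memory.load_memory_variables({}))  # one turn of history
conversational_memory.clear()
print(conversational_memory.load_memory_variables({}))  # empty history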
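Taken together, the transcribe_function and audio_input hunks implement streaming voice-to-text: each microphone chunk is appended to an audio buffer held in gr.State, and the whole buffer is re-run through the ASR pipeline on every update. Below is a self-contained sketch of that wiring under stated assumptions: the Whisper checkpoint name and the Textbox setup are illustrative, and the conversational_memory.clear() call added by this commit is omitted so the example runs on its own; only transcribe_function's body, the pipe_asr invocation, and the audio_input.stream(...) call mirror the diff.

# Self-contained sketch; model name and UI labels are assumptions.
import gradio as gr
import numpy as np
from transformers import pipeline

pipe_asr = pipeline("automatic-speech-recognition", model="openai/whisper-base")

def transcribe_function(stream, new_chunk):
    # Gradio delivers each microphone chunk as (sampling_rate, samples).
    sr, y = new_chunk[0], new_chunk[1]
    y = y.astype(np.float32) / np.max(np.abs(y))
    # Append the chunk to the running buffer kept in gr.State and
    # re-transcribe the whole buffer.
    if stream is not None:
        stream = np.concatenate([stream, y])
    else:
        stream = y
    result = pipe_asr({"array": stream, "sampling_rate": sr}, return_timestamps=False)
    full_text = result.get("text", "")
    return stream, full_text

with gr.Blocks() as demo:
    state = gr.State()                       # running audio buffer
    chat_input = gr.Textbox(label="Query")   # receives the transcription
    audio_input = gr.Audio(sources=["microphone"], streaming=True, type="numpy")
    audio_input.stream(transcribe_function, inputs=[state, audio_input],
                       outputs=[state, chat_input], api_name="voice_query_to_text")

demo.launch()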