Update app.py

app.py CHANGED
@@ -275,46 +275,27 @@ def generate_answer(message, choice):
     addresses = extract_addresses(response['output'])
     return response['output'], addresses
 
-# def bot(history, choice, tts_model):
-#     if not history:
-#         return history
-#     response, addresses = generate_answer(history[-1][0], choice)
-#     history[-1][1] = ""
-
-#     # Generate audio for the entire response in a separate thread
-#     with concurrent.futures.ThreadPoolExecutor() as executor:
-#         if tts_model == "ElevenLabs":
-#             audio_future = executor.submit(generate_audio_elevenlabs, response)
-#         else:
-#             audio_future = executor.submit(generate_audio_parler_tts, response)
-
-#     for character in response:
-#         history[-1][1] += character
-#         time.sleep(0.05)  # Adjust the speed of text appearance
-#         yield history, None
-
-#     audio_path = audio_future.result()
-#     yield history, audio_path
-
 def bot(history, choice, tts_model):
     if not history:
         return history
     response, addresses = generate_answer(history[-1][0], choice)
     history[-1][1] = ""
-
+
+    # Generate audio for the entire response in a separate thread
     with concurrent.futures.ThreadPoolExecutor() as executor:
         if tts_model == "ElevenLabs":
             audio_future = executor.submit(generate_audio_elevenlabs, response)
         else:
             audio_future = executor.submit(generate_audio_parler_tts, response)
-
+
     for character in response:
         history[-1][1] += character
-        time.sleep(0.05)
-        yield history, None
-
+        time.sleep(0.05)  # Adjust the speed of text appearance
+        yield history, None
+
     audio_path = audio_future.result()
-    yield history, audio_path
+    yield history, audio_path
+
 
 def add_message(history, message):
     history.append((message, None))
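The bot() kept by this hunk is a generator that overlaps TTS synthesis with the typing effect: the audio job is submitted to a worker thread first, the reply text is then streamed character by character, and the audio path is only yielded once the future resolves. A minimal, self-contained sketch of that pattern follows; make_audio is a hypothetical stand-in for the app's generate_audio_elevenlabs / generate_audio_parler_tts helpers.

import concurrent.futures
import time

def make_audio(text):
    # Hypothetical TTS helper; simulate synthesis latency.
    time.sleep(1.0)
    return f"/tmp/{abs(hash(text))}.mp3"

def stream_reply(response):
    with concurrent.futures.ThreadPoolExecutor() as executor:
        # Kick off audio generation before streaming any text.
        audio_future = executor.submit(make_audio, response)
        shown = ""
        for character in response:
            shown += character
            time.sleep(0.05)   # typing-effect speed
            yield shown, None  # partial text, no audio yet
        # Blocks only if synthesis is still running at this point.
        yield shown, audio_future.result()

for text, audio in stream_reply("Hello from RADAR"):
    print(repr(text), audio)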
@@ -576,100 +557,48 @@ def update_images():
     image_3 = generate_image(hardcoded_prompt_3)
     return image_1, image_2, image_3
 
-# with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
-
-#     with gr.Row():
-#         with gr.Column():
-#             state = gr.State()
-
-#             chatbot = gr.Chatbot([], elem_id="RADAR:Channel 94.1", bubble_full_width=False)
-#             choice = gr.Radio(label="Select Style", choices=["Details", "Conversational"], value="Conversational")
-#             tts_choice = gr.Radio(label="Select TTS Model", choices=["ElevenLabs", "Parler TTS"], value="Parler TTS")
-
-#             gr.Markdown("<h1 style='color: red;'>Talk to RADAR</h1>", elem_id="voice-markdown")
-#             chat_input = gr.Textbox(show_copy_button=True, interactive=True, show_label=False, label="ASK Radar !!!")
-#             chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])
-#             bot_msg = chat_msg.then(bot, [chatbot, choice, tts_choice], [chatbot, gr.Audio(interactive=False, autoplay=True)])
-#             bot_msg.then(lambda: gr.Textbox(value="", interactive=True, placeholder="Ask Radar!!!...", show_label=False), None, [chat_input])
-#             chatbot.like(print_like_dislike, None, None)
-#             clear_button = gr.Button("Clear")
-#             clear_button.click(fn=clear_textbox, inputs=None, outputs=chat_input)
-
-
-#             audio_input = gr.Audio(sources=["microphone"], streaming=True, type='numpy')
-#             audio_input.stream(transcribe_function, inputs=[state, audio_input], outputs=[state, chat_input], api_name="SAMLOne_real_time")
-
-#             # gr.Markdown("<h1 style='color: red;'>Map</h1>", elem_id="location-markdown")
-#             # location_output = gr.HTML()
-#             # bot_msg.then(show_map_if_details, [chatbot, choice], [location_output, location_output])
-
-#         # with gr.Column():
-#             # weather_output = gr.HTML(value=fetch_local_weather())
-#             # news_output = gr.HTML(value=fetch_local_news())
-#             # news_output = gr.HTML(value=fetch_local_events())
-
-#         with gr.Column():
-
-#             image_output_1 = gr.Image(value=generate_image(hardcoded_prompt_1), width=400, height=400)
-#             image_output_2 = gr.Image(value=generate_image(hardcoded_prompt_2), width=400, height=400)
-#             image_output_3 = gr.Image(value=generate_image(hardcoded_prompt_3), width=400, height=400)
-
-
-#             refresh_button = gr.Button("Refresh Images")
-#             refresh_button.click(fn=update_images, inputs=None, outputs=[image_output_1, image_output_2, image_output_3])
-
-# demo.queue()
-# demo.launch(share=True)
-
-def generate_follow_up_buttons(response):
-    return gr.update(visible=True), gr.update(value=response)
-
-def handle_follow_up_choice(choice, history):
-    follow_up_responses = {
-        "Question 1": "This is the response to follow-up question 1.",
-        "Question 2": "This is the response to follow-up question 2."
-    }
-    follow_up_response = follow_up_responses.get(choice, "Sorry, I didn't understand that choice.")
-    history.append((choice, follow_up_response))
-    return history, gr.update(visible=False)
-
-
-
 with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
-
+
     with gr.Row():
         with gr.Column():
             state = gr.State()
+
             chatbot = gr.Chatbot([], elem_id="RADAR:Channel 94.1", bubble_full_width=False)
             choice = gr.Radio(label="Select Style", choices=["Details", "Conversational"], value="Conversational")
             tts_choice = gr.Radio(label="Select TTS Model", choices=["ElevenLabs", "Parler TTS"], value="Parler TTS")
-
+
             gr.Markdown("<h1 style='color: red;'>Talk to RADAR</h1>", elem_id="voice-markdown")
             chat_input = gr.Textbox(show_copy_button=True, interactive=True, show_label=False, label="ASK Radar !!!")
             chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])
-            bot_msg = chat_msg.then(bot, [chatbot, choice, tts_choice], [chatbot, gr.Audio(interactive=False, autoplay=True)])
+            bot_msg = chat_msg.then(bot, [chatbot, choice, tts_choice], [chatbot, gr.Audio(interactive=False, autoplay=True)])
             bot_msg.then(lambda: gr.Textbox(value="", interactive=True, placeholder="Ask Radar!!!...", show_label=False), None, [chat_input])
-
-            follow_up_button_1 = gr.Button("Follow-up Question 1", visible=False)
-            follow_up_button_2 = gr.Button("Follow-up Question 2", visible=False)
-            follow_up_button_1.click(handle_follow_up_choice, inputs=[follow_up_button_1, chatbot], outputs=[chatbot, follow_up_button_1, follow_up_button_2])
-            follow_up_button_2.click(handle_follow_up_choice, inputs=[follow_up_button_2, chatbot], outputs=[chatbot, follow_up_button_1, follow_up_button_2])
-
             chatbot.like(print_like_dislike, None, None)
             clear_button = gr.Button("Clear")
             clear_button.click(fn=clear_textbox, inputs=None, outputs=chat_input)
-
+
+
             audio_input = gr.Audio(sources=["microphone"], streaming=True, type='numpy')
             audio_input.stream(transcribe_function, inputs=[state, audio_input], outputs=[state, chat_input], api_name="SAMLOne_real_time")
 
+            # gr.Markdown("<h1 style='color: red;'>Map</h1>", elem_id="location-markdown")
+            # location_output = gr.HTML()
+            # bot_msg.then(show_map_if_details, [chatbot, choice], [location_output, location_output])
+
+        # with gr.Column():
+            # weather_output = gr.HTML(value=fetch_local_weather())
+            # news_output = gr.HTML(value=fetch_local_news())
+            # news_output = gr.HTML(value=fetch_local_events())
+
         with gr.Column():
+
             image_output_1 = gr.Image(value=generate_image(hardcoded_prompt_1), width=400, height=400)
             image_output_2 = gr.Image(value=generate_image(hardcoded_prompt_2), width=400, height=400)
             image_output_3 = gr.Image(value=generate_image(hardcoded_prompt_3), width=400, height=400)
 
+
             refresh_button = gr.Button("Refresh Images")
             refresh_button.click(fn=update_images, inputs=None, outputs=[image_output_1, image_output_2, image_output_3])
-
+
 demo.queue()
 demo.launch(share=True)
 
@@ -679,3 +608,4 @@ demo.launch(share=True)
 
 
 
+
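For reference, the Blocks section kept by this commit follows Gradio's event-chaining idiom: chat_input.submit(add_message, ...) records the user turn, .then(bot, ...) streams the reply into the chatbot and audio player, and a final .then(...) resets the textbox. Below is a stripped-down sketch of just that chain, with an echo bot in place of the real pipeline; it assumes a Gradio version where returning a component instance acts as an update, which is the same behavior the app's own reset lambda relies on.

import gradio as gr

def add_message(history, message):
    # Store the user turn and lock the textbox while the bot answers.
    history.append((message, None))
    return history, gr.Textbox(value="", interactive=False)

def bot(history):
    # Placeholder reply; the real app streams generate_answer() output.
    history[-1] = (history[-1][0], "Echo: " + history[-1][0])
    return history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot([])
    chat_input = gr.Textbox(show_label=False)
    chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])
    bot_msg = chat_msg.then(bot, [chatbot], [chatbot])
    bot_msg.then(lambda: gr.Textbox(interactive=True), None, [chat_input])

if __name__ == "__main__":
    demo.launch()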