Update app.py
app.py CHANGED
@@ -527,7 +527,7 @@ def transcribe_function(stream, new_chunk):
 
     result = pipe_asr({"array": stream, "sampling_rate": sr}, return_timestamps=False)
 
-    full_text = result.get("text")
+    full_text = result.get("text","")
 
     return stream, full_text, result
 
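The only change in this hunk is the fallback for the ASR text: result.get("text","") hands an empty string, rather than None, to the Gradio Textbox when the pipeline returns no text. A minimal sketch of why the default matters, with a toy result dict standing in for the pipe_asr output:

    def extract_text(result):
        # .get("text") alone returns None when the key is missing; the ""
        # default keeps downstream string handling (e.g. the Textbox) safe.
        return result.get("text", "")

    print(extract_text({"text": "hello"}))  # -> hello
    print(extract_text({}))                 # -> "" rather than None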
@@ -709,6 +709,63 @@ def update_images():
     image_3 = generate_image(hardcoded_prompt_3)
     return image_1, image_2, image_3
 
+# with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
+#     with gr.Row():
+#         with gr.Column():
+#             state = gr.State()
+
+#             chatbot = gr.Chatbot([], elem_id="RADAR:Channel 94.1", bubble_full_width=False)
+#             choice = gr.Radio(label="Select Style", choices=["Details", "Conversational"], value="Conversational")
+
+#             gr.Markdown("<h1 style='color: red;'>Talk to RADAR</h1>", elem_id="voice-markdown")
+
+#             chat_input = gr.Textbox(show_copy_button=True, interactive=True, show_label=False, label="ASK Radar !!!")
+#             chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input],api_name="voice_query")
+#             tts_choice = gr.Radio(label="Select TTS System", choices=["Alpha", "Beta", "Gamma"], value="Alpha")
+#             retriver_button = gr.Button("Retriver")
+#             # retriver_button.click(fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input]).then(fn=bot, inputs=[chatbot, choice, tts_choice], outputs=[chatbot],api_name="Ask_Retriever")
+#             # retriver_button.click(fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input]).then(
+#             #     fn=bot, inputs=[chatbot, choice, tts_choice], outputs=[chatbot], api_name="Ask_Retriever")
+#             retriver_button.click(fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input]).then(
+#                 fn=bot, inputs=[chatbot, choice, tts_choice, state], outputs=[chatbot], api_name="Ask_Retriever")
+
+#             #gr.Audio(interactive=False, autoplay=True)]
+
+#             # voice_response_button = gr.Button("Voice Response")
+#             # voice_response_button.click(fn=generate_voice_response, inputs=[chatbot, tts_choice], outputs=[gr.Audio(interactive=False, autoplay=True)],api_name="generate_voice_response")
+
+#             bot_msg = chat_msg.then(bot, [chatbot, choice, tts_choice], [chatbot, gr.Audio(interactive=False, autoplay=True)],api_name="generate_voice_response")
+#             # bot_msg = chat_msg.then(bot, [chatbot, choice, tts_choice], [chatbot, gr.Audio(interactive=False, autoplay=True)])
+#             bot_msg.then(lambda: gr.Textbox(value="", interactive=True, placeholder="Ask Radar!!!...", show_label=False), None, [chat_input])
+#             chatbot.like(print_like_dislike, None, None)
+#             clear_button = gr.Button("Clear")
+#             clear_button.click(fn=clear_textbox, inputs=None, outputs=chat_input)
+
+#             audio_input = gr.Audio(sources=["microphone"], streaming=True, type='numpy')
+#             audio_input.stream(transcribe_function, inputs=[state, audio_input], outputs=[state, chat_input], api_name="voice_query_to_text")
+
+
+#             gr.Markdown("<h1 style='color: red;'>Map</h1>", elem_id="location-markdown")
+#             location_output = gr.HTML()
+#             bot_msg.then(show_map_if_details, [chatbot, choice], [location_output, location_output],api_name="map_finder")
+
+
+#         # with gr.Column():
+#         #     weather_output = gr.HTML(value=fetch_local_weather())
+#         #     news_output = gr.HTML(value=fetch_local_news())
+#         #     news_output = gr.HTML(value=fetch_local_events())
+
+#         with gr.Column():
+#             image_output_1 = gr.Image(value=generate_image(hardcoded_prompt_1), width=400, height=400)
+#             image_output_2 = gr.Image(value=generate_image(hardcoded_prompt_2), width=400, height=400)
+#             image_output_3 = gr.Image(value=generate_image(hardcoded_prompt_3), width=400, height=400)
+
+#             refresh_button = gr.Button("Refresh Images")
+#             refresh_button.click(fn=update_images, inputs=None, outputs=[image_output_1, image_output_2, image_output_3])
+
+# demo.queue()
+# demo.launch(share=True)
+
 with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
     with gr.Row():
         with gr.Column():
@@ -720,22 +777,13 @@ with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
             gr.Markdown("<h1 style='color: red;'>Talk to RADAR</h1>", elem_id="voice-markdown")
 
             chat_input = gr.Textbox(show_copy_button=True, interactive=True, show_label=False, label="ASK Radar !!!")
-            chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input],api_name="voice_query")
+            chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input], api_name="voice_query")
             tts_choice = gr.Radio(label="Select TTS System", choices=["Alpha", "Beta", "Gamma"], value="Alpha")
-            retriver_button = gr.Button("Retriver")
-            # retriver_button.click(fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input]).then(fn=bot, inputs=[chatbot, choice, tts_choice], outputs=[chatbot],api_name="Ask_Retriever")
-            # retriver_button.click(fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input]).then(
-            #     fn=bot, inputs=[chatbot, choice, tts_choice], outputs=[chatbot], api_name="Ask_Retriever")
+            retriver_button = gr.Button("Retriever")
             retriver_button.click(fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input]).then(
-
-
-
-
-            # voice_response_button = gr.Button("Voice Response")
-            # voice_response_button.click(fn=generate_voice_response, inputs=[chatbot, tts_choice], outputs=[gr.Audio(interactive=False, autoplay=True)],api_name="generate_voice_response")
-
-            bot_msg = chat_msg.then(bot, [chatbot, choice, tts_choice], [chatbot, gr.Audio(interactive=False, autoplay=True)],api_name="generate_voice_response")
-            # bot_msg = chat_msg.then(bot, [chatbot, choice, tts_choice], [chatbot, gr.Audio(interactive=False, autoplay=True)])
+                fn=bot, inputs=[chatbot, choice, tts_choice, state], outputs=[chatbot, gr.Audio(interactive=False, autoplay=True)], api_name="Ask_Retriever")
+
+            bot_msg = chat_msg.then(bot, [chatbot, choice, tts_choice], [chatbot, gr.Audio(interactive=False, autoplay=True)], api_name="generate_voice_response")
             bot_msg.then(lambda: gr.Textbox(value="", interactive=True, placeholder="Ask Radar!!!...", show_label=False), None, [chat_input])
             chatbot.like(print_like_dislike, None, None)
             clear_button = gr.Button("Clear")
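The rewiring above leans on Gradio's event chaining: Textbox.submit() and Button.click() return event objects, and .then() schedules the next callback once the previous one finishes (chat_msg feeds bot, whose completion then clears the textbox). A minimal sketch of that pattern; add_message and bot here are toy stand-ins, not the app's real functions:

    import gradio as gr

    def add_message(history, text):
        # append the user turn and clear the input box
        return (history or []) + [(text, None)], ""

    def bot(history):
        # fill in a canned reply for the last turn
        history[-1] = (history[-1][0], "stub reply")
        return history

    with gr.Blocks() as demo:
        chatbot = gr.Chatbot([])
        chat_input = gr.Textbox()
        # submit() returns an event; .then() chains the next step after it,
        # mirroring the chat_msg -> bot_msg -> clear-textbox chain in the hunk.
        chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])
        chat_msg.then(bot, [chatbot], [chatbot])

    # demo.launch()  # uncomment to try the sketch locally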
@@ -743,17 +791,10 @@ with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
 
             audio_input = gr.Audio(sources=["microphone"], streaming=True, type='numpy')
             audio_input.stream(transcribe_function, inputs=[state, audio_input], outputs=[state, chat_input], api_name="voice_query_to_text")
-
 
             gr.Markdown("<h1 style='color: red;'>Map</h1>", elem_id="location-markdown")
             location_output = gr.HTML()
-            bot_msg.then(show_map_if_details, [chatbot, choice], [location_output, location_output],api_name="map_finder")
-
-
-        # with gr.Column():
-        #     weather_output = gr.HTML(value=fetch_local_weather())
-        #     news_output = gr.HTML(value=fetch_local_news())
-        #     news_output = gr.HTML(value=fetch_local_events())
+            bot_msg.then(show_map_if_details, [chatbot, choice], [location_output, location_output], api_name="map_finder")
 
         with gr.Column():
             image_output_1 = gr.Image(value=generate_image(hardcoded_prompt_1), width=400, height=400)
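The unchanged context lines above show the streaming hookup: gr.Audio(streaming=True) repeatedly calls transcribe_function with the accumulated gr.State and the latest microphone chunk, and the transcript lands in the textbox. A minimal, hypothetical sketch of that wiring; transcribe_stub stands in for the real transcribe_function:

    import numpy as np
    import gradio as gr

    def transcribe_stub(stream, new_chunk):
        # with type='numpy' and streaming=True, each chunk arrives as (sample_rate, ndarray)
        sr, chunk = new_chunk
        stream = chunk if stream is None else np.concatenate([stream, chunk])
        text = f"received {len(stream)} samples"  # a real app would run ASR on `stream` here
        return stream, text

    with gr.Blocks() as demo:
        state = gr.State()
        chat_input = gr.Textbox()
        audio_input = gr.Audio(sources=["microphone"], streaming=True, type="numpy")
        # stream() fires repeatedly while recording, like the voice_query_to_text wiring above
        audio_input.stream(transcribe_stub, inputs=[state, audio_input], outputs=[state, chat_input])

    # demo.launch()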
@@ -768,5 +809,3 @@ demo.launch(share=True)
 
 
 
-
-