Update app.py
app.py CHANGED
@@ -727,58 +727,6 @@ default_prompts = [
     "Taylor Swift Concert"
 ]
 
-# Add image-related components and integrate it with Gradio
-with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
-
-    with gr.Row():
-        with gr.Column():
-            state = gr.State()
-
-            chatbot = gr.Chatbot([], elem_id="RADAR:Channel 94.1", bubble_full_width=False)
-            choice = gr.Radio(label="Select Style", choices=["Details", "Conversational"], value="Conversational")
-            retrieval_mode = gr.Radio(label="Retrieval Mode", choices=["VDB", "KGF"], value="VDB")
-            model_choice = gr.Dropdown(label="Choose Model", choices=["LM-1", "LM-2", "LM-3"], value="LM-1")
-
-            # Link the dropdown change to handle_model_choice_change
-            model_choice.change(fn=handle_model_choice_change, inputs=model_choice, outputs=[retrieval_mode, choice, choice])
-
-            gr.Markdown("<h1 style='color: red;'>Talk to RADAR</h1>", elem_id="voice-markdown")
-
-            chat_input = gr.Textbox(show_copy_button=True, interactive=True, show_label=False, label="ASK Radar !!!", placeholder="Hey Radar...!!")
-            tts_choice = gr.Radio(label="Select TTS System", choices=["Alpha", "Beta"], value="Alpha")
-
-            retriever_button = gr.Button("Retriever")
-
-            clear_button = gr.Button("Clear")
-            clear_button.click(lambda: [None, None], outputs=[chat_input, state])
-
-            gr.Markdown("<h1 style='color: red;'>Radar Map</h1>", elem_id="Map-Radar")
-            location_output = gr.HTML()
-            audio_output = gr.Audio(interactive=False, autoplay=True)
-
-            # Image output section
-            gr.Markdown("<h2>Generated Images on Load</h2>")
-            image_output_1 = gr.Image(type="filepath", label="Generated Image 1", width=400, height=400)
-            image_output_2 = gr.Image(type="filepath", label="Generated Image 2", width=400, height=400)
-            image_output_3 = gr.Image(type="filepath", label="Generated Image 3", width=400, height=400)
-
-            # Automatically generate and display the three images on startup using the predefined prompts
-            startup_image_1 = generate_flux_image(default_prompts[0])
-            startup_image_2 = generate_flux_image(default_prompts[1])
-            startup_image_3 = generate_flux_image(default_prompts[2])
-
-            image_output_1.update(value=startup_image_1)
-            image_output_2.update(value=startup_image_2)
-            image_output_3.update(value=startup_image_3)
-
-        with gr.Column():
-            weather_output = gr.HTML(value=fetch_local_weather())
-            news_output = gr.HTML(value=fetch_local_news())
-            events_output = gr.HTML(value=fetch_local_events())
-
-    # Rest of the Gradio interface setup
-
-demo.launch(show_error=True)
 
 
 
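Aside on the removed startup-image wiring above: calling image_output_N.update(value=...) while the Blocks context is still being built does not, by itself, populate the rendered components; the usual Gradio pattern is to attach a handler to the load event. Below is a minimal sketch of that pattern, not the repository's actual code: generate_flux_image is stubbed out, and the prompt strings are placeholders standing in for default_prompts in app.py.

# Sketch only: populating Image components on page load via demo.load.
# generate_flux_image is stubbed; in app.py it is assumed to return the
# file path of a generated image.
import gradio as gr

default_prompts = ["placeholder prompt 1", "placeholder prompt 2", "Taylor Swift Concert"]  # placeholders

def generate_flux_image(prompt):
    # Stub: the real helper in app.py generates an image and returns its path.
    return None

def generate_startup_images():
    # One return value per Image component, in order.
    return [generate_flux_image(p) for p in default_prompts[:3]]

with gr.Blocks() as demo:
    image_output_1 = gr.Image(type="filepath", label="Generated Image 1")
    image_output_2 = gr.Image(type="filepath", label="Generated Image 2")
    image_output_3 = gr.Image(type="filepath", label="Generated Image 3")

    # Runs once per page load and streams the three results into the components.
    demo.load(fn=generate_startup_images, inputs=None,
              outputs=[image_output_1, image_output_2, image_output_3])

demo.launch(show_error=True)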
@@ -1462,13 +1410,7 @@ with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
 
 
 
-
-    # retriever_button.click(fn=stop_audio, inputs=[], outputs=[audio_output], api_name="api_stop_audio_recording")
-    # .then(fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input], api_name="api_addprompt_chathistory")
-    # .then(fn=bot, inputs=[chatbot, choice, tts_choice, retrieval_mode, model_choice], outputs=[chatbot, audio_output], api_name="api_askchatbot_then_generateaudio")
-    # .then(fn=show_map_if_details, inputs=[chatbot, choice], outputs=[location_output, location_output], api_name="api_show_map_details")
-    # .then(fn=clear_textbox, inputs=[], outputs=[chat_input],api_name="api_clear_textbox")
-    # )
+
 
     retriever_sequence = (
         retriever_button.click(fn=stop_audio, inputs=[], outputs=[audio_output], api_name="api_stop_audio_recording")
@@ -1486,14 +1428,7 @@ with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
 
 
 
-
-    # chat_input.submit(fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input], api_name="api_addprompt_chathistory").then(
-    #     fn=bot, inputs=[chatbot, choice, tts_choice, retrieval_mode, model_choice], outputs=[chatbot, audio_output], api_name="api_askchatbot_then_generateaudio"
-    # ).then(
-    #     fn=show_map_if_details, inputs=[chatbot, choice], outputs=[location_output, location_output], api_name="api_show_map_details"
-    # ).then(
-    #     fn=clear_textbox, inputs=[], outputs=[chat_input],api_name="api_clear_textbox"
-    # )
+
 
     chat_input.submit(fn=stop_audio, inputs=[], outputs=[audio_output], api_name="api_stop_audio_recording").then(
        fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input], api_name="api_addprompt_chathistory"
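The surviving retriever_sequence and chat_input.submit blocks rely on Gradio event chaining: each listener call (Button.click, Textbox.submit) returns a dependency object whose .then(...) attaches the next step, so the handlers run strictly in sequence. Below is a minimal self-contained sketch of that pattern, using toy handlers rather than the functions defined in app.py.

# Sketch only: chaining steps with .then(), mirroring the shape of the
# retriever_sequence / chat_input.submit chains above.
import gradio as gr

def stop_audio():
    # Step 1: clear any audio that is still playing.
    return None

def add_message(history, message):
    # Step 2: append the user's message and empty the textbox.
    return history + [(message, None)], ""

def bot(history):
    # Step 3: fill in a reply for the last user message.
    history[-1] = (history[-1][0], "Echo: " + history[-1][0])
    return history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot([])
    chat_input = gr.Textbox(show_label=False)
    audio_output = gr.Audio(interactive=False)

    # Each .then() runs only after the previous step has finished.
    chat_input.submit(fn=stop_audio, inputs=[], outputs=[audio_output]).then(
        fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input]
    ).then(
        fn=bot, inputs=[chatbot], outputs=[chatbot]
    )

demo.launch(show_error=True)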