Update app.py
app.py
CHANGED
@@ -1210,6 +1210,121 @@ def fetch_google_flights(departure_id="JFK", arrival_id="BHM", outbound_date=cur



+# with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
+#     with gr.Row():
+#         with gr.Column():
+#             state = gr.State()
+
+#             chatbot = gr.Chatbot([], elem_id="RADAR:Channel 94.1", bubble_full_width=False)
+#             choice = gr.Radio(label="Select Style", choices=["Details", "Conversational"], value="Conversational")
+#             retrieval_mode = gr.Radio(label="Retrieval Mode", choices=["VDB", "KGF"], value="VDB")
+#             model_choice = gr.Dropdown(label="Choose Model", choices=["LM-1", "LM-2"], value="LM-1")
+
+#             # Link the dropdown change to handle_model_choice_change
+#             model_choice.change(fn=handle_model_choice_change, inputs=model_choice, outputs=[retrieval_mode, choice, choice])
+
+#             gr.Markdown("<h1 style='color: red;'>Talk to RADAR</h1>", elem_id="voice-markdown")
+
+#             chat_input = gr.Textbox(show_copy_button=True, interactive=True, show_label=False, label="ASK Radar !!!", placeholder="Hey Radar...!!")
+#             tts_choice = gr.Radio(label="Select TTS System", choices=["Alpha", "Beta"], value="Alpha")
+#             retriever_button = gr.Button("Retriever")
+
+#             clear_button = gr.Button("Clear")
+#             clear_button.click(lambda: [None, None], outputs=[chat_input, state])
+
+#             gr.Markdown("<h1 style='color: red;'>Radar Map</h1>", elem_id="Map-Radar")
+#             location_output = gr.HTML()
+#             audio_output = gr.Audio(interactive=False, autoplay=True)
+
+#             def stop_audio():
+#                 audio_output.stop()
+#                 return None
+
+
+#             # retriever_sequence = (
+#             #     retriever_button.click(fn=stop_audio, inputs=[], outputs=[audio_output], api_name="api_stop_audio_recording")
+#             #     .then(fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input], api_name="api_addprompt_chathistory")
+#             #     .then(fn=bot, inputs=[chatbot, choice, tts_choice, retrieval_mode, model_choice], outputs=[chatbot, audio_output], api_name="api_askchatbot_then_generateaudio")
+#             #     .then(fn=show_map_if_details, inputs=[chatbot, choice], outputs=[location_output, location_output], api_name="api_show_map_details")
+#             #     .then(fn=clear_textbox, inputs=[], outputs=[chat_input],api_name="api_clear_textbox")
+#             # )
+
+#             retriever_sequence = (
+#                 retriever_button.click(fn=stop_audio, inputs=[], outputs=[audio_output], api_name="api_stop_audio_recording")
+#                 .then(fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input], api_name="api_addprompt_chathistory")
+#                 # First, generate the bot response
+#                 .then(fn=generate_bot_response, inputs=[chatbot, choice, retrieval_mode, model_choice], outputs=[chatbot], api_name="api_generate_bot_response")
+#                 # Then, generate the TTS response based on the bot's response
+#                 .then(fn=generate_tts_response, inputs=[chatbot, tts_choice], outputs=[audio_output], api_name="api_generate_tts_response")
+#                 .then(fn=show_map_if_details, inputs=[chatbot, choice], outputs=[location_output, location_output], api_name="api_show_map_details")
+#                 .then(fn=clear_textbox, inputs=[], outputs=[chat_input], api_name="api_clear_textbox")
+#             )
+
+
+
+
+
+
+#             # chat_input.submit(fn=stop_audio, inputs=[], outputs=[audio_output],api_name="api_stop_audio_recording")
+#             # chat_input.submit(fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input], api_name="api_addprompt_chathistory").then(
+#             #     fn=bot, inputs=[chatbot, choice, tts_choice, retrieval_mode, model_choice], outputs=[chatbot, audio_output], api_name="api_askchatbot_then_generateaudio"
+#             # ).then(
+#             #     fn=show_map_if_details, inputs=[chatbot, choice], outputs=[location_output, location_output], api_name="api_show_map_details"
+#             # ).then(
+#             #     fn=clear_textbox, inputs=[], outputs=[chat_input],api_name="api_clear_textbox"
+#             # )
+
+#             chat_input.submit(fn=stop_audio, inputs=[], outputs=[audio_output], api_name="api_stop_audio_recording").then(
+#                 fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input], api_name="api_addprompt_chathistory"
+#             ).then(
+#                 # First, generate the bot response
+#                 fn=generate_bot_response, inputs=[chatbot, choice, retrieval_mode, model_choice], outputs=[chatbot], api_name="api_generate_bot_response"
+#             ).then(
+#                 # Then, generate the TTS response based on the bot's response
+#                 fn=generate_tts_response, inputs=[chatbot, tts_choice], outputs=[audio_output], api_name="api_generate_tts_response"
+#             ).then(
+#                 fn=show_map_if_details, inputs=[chatbot, choice], outputs=[location_output, location_output], api_name="api_show_map_details"
+#             ).then(
+#                 fn=clear_textbox, inputs=[], outputs=[chat_input], api_name="api_clear_textbox"
+#             )
+
+
+
+
+
+
+
+#             audio_input = gr.Audio(sources=["microphone"], streaming=True, type='numpy', every=0.1)
+#             audio_input.stream(transcribe_function, inputs=[state, audio_input], outputs=[state, chat_input], api_name="api_voice_to_text")
+
+#         with gr.Column():
+#             weather_output = gr.HTML(value=fetch_local_weather())
+#             news_output = gr.HTML(value=fetch_local_news())
+#             events_output = gr.HTML(value=fetch_local_events())
+
+# demo.queue()
+# demo.launch(show_error=True)
+
+
+import gradio as gr
+
+# List of example prompts
+example_prompts = [
+    "What are some top restaurants in Birmingham?",
+    "Tell me about the weather today in Birmingham.",
+    "What events are happening in Birmingham today?",
+    "Can you suggest some hotels in Birmingham?",
+    "Give me information on flights from JFK to Birmingham."
+]
+
+# JavaScript to handle click event and insert the text into the input box
+js_code = """
+function insertPrompt(text) {
+    var inputBox = document.querySelector('#chat-input textarea');
+    inputBox.value = text;
+}
+"""
+
 with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
     with gr.Row():
         with gr.Column():
@@ -1220,12 +1335,19 @@ with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
             retrieval_mode = gr.Radio(label="Retrieval Mode", choices=["VDB", "KGF"], value="VDB")
             model_choice = gr.Dropdown(label="Choose Model", choices=["LM-1", "LM-2"], value="LM-1")

-            # Link the dropdown change to handle_model_choice_change
             model_choice.change(fn=handle_model_choice_change, inputs=model_choice, outputs=[retrieval_mode, choice, choice])

             gr.Markdown("<h1 style='color: red;'>Talk to RADAR</h1>", elem_id="voice-markdown")

-            chat_input = gr.Textbox(show_copy_button=True, interactive=True, show_label=False, label="ASK Radar !!!", placeholder="Hey Radar...!!")
+            # Example Prompts in a table format
+            example_prompts_html = "<h3>Example Prompts:</h3><table>"
+            for prompt in example_prompts:
+                example_prompts_html += f"<tr><td><a href='javascript:void(0);' onclick=\"insertPrompt('{prompt}');\">{prompt}</a></td></tr>"
+            example_prompts_html += "</table>"
+
+            gr.HTML(example_prompts_html, elem_id="example-prompts")
+
+            chat_input = gr.Textbox(show_copy_button=True, interactive=True, show_label=False, label="ASK Radar !!!", placeholder="Hey Radar...!!", elem_id="chat-input")
             tts_choice = gr.Radio(label="Select TTS System", choices=["Alpha", "Beta"], value="Alpha")
             retriever_button = gr.Button("Retriever")

@@ -1236,51 +1358,20 @@ with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
             location_output = gr.HTML()
             audio_output = gr.Audio(interactive=False, autoplay=True)

-            def stop_audio():
-                audio_output.stop()
-                return None
-
-
-            # retriever_sequence = (
-            #     retriever_button.click(fn=stop_audio, inputs=[], outputs=[audio_output], api_name="api_stop_audio_recording")
-            #     .then(fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input], api_name="api_addprompt_chathistory")
-            #     .then(fn=bot, inputs=[chatbot, choice, tts_choice, retrieval_mode, model_choice], outputs=[chatbot, audio_output], api_name="api_askchatbot_then_generateaudio")
-            #     .then(fn=show_map_if_details, inputs=[chatbot, choice], outputs=[location_output, location_output], api_name="api_show_map_details")
-            #     .then(fn=clear_textbox, inputs=[], outputs=[chat_input],api_name="api_clear_textbox")
-            # )
-
             retriever_sequence = (
                 retriever_button.click(fn=stop_audio, inputs=[], outputs=[audio_output], api_name="api_stop_audio_recording")
                 .then(fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input], api_name="api_addprompt_chathistory")
-                # First, generate the bot response
                 .then(fn=generate_bot_response, inputs=[chatbot, choice, retrieval_mode, model_choice], outputs=[chatbot], api_name="api_generate_bot_response")
-                # Then, generate the TTS response based on the bot's response
                 .then(fn=generate_tts_response, inputs=[chatbot, tts_choice], outputs=[audio_output], api_name="api_generate_tts_response")
                 .then(fn=show_map_if_details, inputs=[chatbot, choice], outputs=[location_output, location_output], api_name="api_show_map_details")
                 .then(fn=clear_textbox, inputs=[], outputs=[chat_input], api_name="api_clear_textbox")
             )

-
-
-
-
-
-            # chat_input.submit(fn=stop_audio, inputs=[], outputs=[audio_output],api_name="api_stop_audio_recording")
-            # chat_input.submit(fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input], api_name="api_addprompt_chathistory").then(
-            #     fn=bot, inputs=[chatbot, choice, tts_choice, retrieval_mode, model_choice], outputs=[chatbot, audio_output], api_name="api_askchatbot_then_generateaudio"
-            # ).then(
-            #     fn=show_map_if_details, inputs=[chatbot, choice], outputs=[location_output, location_output], api_name="api_show_map_details"
-            # ).then(
-            #     fn=clear_textbox, inputs=[], outputs=[chat_input],api_name="api_clear_textbox"
-            # )
-
             chat_input.submit(fn=stop_audio, inputs=[], outputs=[audio_output], api_name="api_stop_audio_recording").then(
                 fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input], api_name="api_addprompt_chathistory"
             ).then(
-                # First, generate the bot response
                 fn=generate_bot_response, inputs=[chatbot, choice, retrieval_mode, model_choice], outputs=[chatbot], api_name="api_generate_bot_response"
             ).then(
-                # Then, generate the TTS response based on the bot's response
                 fn=generate_tts_response, inputs=[chatbot, tts_choice], outputs=[audio_output], api_name="api_generate_tts_response"
             ).then(
                 fn=show_map_if_details, inputs=[chatbot, choice], outputs=[location_output, location_output], api_name="api_show_map_details"
@@ -1288,12 +1379,6 @@ with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
                 fn=clear_textbox, inputs=[], outputs=[chat_input], api_name="api_clear_textbox"
             )

-
-
-
-
-
-
             audio_input = gr.Audio(sources=["microphone"], streaming=True, type='numpy', every=0.1)
             audio_input.stream(transcribe_function, inputs=[state, audio_input], outputs=[state, chat_input], api_name="api_voice_to_text")

@@ -1305,3 +1390,4 @@ with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
 demo.queue()
 demo.launch(show_error=True)

+
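A note on the example-prompt links added above: the js_code snippet assigns to the textarea's value directly, and Gradio's front end typically only syncs a Textbox back to Python after the element fires an input event; the function also has to be loaded into the page before the onclick handlers can call insertPrompt. The sketch below is an illustration only, not part of this commit: it keeps the #chat-input elem_id from the diff, adds the event dispatch, and leaves the page wiring as a commented assumption because the js=/head= hooks on gr.Blocks differ across Gradio versions.

# Sketch only (not part of the commit): an insertPrompt variant that also
# notifies the front end that the textbox value changed.
js_code = """
function insertPrompt(text) {
    // The Textbox in the diff is created with elem_id="chat-input".
    var inputBox = document.querySelector('#chat-input textarea');
    if (!inputBox) { return; }
    inputBox.value = text;
    // Dispatch a DOM 'input' event so the framework picks up the new value.
    inputBox.dispatchEvent(new Event('input', { bubbles: true }));
}
"""

# Hypothetical wiring, assuming a Gradio release where gr.Blocks accepts js=:
# with gr.Blocks(theme='Pijush2023/scikit-learn-pijush', js=js_code) as demo:
#     ...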