Pijush2023 committed on
Commit
5e12ce0
·
verified ·
1 Parent(s): 76bdb22

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -7
app.py CHANGED
@@ -722,6 +722,7 @@
722
  # demo.queue()
723
  # demo.launch(share=True)
724
 
 
725
  import gradio as gr
726
  import requests
727
  import os
@@ -1079,7 +1080,7 @@ def fetch_local_news():
1079
  api_key = os.environ['SERP_API']
1080
  url = f'https://serpapi.com/search.json?engine=google_news&q=birmingham headline&api_key={api_key}'
1081
  response = requests.get(url)
1082
- if response.status_code == 200:
1083
  results = response.json().get("news_results", [])
1084
  news_html = """
1085
  <h2 style="font-family: 'Georgia', serif; color: #ff0000; background-color: #f8f8f8; padding: 10px; border-radius: 10px;">Birmingham Today</h2>
@@ -1347,6 +1348,13 @@ def update_images():
1347
  image_3 = generate_image(hardcoded_prompt_3)
1348
  return image_1, image_2, image_3
1349
 
 
 
 
 
 
 
 
1350
  with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
1351
  with gr.Row():
1352
  with gr.Column():
@@ -1357,7 +1365,7 @@ with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
1357
 
1358
  gr.Markdown("<h1 style='color: red;'>Talk to RADAR</h1>", elem_id="voice-markdown")
1359
 
1360
- chat_input = gr.Textbox(show_copy_button=True, interactive=True, show_label=False, label="ASK Radar !!!",placeholder="After Prompt,click Retriever Only")
1361
  chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input], api_name="voice_query")
1362
  tts_choice = gr.Radio(label="Select TTS System", choices=["Alpha", "Beta", "Gamma"], value="Alpha")
1363
  retriver_button = gr.Button("Retriever")
@@ -1377,7 +1385,10 @@ with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
1377
 
1378
  audio_input = gr.Audio(sources=["microphone"], streaming=True, type='numpy')
1379
  audio_input.stream(transcribe_function, inputs=[state, audio_input], outputs=[state, chat_input], api_name="voice_query_to_text")
1380
-
 
 
 
1381
  with gr.Column():
1382
  image_output_1 = gr.Image(value=generate_image(hardcoded_prompt_1), width=400, height=400)
1383
  image_output_2 = gr.Image(value=generate_image(hardcoded_prompt_2), width=400, height=400)
@@ -1390,7 +1401,3 @@ with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
1390
 
1391
  demo.queue()
1392
  demo.launch(share=True)
1393
-
1394
-
1395
-
1396
-
 
722
  # demo.queue()
723
  # demo.launch(share=True)
724
 
725
+ import gradio as gr
726
  import gradio as gr
727
  import requests
728
  import os
 
1080
  api_key = os.environ['SERP_API']
1081
  url = f'https://serpapi.com/search.json?engine=google_news&q=birmingham headline&api_key={api_key}'
1082
  response = requests.get(url)
1083
+ if response.status_code == 200:
1084
  results = response.json().get("news_results", [])
1085
  news_html = """
1086
  <h2 style="font-family: 'Georgia', serif; color: #ff0000; background-color: #f8f8f8; padding: 10px; border-radius: 10px;">Birmingham Today</h2>
 
1348
  image_3 = generate_image(hardcoded_prompt_3)
1349
  return image_1, image_2, image_3
1350
 
1351
def manual_transcribe(audio):
    """Transcribe a recorded audio clip to text with the ASR pipeline.

    Args:
        audio: Tuple of ``(sample_rate, samples)`` as produced by a
            Gradio ``Audio`` component with ``type='numpy'``, or ``None``
            when nothing was recorded.

    Returns:
        The transcribed text, or an empty string when no usable audio
        was provided.
    """
    # Gradio passes None if the user clicks the button without recording.
    if audio is None:
        return ""
    sr, y = audio
    y = y.astype(np.float32)
    # Guard against empty/silent clips: the original divided by the peak
    # unconditionally, which is a division by zero (NaNs) for silence.
    peak = np.max(np.abs(y)) if y.size else 0.0
    if peak == 0.0:
        return ""
    y /= peak
    result = pipe_asr({"array": y, "sampling_rate": sr}, return_timestamps=False)
    return result.get("text", "")
1357
+
1358
  with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
1359
  with gr.Row():
1360
  with gr.Column():
 
1365
 
1366
  gr.Markdown("<h1 style='color: red;'>Talk to RADAR</h1>", elem_id="voice-markdown")
1367
 
1368
+ chat_input = gr.Textbox(show_copy_button=True, interactive=True, show_label=False, label="ASK Radar !!!", placeholder="After Prompt, click Retriever Only")
1369
  chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input], api_name="voice_query")
1370
  tts_choice = gr.Radio(label="Select TTS System", choices=["Alpha", "Beta", "Gamma"], value="Alpha")
1371
  retriver_button = gr.Button("Retriever")
 
1385
 
1386
  audio_input = gr.Audio(sources=["microphone"], streaming=True, type='numpy')
1387
  audio_input.stream(transcribe_function, inputs=[state, audio_input], outputs=[state, chat_input], api_name="voice_query_to_text")
1388
+
1389
+ send_transcribe_button = gr.Button("Send Transcribe")
1390
+ send_transcribe_button.click(manual_transcribe, inputs=audio_input, outputs=chat_input)
1391
+
1392
  with gr.Column():
1393
  image_output_1 = gr.Image(value=generate_image(hardcoded_prompt_1), width=400, height=400)
1394
  image_output_2 = gr.Image(value=generate_image(hardcoded_prompt_2), width=400, height=400)
 
1401
 
1402
  demo.queue()
1403
  demo.launch(share=True)