Update app.py
app.py CHANGED
@@ -311,41 +311,6 @@ chain_neo4j = (



-# def generate_answer(message, choice, retrieval_mode):
-#     logging.debug(f"generate_answer called with choice: {choice} and retrieval_mode: {retrieval_mode}")
-
-#     # Check if the question is about hotels
-#     if "hotel" in message.lower() or "hotels" in message.lower() and "birmingham" in message.lower():
-#         response = fetch_google_hotels()
-#         return response, extract_addresses(response)
-
-#     # Check if the question is about restaurants
-#     if "restaurant" in message.lower() or "restaurants" in message.lower() and "birmingham" in message.lower():
-#         response = fetch_yelp_restaurants()
-#         return response, extract_addresses(response)
-#     # Check if the question is about flights
-#     if "flight" in message.lower() or "flights" in message.lower() and "birmingham" in message.lower():
-#         response = fetch_google_flights()
-#         return response, extract_addresses(response)
-
-#     prompt_template = QA_CHAIN_PROMPT_1 if choice == "Details" else QA_CHAIN_PROMPT_2
-
-#     if retrieval_mode == "VDB":
-#         qa_chain = RetrievalQA.from_chain_type(
-#             llm=chat_model,
-#             chain_type="stuff",
-#             retriever=retriever,
-#             chain_type_kwargs={"prompt": prompt_template}
-#         )
-#         response = qa_chain({"query": message})
-#         logging.debug(f"Vector response: {response}")
-#         return response['result'], extract_addresses(response['result'])
-#     elif retrieval_mode == "KGF":
-#         response = chain_neo4j.invoke({"question": message})
-#         logging.debug(f"Knowledge-Graph response: {response}")
-#         return response, extract_addresses(response)
-#     else:
-#         return "Invalid retrieval mode selected.", []



@@ -424,30 +389,7 @@ def generate_answer(message, choice, retrieval_mode, selected_model):
        return "Sorry, I encountered an error while processing your request.", []


-# def bot(history, choice, tts_choice, retrieval_mode):
-#     if not history:
-#         return history
-
-#     response, addresses = generate_answer(history[-1][0], choice, retrieval_mode)
-#     history[-1][1] = ""
-
-#     with concurrent.futures.ThreadPoolExecutor() as executor:
-#         if tts_choice == "Alpha":
-#             audio_future = executor.submit(generate_audio_elevenlabs, response)
-#         elif tts_choice == "Beta":
-#             audio_future = executor.submit(generate_audio_parler_tts, response)
-#         elif tts_choice == "Gamma":
-#             audio_future = executor.submit(generate_audio_mars5, response)
-
-#     for character in response:
-#         history[-1][1] += character
-#         time.sleep(0.05)
-#         yield history, None
-
-#     audio_path = audio_future.result()
-#     yield history, audio_path

-#     history.append([response, None])  # Ensure the response is added in the correct format



@@ -797,70 +739,6 @@ def generate_audio_parler_tts(text):



-
-
-# # Load the MARS5 model
-# mars5, config_class = torch.hub.load('Camb-ai/mars5-tts', 'mars5_english', trust_repo=True)
-
-# def generate_audio_mars5(text):
-#     description = "Thomas speaks with emphasis and excitement at a moderate pace with high quality."
-#     kwargs_dict = {
-#         'temperature': 0.2,
-#         'top_k': -1,
-#         'top_p': 0.2,
-#         'typical_p': 1.0,
-#         'freq_penalty': 2.6,
-#         'presence_penalty': 0.4,
-#         'rep_penalty_window': 100,
-#         'max_prompt_phones': 360,
-#         'deep_clone': True,
-#         'nar_guidance_w': 3
-#     }
-
-#     chunks = chunk_text(preprocess(text))
-#     audio_segments = []
-
-#     for chunk in chunks:
-#         wav = torch.zeros(1, mars5.sr)  # Use a placeholder silent audio for the reference
-#         cfg = config_class(**{k: kwargs_dict[k] for k in kwargs_dict if k in config_class.__dataclass_fields__})
-#         ar_codes, wav_out = mars5.tts(chunk, wav, "", cfg=cfg)
-
-#         temp_audio_path = os.path.join(tempfile.gettempdir(), f"mars5_audio_{len(audio_segments)}.wav")
-#         torchaudio.save(temp_audio_path, wav_out.unsqueeze(0), mars5.sr)
-#         audio_segments.append(AudioSegment.from_wav(temp_audio_path))
-
-#     combined_audio = sum(audio_segments)
-#     combined_audio_path = os.path.join(tempfile.gettempdir(), "mars5_combined_audio.wav")
-#     combined_audio.export(combined_audio_path, format="wav")
-
-#     logging.debug(f"Audio saved to {combined_audio_path}")
-#     return combined_audio_path
-
-# pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2", torch_dtype=torch.float16)
-# pipe.to(device)
-
-# def generate_image(prompt):
-#     with torch.cuda.amp.autocast():
-#         image = pipe(
-#             prompt,
-#             num_inference_steps=28,
-#             guidance_scale=3.0,
-#         ).images[0]
-#     return image
-
-# hardcoded_prompt_1 = "Give a high quality photograph of a great looking red 2026 Toyota coupe against a skyline setting in the night, michael mann style in omaha enticing the consumer to buy this product"
-# hardcoded_prompt_2 = "A vibrant and dynamic football game scene in the style of Peter Paul Rubens, showcasing the intense match between Alabama and Nebraska. The players are depicted with the dramatic, muscular physiques and expressive faces typical of Rubens' style. The Alabama team is wearing their iconic crimson and white uniforms, while the Nebraska team is in their classic red and white attire. The scene is filled with action, with players in mid-motion, tackling, running, and catching the ball. The background features a grand stadium filled with cheering fans, banners, and the natural landscape in the distance. The colors are rich and vibrant, with a strong use of light and shadow to create depth and drama. The overall atmosphere captures the intensity and excitement of the game, infused with the grandeur and dynamism characteristic of Rubens' work."
-# hardcoded_prompt_3 = "Create a high-energy scene of a DJ performing on a large stage with vibrant lights, colorful lasers, a lively dancing crowd, and various electronic equipment in the background."
-
-# def update_images():
-#     image_1 = generate_image(hardcoded_prompt_1)
-#     image_2 = generate_image(hardcoded_prompt_2)
-#     image_3 = generate_image(hardcoded_prompt_3)
-#     return image_1, image_2, image_3
-
-
-
-
def fetch_local_events():
    api_key = os.environ['SERP_API']
    url = f'https://serpapi.com/search.json?engine=google_events&q=Events+in+Birmingham&hl=en&gl=us&api_key={api_key}'
@@ -1189,90 +1067,6 @@ def fetch_google_flights(departure_id="JFK", arrival_id="BHM", outbound_date=cur

    return flight_info

-
-
-
-
-
-
-
-
-
-
-# with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
-#     with gr.Row():
-#         with gr.Column():
-#             state = gr.State()
-
-#             chatbot = gr.Chatbot([], elem_id="RADAR:Channel 94.1", bubble_full_width=False)
-#             choice = gr.Radio(label="Select Style", choices=["Details", "Conversational"], value="Conversational")
-#             retrieval_mode = gr.Radio(label="Retrieval Mode", choices=["VDB", "KGF"], value="VDB")
-
-#             gr.Markdown("<h1 style='color: red;'>Talk to RADAR</h1>", elem_id="voice-markdown")
-
-#             chat_input = gr.Textbox(show_copy_button=True, interactive=True, show_label=False, label="ASK Radar !!!", placeholder="Hey Radar...!!")
-#             tts_choice = gr.Radio(label="Select TTS System", choices=["Alpha", "Beta", "Gamma"], value="Alpha")
-#             retriever_button = gr.Button("Retriever")
-
-#             clear_button = gr.Button("Clear")
-#             clear_button.click(lambda:[None,None] ,outputs=[chat_input, state])
-
-#             gr.Markdown("<h1 style='color: red;'>Radar Map</h1>", elem_id="Map-Radar")
-#             location_output = gr.HTML()
-
-#             # Define a single audio component
-#             audio_output = gr.Audio(interactive=False, autoplay=True)
-
-#             def stop_audio():
-#                 audio_output.stop()
-#                 return None
-
-#             # Define the sequence of actions for the "Retriever" button
-#             retriever_sequence = (
-#                 retriever_button.click(fn=stop_audio, inputs=[], outputs=[audio_output],api_name="Ask_Retriever")
-#                 .then(fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input],api_name="voice_query")
-#                 .then(fn=bot, inputs=[chatbot, choice, tts_choice, retrieval_mode], outputs=[chatbot, audio_output],api_name="generate_voice_response" )
-#                 .then(fn=show_map_if_details, inputs=[chatbot, choice], outputs=[location_output, location_output], api_name="map_finder")
-#                 .then(fn=clear_textbox, inputs=[], outputs=[chat_input])
-#             )
-
-#             # Link the "Enter" key (submit event) to the same sequence of actions
-#             chat_input.submit(fn=stop_audio, inputs=[], outputs=[audio_output])
-#             chat_input.submit(fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input],api_name="voice_query").then(
-#                 fn=bot, inputs=[chatbot, choice, tts_choice, retrieval_mode], outputs=[chatbot, audio_output], api_name="generate_voice_response"
-#             ).then(
-#                 fn=show_map_if_details, inputs=[chatbot, choice], outputs=[location_output, location_output], api_name="map_finder"
-#             ).then(
-#                 fn=clear_textbox, inputs=[], outputs=[chat_input]
-#             )
-
-#             audio_input = gr.Audio(sources=["microphone"], streaming=True, type='numpy', every=0.1)
-#             audio_input.stream(transcribe_function, inputs=[state, audio_input], outputs=[state, chat_input], api_name="voice_query_to_text")
-
-#             # Handle retrieval mode change
-#             retrieval_mode.change(fn=handle_retrieval_mode_change, inputs=retrieval_mode, outputs=[choice, choice])
-
-#         with gr.Column():
-#             weather_output = gr.HTML(value=fetch_local_weather())
-#             news_output = gr.HTML(value=fetch_local_news())
-#             events_output = gr.HTML(value=fetch_local_events())
-#             # restaurant_output=gr.HTML(value=fetch_yelp_restaurants())
-
-
-
-
-#         with gr.Column():
-#             image_output_1 = gr.Image(value=generate_image(hardcoded_prompt_1), width=400, height=400)
-#             image_output_2 = gr.Image(value=generate_image(hardcoded_prompt_2), width=400, height=400)
-#             image_output_3 = gr.Image(value=generate_image(hardcoded_prompt_3), width=400, height=400)
-
-#             refresh_button = gr.Button("Refresh Images")
-#             refresh_button.click(fn=update_images, inputs=None, outputs=[image_output_1, image_output_2, image_output_3], api_name="update_image")
-
-
-# demo.queue()
-# demo.launch(share=True)
-
with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
    with gr.Row():
        with gr.Column():
@@ -1327,13 +1121,7 @@ with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
            news_output = gr.HTML(value=fetch_local_news())
            events_output = gr.HTML(value=fetch_local_events())

-
-            # image_output_1 = gr.Image(value=generate_image(hardcoded_prompt_1), width=400, height=400)
-            # image_output_2 = gr.Image(value=generate_image(hardcoded_prompt_2), width=400, height=400)
-            # image_output_3 = gr.Image(value=generate_image(hardcoded_prompt_3), width=400, height=400)
-
-            # refresh_button = gr.Button("Refresh Images")
-            # refresh_button.click(fn=update_images, inputs=None, outputs=[image_output_1, image_output_2, image_output_3], api_name="update_image")
+

demo.queue()
demo.launch(share=True)