Update app.py
app.py
CHANGED
@@ -99,6 +99,16 @@ def initialize_phi_model():
 
 def initialize_gpt_model():
     return ChatOpenAI(api_key=os.environ['OPENAI_API_KEY'], temperature=0, model='gpt-4o')
+
+
+def initialize_gpt_mini_model():
+    return ChatOpenAI(api_key=os.environ['OPENAI_API_KEY'], temperature=0, model='gpt-4o-mini')
+
+# Initialize the GPT-4o-mini model
+gpt_mini_model = initialize_gpt_mini_model()
+
+
+
 
 # Initialize both models
 phi_pipe = initialize_phi_model()
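Note: a minimal sketch of how the new gpt-4o-mini initializer could be smoke-tested on its own, assuming ChatOpenAI is imported from langchain_openai (the import is not shown in this diff) and OPENAI_API_KEY is set, as app.py already requires:

import os
from langchain_openai import ChatOpenAI  # assumption: same package app.py imports ChatOpenAI from

def initialize_gpt_mini_model():
    return ChatOpenAI(api_key=os.environ['OPENAI_API_KEY'], temperature=0, model='gpt-4o-mini')

if __name__ == "__main__":
    # Quick sanity check that the gpt-4o-mini client responds before wiring it into the app.
    gpt_mini_model = initialize_gpt_mini_model()
    print(gpt_mini_model.invoke("Reply with the single word: ready").content)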
@@ -334,12 +344,36 @@ Sure! Here's the information you requested:
 """
 
 
+# def generate_bot_response(history, choice, retrieval_mode, model_choice):
+#     if not history:
+#         return
+
+#     # Select the model
+#     selected_model = chat_model if model_choice == "LM-1" else phi_pipe
+
+#     response, addresses = generate_answer(history[-1][0], choice, retrieval_mode, selected_model)
+#     history[-1][1] = ""
+
+#     for character in response:
+#         history[-1][1] += character
+#         yield history  # Stream each character as it is generated
+#         time.sleep(0.05)  # Add a slight delay to simulate streaming
+
+#     yield history  # Final yield with the complete response
+
 def generate_bot_response(history, choice, retrieval_mode, model_choice):
     if not history:
         return
 
-    # Select the model
-    selected_model = chat_model if model_choice == "LM-1" else phi_pipe
+    # Select the model based on the user's choice
+    if model_choice == "LM-1":
+        selected_model = chat_model
+    elif model_choice == "LM-2":
+        selected_model = phi_pipe
+    elif model_choice == "LM-3":
+        selected_model = gpt_mini_model
+    else:
+        selected_model = chat_model  # Fallback to GPT-4o
 
     response, addresses = generate_answer(history[-1][0], choice, retrieval_mode, selected_model)
     history[-1][1] = ""
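A lookup table is a possible tidier variant of the if/elif selection above (a sketch, not part of this commit); chat_model, phi_pipe and gpt_mini_model are the globals initialized earlier in app.py:

# Hypothetical refactor: map dropdown labels to the already-initialized model objects.
MODEL_REGISTRY = {
    "LM-1": chat_model,      # GPT-4o
    "LM-2": phi_pipe,        # Phi-3.5 pipeline
    "LM-3": gpt_mini_model,  # GPT-4o-mini
}

# Unknown labels fall back to GPT-4o, matching the else branch in the diff.
selected_model = MODEL_REGISTRY.get(model_choice, chat_model)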
@@ -353,6 +387,7 @@ def generate_bot_response(history, choice, retrieval_mode, model_choice):
 
 
 
+
 def generate_tts_response(response, tts_choice):
     with concurrent.futures.ThreadPoolExecutor() as executor:
         if tts_choice == "Alpha":
@@ -451,11 +486,112 @@ def clean_response(response_text):
 
 import traceback
 
+# def generate_answer(message, choice, retrieval_mode, selected_model):
+#     logging.debug(f"generate_answer called with choice: {choice}, retrieval_mode: {retrieval_mode}, and selected_model: {selected_model}")
+
+#     # Logic for disabling options for Phi-3.5
+#     if selected_model == "LM-2":
+#         choice = None
+#         retrieval_mode = None
+
+#     try:
+#         # Select the appropriate template based on the choice
+#         if choice == "Details":
+#             prompt_template = QA_CHAIN_PROMPT_1
+#         elif choice == "Conversational":
+#             prompt_template = QA_CHAIN_PROMPT_2
+#         else:
+#             prompt_template = QA_CHAIN_PROMPT_1  # Fallback to template1
+
+#         # Handle hotel-related queries
+#         if "hotel" in message.lower() or "hotels" in message.lower() and "birmingham" in message.lower():
+#             logging.debug("Handling hotel-related query")
+#             response = fetch_google_hotels()
+#             logging.debug(f"Hotel response: {response}")
+#             return response, extract_addresses(response)
+
+#         # Handle restaurant-related queries
+#         if "restaurant" in message.lower() or "restaurants" in message.lower() and "birmingham" in message.lower():
+#             logging.debug("Handling restaurant-related query")
+#             response = fetch_yelp_restaurants()
+#             logging.debug(f"Restaurant response: {response}")
+#             return response, extract_addresses(response)
+
+#         # Handle flight-related queries
+#         if "flight" in message.lower() or "flights" in message.lower() and "birmingham" in message.lower():
+#             logging.debug("Handling flight-related query")
+#             response = fetch_google_flights()
+#             logging.debug(f"Flight response: {response}")
+#             return response, extract_addresses(response)
+
+#         # Retrieval-based response
+#         if retrieval_mode == "VDB":
+#             logging.debug("Using VDB retrieval mode")
+#             if selected_model == chat_model:
+#                 logging.debug("Selected model: LM-1")
+#                 retriever = gpt_retriever
+#                 context = retriever.get_relevant_documents(message)
+#                 logging.debug(f"Retrieved context: {context}")
+
+#                 prompt = prompt_template.format(context=context, question=message)
+#                 logging.debug(f"Generated prompt: {prompt}")
+
+#                 qa_chain = RetrievalQA.from_chain_type(
+#                     llm=chat_model,
+#                     chain_type="stuff",
+#                     retriever=retriever,
+#                     chain_type_kwargs={"prompt": prompt_template}
+#                 )
+#                 response = qa_chain({"query": message})
+#                 logging.debug(f"LM-1 response: {response}")
+#                 return response['result'], extract_addresses(response['result'])
+
+#             elif selected_model == phi_pipe:
+#                 logging.debug("Selected model: LM-2")
+#                 retriever = phi_retriever
+#                 context_documents = retriever.get_relevant_documents(message)
+#                 context = "\n".join([doc.page_content for doc in context_documents])
+#                 logging.debug(f"Retrieved context for LM-2: {context}")
+
+#                 # Use the correct template variable
+#                 prompt = phi_custom_template.format(context=context, question=message)
+#                 logging.debug(f"Generated LM-2 prompt: {prompt}")
+
+#                 response = selected_model(prompt, **{
+#                     "max_new_tokens": 400,
+#                     "return_full_text": True,
+#                     "temperature": 0.7,
+#                     "do_sample": True,
+#                 })
+
+#                 if response:
+#                     generated_text = response[0]['generated_text']
+#                     logging.debug(f"LM-2 Response: {generated_text}")
+#                     cleaned_response = clean_response(generated_text)
+#                     return cleaned_response, extract_addresses(cleaned_response)
+#                 else:
+#                     logging.error("LM-2 did not return any response.")
+#                     return "No response generated.", []
+
+#         elif retrieval_mode == "KGF":
+#             logging.debug("Using KGF retrieval mode")
+#             response = chain_neo4j.invoke({"question": message})
+#             logging.debug(f"KGF response: {response}")
+#             return response, extract_addresses(response)
+#         else:
+#             logging.error("Invalid retrieval mode selected.")
+#             return "Invalid retrieval mode selected.", []
+
+#     except Exception as e:
+#         logging.error(f"Error in generate_answer: {str(e)}")
+#         logging.error(traceback.format_exc())
+#         return "Sorry, I encountered an error while processing your request.", []
+
 def generate_answer(message, choice, retrieval_mode, selected_model):
     logging.debug(f"generate_answer called with choice: {choice}, retrieval_mode: {retrieval_mode}, and selected_model: {selected_model}")
 
     # Logic for disabling options for Phi-3.5
-    if selected_model == "LM-2":
+    if selected_model == phi_pipe:
         choice = None
         retrieval_mode = None
 
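For clarity: the rewritten guard compares against the model object rather than the "LM-2" label, because generate_bot_response now passes the selected model itself into generate_answer. A small illustration under that assumption:

# Illustration only: generate_answer receives the object, not the dropdown string.
selected_model = phi_pipe          # what arrives when the user picks "LM-2"
if selected_model == phi_pipe:     # matches the Hugging Face pipeline instance
    choice = None                  # style templates do not apply to Phi-3.5
    retrieval_mode = None          # so neither the VDB nor the KGF branch runs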
@@ -468,21 +604,19 @@ def generate_answer(message, choice, retrieval_mode, selected_model):
     else:
         prompt_template = QA_CHAIN_PROMPT_1  # Fallback to template1
 
-    # Handle hotel-related queries
+    # Handle hotel, restaurant, and flight-related queries as before
     if "hotel" in message.lower() or "hotels" in message.lower() and "birmingham" in message.lower():
         logging.debug("Handling hotel-related query")
         response = fetch_google_hotels()
         logging.debug(f"Hotel response: {response}")
         return response, extract_addresses(response)
 
-    # Handle restaurant-related queries
     if "restaurant" in message.lower() or "restaurants" in message.lower() and "birmingham" in message.lower():
         logging.debug("Handling restaurant-related query")
         response = fetch_yelp_restaurants()
         logging.debug(f"Restaurant response: {response}")
         return response, extract_addresses(response)
 
-    # Handle flight-related queries
     if "flight" in message.lower() or "flights" in message.lower() and "birmingham" in message.lower():
         logging.debug("Handling flight-related query")
         response = fetch_google_flights()
@@ -492,51 +626,22 @@ def generate_answer(message, choice, retrieval_mode, selected_model):
     # Retrieval-based response
     if retrieval_mode == "VDB":
         logging.debug("Using VDB retrieval mode")
-        if selected_model == chat_model:
-            logging.debug("Selected model: LM-1")
-            retriever = gpt_retriever
-            context = retriever.get_relevant_documents(message)
-            logging.debug(f"Retrieved context: {context}")
-
-            prompt = prompt_template.format(context=context, question=message)
-            logging.debug(f"Generated prompt: {prompt}")
-
-            qa_chain = RetrievalQA.from_chain_type(
-                llm=chat_model,
-                chain_type="stuff",
-                retriever=retriever,
-                chain_type_kwargs={"prompt": prompt_template}
-            )
-            response = qa_chain({"query": message})
-            logging.debug(f"LM-1 response: {response}")
-            return response['result'], extract_addresses(response['result'])
-
-        elif selected_model == phi_pipe:
-            logging.debug("Selected model: LM-2")
-            retriever = phi_retriever
-            context_documents = retriever.get_relevant_documents(message)
-            context = "\n".join([doc.page_content for doc in context_documents])
-            logging.debug(f"Retrieved context for LM-2: {context}")
-
-            # Use the correct template variable
-            prompt = phi_custom_template.format(context=context, question=message)
-            logging.debug(f"Generated LM-2 prompt: {prompt}")
-
-            response = selected_model(prompt, **{
-                "max_new_tokens": 400,
-                "return_full_text": True,
-                "temperature": 0.7,
-                "do_sample": True,
-            })
-
-            if response:
-                generated_text = response[0]['generated_text']
-                logging.debug(f"LM-2 Response: {generated_text}")
-                cleaned_response = clean_response(generated_text)
-                return cleaned_response, extract_addresses(cleaned_response)
-            else:
-                logging.error("LM-2 did not return any response.")
-                return "No response generated.", []
+        retriever = gpt_retriever  # Use the same retriever for all GPT models
+        context = retriever.get_relevant_documents(message)
+        logging.debug(f"Retrieved context: {context}")
+
+        prompt = prompt_template.format(context=context, question=message)
+        logging.debug(f"Generated prompt: {prompt}")
+
+        qa_chain = RetrievalQA.from_chain_type(
+            llm=selected_model,
+            chain_type="stuff",
+            retriever=retriever,
+            chain_type_kwargs={"prompt": prompt_template}
+        )
+        response = qa_chain({"query": message})
+        logging.debug(f"Response from {selected_model}: {response}")
+        return response['result'], extract_addresses(response['result'])
 
     elif retrieval_mode == "KGF":
         logging.debug("Using KGF retrieval mode")
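Since the VDB branch now builds the RetrievalQA chain with llm=selected_model, LM-1 and LM-3 share one code path. A hedged usage sketch (the query text is illustrative; the other names come from app.py):

# Hypothetical call: route an LM-3 (gpt-4o-mini) request through the shared VDB path.
response, addresses = generate_answer(
    message="What are some attractions in Birmingham?",  # illustrative query
    choice="Conversational",                             # selects QA_CHAIN_PROMPT_2
    retrieval_mode="VDB",
    selected_model=gpt_mini_model,                       # any ChatOpenAI instance works here
)
print(response)
print(addresses)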
@@ -1258,7 +1363,7 @@ with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
         chatbot = gr.Chatbot([], elem_id="RADAR:Channel 94.1", bubble_full_width=False)
         choice = gr.Radio(label="Select Style", choices=["Details", "Conversational"], value="Conversational")
         retrieval_mode = gr.Radio(label="Retrieval Mode", choices=["VDB", "KGF"], value="VDB")
-        model_choice = gr.Dropdown(label="Choose Model", choices=["LM-1", "LM-2"], value="LM-1")
+        model_choice = gr.Dropdown(label="Choose Model", choices=["LM-1", "LM-2", "LM-3"], value="LM-1")
 
         # Link the dropdown change to handle_model_choice_change
         model_choice.change(fn=handle_model_choice_change, inputs=model_choice, outputs=[retrieval_mode, choice, choice])
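handle_model_choice_change is not shown in this diff. A hedged sketch of a handler that would be consistent with the phi_pipe guard above, treating LM-3 like LM-1 (this is an assumption, not the committed implementation):

import gradio as gr

# Hypothetical handler: the dropdown is wired to three outputs (retrieval_mode, choice, choice),
# so three updates are returned.
def handle_model_choice_change(model_choice):
    if model_choice == "LM-2":
        # Phi-3.5: retrieval mode and style options are not applicable.
        return gr.update(interactive=False), gr.update(interactive=False), gr.update(interactive=False)
    # LM-1 (gpt-4o) and LM-3 (gpt-4o-mini) keep all controls enabled.
    return gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True)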
|