Update app.py
app.py CHANGED
@@ -567,30 +567,42 @@ def clean_response(response_text):
 import traceback
 
 def generate_answer(message, choice, retrieval_mode, selected_model):
-    logging.debug(f"generate_answer called with choice: {choice}")
+    logging.debug(f"generate_answer called with choice: {choice}, retrieval_mode: {retrieval_mode}, and selected_model: {selected_model}")
 
     try:
         # Handle hotel-related queries
         if "hotel" in message.lower() or "hotels" in message.lower() and "birmingham" in message.lower():
+            logging.debug("Handling hotel-related query")
             response = fetch_google_hotels()
+            logging.debug(f"Hotel response: {response}")
             return response, extract_addresses(response)
 
         # Handle restaurant-related queries
         if "restaurant" in message.lower() or "restaurants" in message.lower() and "birmingham" in message.lower():
+            logging.debug("Handling restaurant-related query")
             response = fetch_yelp_restaurants()
+            logging.debug(f"Restaurant response: {response}")
             return response, extract_addresses(response)
 
         # Handle flight-related queries
         if "flight" in message.lower() or "flights" in message.lower() and "birmingham" in message.lower():
+            logging.debug("Handling flight-related query")
             response = fetch_google_flights()
+            logging.debug(f"Flight response: {response}")
             return response, extract_addresses(response)
 
+        # Retrieval-based response
         if retrieval_mode == "VDB":
+            logging.debug("Using VDB retrieval mode")
             if selected_model == chat_model:
+                logging.debug("Selected model: GPT-4o")
                 retriever = gpt_retriever
                 prompt_template = QA_CHAIN_PROMPT_1 if choice == "Details" else QA_CHAIN_PROMPT_2
                 context = retriever.get_relevant_documents(message)
+                logging.debug(f"Retrieved context: {context}")
+
                 prompt = prompt_template.format(context=context, question=message)
+                logging.debug(f"Generated prompt: {prompt}")
 
                 qa_chain = RetrievalQA.from_chain_type(
                     llm=chat_model,
@@ -599,17 +611,18 @@ def generate_answer(message, choice, retrieval_mode, selected_model):
                     chain_type_kwargs={"prompt": prompt_template}
                 )
                 response = qa_chain({"query": message})
+                logging.debug(f"GPT-4o response: {response}")
                 return response['result'], extract_addresses(response['result'])
 
             elif selected_model == phi_pipe:
+                logging.debug("Selected model: Phi-3.5")
                 retriever = phi_retriever
                 context_documents = retriever.get_relevant_documents(message)
                 context = "\n".join([doc.page_content for doc in context_documents])
+                logging.debug(f"Retrieved context for Phi-3.5: {context}")
 
-                # Integrating the custom context and question into the base prompt template
                 prompt = phi_base_template.format(context=context, question=message)
-
-                logging.debug(f"Phi-3.5 Prompt: {prompt}")
+                logging.debug(f"Generated Phi-3.5 prompt: {prompt}")
 
                 response = selected_model(prompt, **{
                     "max_new_tokens": 160,  # Increased to handle longer responses
@@ -628,13 +641,15 @@ def generate_answer(message, choice, retrieval_mode, selected_model):
                 return "No response generated.", []
 
         elif retrieval_mode == "KGF":
+            logging.debug("Using KGF retrieval mode")
             response = chain_neo4j.invoke({"question": message})
+            logging.debug(f"KGF response: {response}")
            return response, extract_addresses(response)
         else:
+            logging.error("Invalid retrieval mode selected.")
            return "Invalid retrieval mode selected.", []
 
     except Exception as e:
-        # Log the error details
         logging.error(f"Error in generate_answer: {str(e)}")
         logging.error(traceback.format_exc())
         return "Sorry, I encountered an error while processing your request.", []
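A few notes on the changed code. First, the keyword routing (unchanged by this commit) has an operator-precedence problem: `and` binds tighter than `or` in Python, so `"hotel" in message.lower() or "hotels" in message.lower() and "birmingham" in message.lower()` fires for any message that contains "hotel", with or without "birmingham". The same pattern repeats in the restaurant and flight branches. If the city is meant to be mandatory, the keyword test needs grouping; a minimal sketch, using a hypothetical `is_topic_query` helper that would also collapse the three near-identical conditions:

def is_topic_query(message, keywords, city="birmingham"):
    # `and` binds tighter than `or`, so the original condition parses as
    # '"hotel" in m or ("hotels" in m and "birmingham" in m)'.
    # Grouping the keyword check makes the city required for every keyword.
    msg = message.lower()
    return any(kw in msg for kw in keywords) and city in msg

# One helper replaces the three hand-written conditions.
assert is_topic_query("Any hotels in Birmingham?", ("hotel", "hotels"))
assert not is_topic_query("I love hotel lobbies", ("hotel", "hotels"))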
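Second, the new `logging.debug` calls are visible only when the process is configured at DEBUG level; the root logger defaults to WARNING. A minimal sketch of the configuration this instrumentation assumes (the format string is illustrative, not taken from app.py):

import logging

# basicConfig is a no-op if the root logger already has handlers (for
# example, handlers installed by the hosting framework); in that case,
# set the level on the existing logger instead.
logging.basicConfig(
    level=logging.DEBUG,
    format="%(asctime)s %(levelname)s %(name)s: %(message)s",
)
logging.debug("debug output enabled")  # now emitted

Logging entire retrieved contexts, prompts, and model responses at DEBUG is also very verbose and copies user messages into the logs, which may matter for a deployed Space.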
elif selected_model == phi_pipe:
|
618 |
+
logging.debug("Selected model: Phi-3.5")
|
619 |
retriever = phi_retriever
|
620 |
context_documents = retriever.get_relevant_documents(message)
|
621 |
context = "\n".join([doc.page_content for doc in context_documents])
|
622 |
+
logging.debug(f"Retrieved context for Phi-3.5: {context}")
|
623 |
|
|
|
624 |
prompt = phi_base_template.format(context=context, question=message)
|
625 |
+
logging.debug(f"Generated Phi-3.5 prompt: {prompt}")
|
|
|
626 |
|
627 |
response = selected_model(prompt, **{
|
628 |
"max_new_tokens": 160, # Increased to handle longer responses
|
|
|
641 |
return "No response generated.", []
|
642 |
|
643 |
elif retrieval_mode == "KGF":
|
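Fourth, the Phi branch calls `selected_model(prompt, **{...})`, which matches the calling convention of a Hugging Face transformers text-generation pipeline; the hunk shows only `max_new_tokens: 160`. A sketch of that call, assuming `phi_pipe` is such a pipeline (the model id and the extra keyword argument are illustrative, not from app.py):

from transformers import pipeline

# Assumed construction; app.py only shows the pipeline being called.
phi_pipe = pipeline("text-generation", model="microsoft/Phi-3.5-mini-instruct")

prompt = "Context: ...\nQuestion: What is there to do in Birmingham?"
outputs = phi_pipe(
    prompt,
    max_new_tokens=160,  # the one generation argument visible in the diff
    return_full_text=False,  # assumed: return only the completion, not the prompt
)
print(outputs[0]["generated_text"])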
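Finally, the KGF branch passes the result of `chain_neo4j.invoke({"question": message})` straight into `extract_addresses(response)`. If `chain_neo4j` is a classic LangChain `Chain`, `invoke` returns a dict rather than a string; if it is an LCEL pipeline ending in a string output parser, it returns plain text. Which one applies depends on the definition elsewhere in app.py and is worth verifying, since `extract_addresses` presumably expects a string. The `"question"` input key suggests an LCEL pipeline over a prompt with a `question` variable; purely as an assumed sketch, with the Neo4j graph lookup that would supply real context elided:

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate

# Assumed shape only; none of these lines are recovered from app.py.
prompt = ChatPromptTemplate.from_template(
    "Answer the question about Birmingham.\nQuestion: {question}"
)
chain_neo4j = prompt | chat_model | StrOutputParser()

result = chain_neo4j.invoke({"question": "What events are on this weekend?"})
print(result)  # with StrOutputParser, result is a plain string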