def handle_query(query: str, detail: bool = False) -> str:
    """
    Process a user query and return a response from Daily Wellness AI.

    :param query: The user's question.
    :param detail: Whether the user wants a more detailed response.
    :return: Response string from Daily Wellness AI.
    """
    # Relies on module-level collaborators (sanity_checker, retriever,
    # manager_agent, answer_expander, llm, logger) and the cache helpers
    # get_cached_answer / store_in_cache, which are initialized elsewhere.

    # Reject missing, non-string, or whitespace-only questions up front.
    if not isinstance(query, str) or not query.strip():
        return "Please provide a valid question."

    try:
        # Gate out questions that are not related to daily wellness.
        is_relevant = sanity_checker.is_relevant(query)
        if not is_relevant:
            return (
                "Your question seems out of context or not related to daily wellness. "
                "Please ask a wellness-related question."
            )

        # Look up the knowledge base and any previously cached answer for this query.
        retrieved = retriever.retrieve(query)
        cached_answer = get_cached_answer(query)

        # Nothing retrieved: fall back to the cache, then to a polite failure message.
        if not retrieved:
            if cached_answer:
                logger.info("No relevant entries found in knowledge base. Returning cached answer.")
                return cached_answer
            return "I'm sorry, I couldn't find an answer to your question."

        # retrieved holds (answer, score) pairs with the best match first; if even
        # the top score is weak, fall back to a web search via the manager agent.
        top_score = retrieved[0][1]
        similarity_threshold = 0.3  # tunable cutoff for an acceptable match

        if top_score < similarity_threshold:
            logger.info("Similarity score below threshold. Performing web search.")
            web_search_response = manager_agent.run(query)
            logger.debug(f"Web search response: {web_search_response}")

            if cached_answer:
                # Blend the cached answer with the fresh web results via the LLM.
                blend_prompt = (
                    "Combine the following previous answer with the new web results to create a more creative and accurate response. "
                    "Do not include any of the previous prompt or instructions in your response. "
                    "Add positivity and conclude with a short inspirational note.\n\n"
                    f"Previous Answer:\n{cached_answer}\n\n"
                    f"Web Results:\n{web_search_response}"
                )
                final_answer = llm._call(blend_prompt).strip()
            else:
                final_answer = (
                    "**Daily Wellness AI**\n\n"
                    f"{web_search_response}\n\n"
                    "Disclaimer: This information is retrieved from the web and is not a substitute for professional medical advice.\n\n"
                    "Wishing you a calm and wonderful day!"
                )

            store_in_cache(query, final_answer)
            return final_answer

        # The knowledge base produced a confident match: answer from retrieval.
        responses = [ans for ans, score in retrieved]

        if cached_answer:
            # Blend the cached answer with the newly retrieved answers via the LLM.
            blend_prompt = (
                "Combine the previous answer with the newly retrieved answers to enhance creativity and accuracy. "
                "Do not include any of the previous prompt or instructions in your response. "
                "Add new insights, creativity, and conclude with a short inspirational note.\n\n"
                f"Previous Answer:\n{cached_answer}\n\n"
                "New Retrieved Answers:\n" + "\n".join(f"- {r}" for r in responses)
            )
            final_answer = llm._call(blend_prompt).strip()
        else:
            final_answer = answer_expander.expand(query, responses, detail=detail)

        store_in_cache(query, final_answer)
        return final_answer

    except Exception as e:
        logger.error(f"Error handling query: {e}")
        logger.debug("Exception details:", exc_info=True)
        return "An error occurred while processing your request."