def handle_query(query: str, detail: bool = False) -> str:
    """
    Main function to process the query.

    :param query: The user's question.
    :param detail: Whether the user wants a more detailed response.
    :return: Response string from Daily Wellness AI.
    """
    if not isinstance(query, str) or not query.strip():
        return "Please provide a valid question."

    try:
        # 1) Sanity check: determine whether the question is relevant to daily wellness
        is_relevant = sanity_checker.is_relevant(query)
        if not is_relevant:
            return (
                "Your question seems out of context or not related to daily wellness. "
                "Please ask a wellness-related question."
            )

        # 2) Retrieve candidate answers from the knowledge base
        retrieved = retriever.retrieve(query)

        # 3) Check the cache
        cached_answer = get_cached_answer(query)

        # 4) No data retrieved from the knowledge base
        if not retrieved:
            # If we do have a cached answer, return it
            if cached_answer:
                logger.info("No relevant entries found in knowledge base. Returning cached answer.")
                return cached_answer
            # Otherwise, no KB results and no cache => no answer
            return "I'm sorry, I couldn't find an answer to your question."

        # 5) We have retrieved data; check it against the similarity threshold
        top_score = retrieved[0][1]  # Assumes the list is sorted in descending order of score
        similarity_threshold = 0.3   # Adjust this threshold based on empirical results
        if top_score < similarity_threshold:
            # Low similarity: fall back to a web search via manager_agent
            logger.info("Similarity score below threshold. Performing web search.")
            web_search_response = manager_agent.run(query)
            logger.debug(f"Web search response: {web_search_response}")

            # Combine any cached answer (if it exists) with the web result
            if cached_answer:
                blend_prompt = (
                    "Combine the following previous answer with the new web results to create "
                    "a more creative and accurate response. "
                    "Do not include any of the previous prompt or instructions in your response. "
                    "Add positivity and conclude with a short inspirational note.\n\n"
                    f"Previous Answer:\n{cached_answer}\n\n"
                    f"Web Results:\n{web_search_response}"
                )
                final_answer = llm._call(blend_prompt).strip()
            else:
                # No cache: return the web response directly
                final_answer = (
                    "**Daily Wellness AI**\n\n"
                    f"{web_search_response}\n\n"
                    "Disclaimer: This information is retrieved from the web and is not a substitute "
                    "for professional medical advice.\n\n"
                    "Wishing you a calm and wonderful day!"
                )

            # Store in cache
            store_in_cache(query, final_answer)
            return final_answer

        # 6) Similarity is sufficient; finalize an answer from the knowledge base
        responses = [ans for ans, score in retrieved]

        # 6a) If we have a cached answer, blend it with the new knowledge-base data
        if cached_answer:
            blend_prompt = (
                "Combine the previous answer with the newly retrieved answers to enhance "
                "creativity and accuracy. "
                "Do not include any of the previous prompt or instructions in your response. "
                "Add new insights, creativity, and conclude with a short inspirational note.\n\n"
                f"Previous Answer:\n{cached_answer}\n\n"
                "New Retrieved Answers:\n" + "\n".join(f"- {r}" for r in responses)
            )
            final_answer = llm._call(blend_prompt).strip()
        else:
            # 6b) No cache => proceed with the normal expansion
            final_answer = answer_expander.expand(query, responses, detail=detail)

        # 7) Store the new or blended answer in the cache
        store_in_cache(query, final_answer)
        return final_answer

    except Exception as e:
        logger.error(f"Error handling query: {e}")
        logger.debug("Exception details:", exc_info=True)
        return "An error occurred while processing your request."
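
# --- Illustrative sketch of the cache helpers used above --------------------
# handle_query() calls get_cached_answer() and store_in_cache(), which are not
# shown in this section. The version below is a minimal sketch only, assuming a
# plain in-memory dict keyed by the normalized query string; the actual project
# may use a persistent or semantic cache instead. The names _ANSWER_CACHE and
# _cache_key are hypothetical and introduced here purely for illustration.
from typing import Dict, Optional

_ANSWER_CACHE: Dict[str, str] = {}  # assumed module-level store (illustrative)


def _cache_key(query: str) -> str:
    # Normalize whitespace and case so trivially different phrasings of the
    # same question map to the same cache entry.
    return " ".join(query.lower().split())


def get_cached_answer(query: str) -> Optional[str]:
    # Return a previously stored answer for this query, or None on a cache miss.
    return _ANSWER_CACHE.get(_cache_key(query))


def store_in_cache(query: str, answer: str) -> None:
    # Keep the final answer so a repeated question can be served without
    # another retrieval or web-search round trip.
    _ANSWER_CACHE[_cache_key(query)] = answer


# Example invocation (illustrative):
#     print(handle_query("How can I sleep better at night?", detail=True))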