import json
import logging
from typing import Dict

from langchain import PromptTemplate, LLMChain

from models import chat_model

logger = logging.getLogger(__name__)

# Updated prompt template with guidance connecting themes to question responses
problem_prompt_template = PromptTemplate(
    input_variables=["responses", "internal_report"],
    template=(
        "You are a wellness analyst. You have the following user responses to health-related questions:\n"
        "{responses}\n\n"
        "You also have an internal analysis report:\n"
        "{internal_report}\n\n"
        "From these inputs, determine a 'problem severity percentage' for the user in the following areas: "
        "stress_management, low_therapy, balanced_weight, restless_night, lack_of_motivation, gut_health, anxiety, burnout.\n\n"
        "Consider the following connections between the questions and these themes:\n"
        "- stress_management is influenced by responses such as stress_level, stress_management, mood, mindfulness_frequency, and similar stress-related questions.\n"
        "- low_therapy relates to aspects of the user's mindset, wellness_goals, personal_growth_reflection, and similar therapeutic indicators.\n"
        "- balanced_weight depends on exercise, eating_habits, dietary_restrictions, activity_tracking, and other fitness/nutrition details.\n"
        "- restless_night is linked to answers about sleep duration, bedtime_routine, uninterrupted_sleep, and other sleep quality indicators.\n"
        "- lack_of_motivation correlates with mood, energy_rating, personal_growth_reflection, break_frequency, and similar motivation-related queries.\n"
        "- gut_health is connected to eating_habits, dietary_restrictions, health_issues, and other digestive or nutritional feedback.\n"
        "- anxiety is associated with responses about mood, stress_level, mindset, and related emotional well-being questions.\n"
        "- burnout may be reflected in high stress_level, low energy_rating, lack_of_motivation, and related fatigue or overwhelm indicators.\n\n"
        "Return your answer in JSON format with keys: stress_management, low_therapy, balanced_weight, restless_night, "
        "lack_of_motivation, gut_health, anxiety, burnout.\n"
        "Ensure severity percentages are numbers from 0 to 100.\n\n"
        "JSON Output:"
    )
)

problem_chain = LLMChain(llm=chat_model, prompt=problem_prompt_template)


def analyze_problems_with_chain(responses: Dict[str, str], internal_report: str) -> Dict[str, float]:
    responses_str = "\n".join(f"{q}: {a}" for q, a in responses.items())
    raw_text = problem_chain.run(responses=responses_str, internal_report=internal_report)
    try:
        # Extract JSON from the LLM output
        start_idx = raw_text.find('{')
        end_idx = raw_text.rfind('}') + 1
        json_str = raw_text[start_idx:end_idx]
        problems = json.loads(json_str)
        # Ensure all eight keys are present with default values
        for key in [
            "stress_management", "low_therapy", "balanced_weight", "restless_night",
            "lack_of_motivation", "gut_health", "anxiety", "burnout"
        ]:
            problems.setdefault(key, 0.0)
        return {k: float(v) for k, v in problems.items()}
    except Exception as e:
        logger.error(f"Error parsing problem percentages from LLM: {e}")
        # Return default values for all eight themes in case of an error
        return {
            "stress_management": 0.0,
            "low_therapy": 0.0,
            "balanced_weight": 0.0,
            "restless_night": 0.0,
            "lack_of_motivation": 0.0,
            "gut_health": 0.0,
            "anxiety": 0.0,
            "burnout": 0.0
        }
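

if __name__ == "__main__":
    # Hedged usage sketch, not part of the production flow: it only illustrates the
    # expected shape of the inputs and output. The question keys and report text
    # below are illustrative assumptions; the real question set and internal report
    # come from elsewhere in the application, and `chat_model` must be configured
    # in models.py for the chain call to succeed.
    logging.basicConfig(level=logging.INFO)

    sample_responses = {
        "stress_level": "High most weekdays",
        "sleep_duration": "Around 5 hours",
        "eating_habits": "Frequent takeout, irregular meals",
        "energy_rating": "3 out of 10",
    }
    sample_report = "User reports elevated stress and poor sleep over the past month."

    severities = analyze_problems_with_chain(sample_responses, sample_report)
    # Expected result: a dict containing all eight theme keys mapped to floats in
    # the 0-100 range (actual values depend on the model's output).
    for theme, pct in severities.items():
        print(f"{theme}: {pct:.1f}%")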