Spaces:
Sleeping
Sleeping
File size: 10,331 Bytes
a3c7b61 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 |
from backend.config import (
gpt4o_mini,
gpt4o_mini_with_tools,
gpt4o_with_tools,
deepseek_with_tools,
)
from backend.goal_extraction import extract_goal_details, generate_confirmation_prompt
from backend.prompts.personas import PERSONA_PROMPTS
from tools.goal_tools import add_goal_tool, list_goal_categories
from langchain_core.messages import HumanMessage, SystemMessage, AIMessage, ToolMessage
from backend.rag_utils import format_profile_goals_and_moods
from langsmith import traceable
from backend.cache_utils import get_cached_route, cache_route
import time
def sanitize_history(history):
    """Normalize a heterogeneous message history into plain dicts.

    Entries exposing ``role``/``content`` attributes (e.g. message objects)
    are converted to ``{"role": ..., "content": ...}`` dicts; entries that
    are already dicts pass through unchanged; anything else is dropped.
    """
    def _normalize(entry):
        # Attribute-style messages take priority over dict-style ones.
        if hasattr(entry, "role") and hasattr(entry, "content"):
            return {"role": entry.role, "content": entry.content}
        if isinstance(entry, dict):
            return entry
        return None

    return [item for item in map(_normalize, history) if item is not None]
async def route_message(user_message: str):
    """Classify a user message into one of the eight wellness domains.

    Consults the route cache first; on a miss, asks the small routing
    model for a single-word domain label. Any unrecognized label or
    model error falls back to the "main" route. Timing is printed for
    both the cached and fresh paths.
    """
    started = time.time()

    # Fast path: reuse a previously computed route for this message.
    hit = get_cached_route(user_message)
    if hit:
        print(f"[TIMING] Message routing (cached): {(time.time() - started) * 1000:.2f}ms (route: {hit})")
        return hit

    system = (
        "You are a routing assistant for a wellness chatbot. "
        "Given a user's message, decide which wellness domain it best fits. "
        "Reply with only one word (all lowercase) from this list: "
        "'mental', 'physical', 'spiritual', 'vocational', 'environmental', 'financial', 'social', or 'intellectual'."
        " If it does not fit any, reply with 'main'."
    )

    valid_routes = {
        "mental", "physical", "spiritual", "vocational",
        "environmental", "financial", "social", "intellectual",
    }

    try:
        reply = await gpt4o_mini.ainvoke([
            SystemMessage(content=system),
            HumanMessage(content=user_message),
        ])
        candidate = reply.content.strip().lower()
        # Anything the model says outside the known set maps to "main".
        chosen = candidate if candidate in valid_routes else "main"
        cache_route(user_message, chosen)
        print(f"[TIMING] Message routing (fresh): {(time.time() - started) * 1000:.2f}ms (route: {chosen})")
        return chosen
    except Exception as e:
        print(f"Routing error: {e}")
        return "main"
async def is_goal_setting_intent(user_message: str, conversation_history: "list | None" = None) -> bool:
    """Use the small LLM to decide whether the user is explicitly setting a goal.

    Args:
        user_message: The latest user message to classify.
        conversation_history: Optional prior messages. Currently unused but
            kept for interface compatibility with callers.

    Returns:
        True only when the model answers exactly 'true'; False for any
        other answer or on model error (best-effort classification).
    """
    system_prompt = (
        "Determine if the user is trying to SET A NEW GOAL or ADD A GOAL. "
        "Return 'true' only if they are explicitly trying to create/add/set a wellness goal. "
        "Return 'false' if they are just describing problems, sharing feelings, or having a conversation. "
        "Examples of goal-setting: 'I want to exercise more', 'Help me lose weight', 'Set a goal to meditate' "
        "Examples of NOT goal-setting: 'I'm stressed', 'Work is overwhelming', 'I feel anxious' "
        "Reply with only 'true' or 'false'."
    )
    try:
        response = await gpt4o_mini.ainvoke([
            SystemMessage(content=system_prompt),
            HumanMessage(content=user_message)
        ])
        return response.content.strip().lower() == 'true'
    except Exception as e:
        # Log instead of swallowing silently, consistent with route_message;
        # still default to "not a goal" so the conversation continues.
        print(f"Goal intent detection error: {e}")
        return False
async def execute_tool_call(tool_call, user_id):
    """Execute a single LangChain tool call and return its result.

    Injects ``user_id`` into the tool arguments when available, dispatches
    on the tool name, and converts any failure into an ``{"error": ...}``
    dict so the caller never sees an exception.
    """
    name = tool_call["name"]
    args = tool_call["args"]
    if user_id:
        # Tools need to know which user's goals to operate on.
        args["user_id"] = user_id
    try:
        if name == "list_goal_categories":
            return list_goal_categories.invoke(args)
        if name in ("add_goal", "add_goal_tool"):
            return add_goal_tool.invoke(args)
        return {"error": f"Unknown tool: {name}"}
    except Exception as exc:
        print(f"Tool execution error: {exc}")
        import traceback; traceback.print_exc()
        return {"error": str(exc)}
@traceable(tags=["persona", "tabi_chat"], metadata={"component": "persona_router"})
async def get_reply(agent_type, history, user_data=None, user_id=None):
    """Generate a persona reply for the given agent type and chat history.

    Flow:
      1. Tag the current LangSmith run with persona metadata (best-effort).
      2. Goal clarification: if the last message is a bare category choice,
         merge it with the preceding goal description and prompt for any
         still-missing fields; otherwise detect fresh goal-setting intent.
      3. Build the LangChain message list (profile context + persona prompt
         + history), pick the model per agent type, invoke it, and run any
         tool calls before producing the final text.

    Args:
        agent_type: Wellness domain key (e.g. 'physical', 'main').
        history: List of {"role", "content"} message dicts; last is the user's.
        user_data: Optional profile/goals/mood data used as system context.
        user_id: Optional user identifier injected into tool calls.

    Returns:
        The assistant's reply text (always a string, never raises to caller
        on model errors — a fallback message is returned instead).
    """
    print(f"Getting reply for agent_type: {agent_type}, user_id: {user_id}")
    from langsmith.run_helpers import get_current_run_tree
    try:
        current_run = get_current_run_tree()
        if current_run:
            current_run.name = f"Persona: {agent_type}"
            current_run.metadata.update({
                "persona_type": agent_type,
                "user_id": user_id,
                "has_user_data": bool(user_data)
            })
    except Exception:
        # Telemetry is best-effort; never let tracing break the chat flow.
        pass

    # ---- BEGIN: Improved Goal Clarification Logic ----
    CLARIFY_FIRST = {"physical", "mental", "spiritual", "social", "financial", "intellectual", "vocational", "environmental"}
    CATEGORY_OPTIONS = [
        "physical", "mental", "spiritual", "social",
        "financial", "intellectual", "vocational", "environmental"
    ]
    if agent_type in CLARIFY_FIRST and history:
        user_message = history[-1]["content"].strip().lower()
        if user_message in CATEGORY_OPTIONS:
            # 1. Find the previous user message (likely the goal description)
            prev_goal_msg = None
            for msg in reversed(history[:-1]):
                if msg["role"] == "user":
                    prev_goal_msg = msg["content"]
                    break
            # 2. Extract all details from previous message, then set the selected category
            details = await extract_goal_details(prev_goal_msg or "", history)
            details["category_slug"] = user_message
            if "category_slug" in details["missing_fields"]:
                details["missing_fields"].remove("category_slug")
            # 3. If other fields still missing, prompt for them
            if details["missing_fields"]:
                prompt = generate_confirmation_prompt(details)
                if prompt:
                    return prompt
        else:
            # Use LLM to determine if this is actually a goal-setting intent
            if await is_goal_setting_intent(user_message, history):
                details = await extract_goal_details(user_message, history)
                if details["missing_fields"]:
                    prompt = generate_confirmation_prompt(details)
                    if prompt:
                        return prompt
    # Otherwise, continue with normal conversation flow
    # ---- END: Improved Goal Clarification Logic ----

    # Build the LangChain message list: profile context + persona system
    # prompt first, then the conversation turns.
    lc_messages = []
    context_text = format_profile_goals_and_moods(user_data) if user_data else ""
    persona_prompt = PERSONA_PROMPTS.get(agent_type, PERSONA_PROMPTS["main"])
    lc_messages.append(SystemMessage(content=f"{context_text}\n{persona_prompt}"))
    for h in history:
        if h["role"] == "user":
            lc_messages.append(HumanMessage(content=h["content"]))
        else:
            # Any non-user role (assistant/system) is replayed as an AI turn.
            lc_messages.append(AIMessage(content=h["content"]))

    # Per-domain model selection; unknown agent types fall back to gpt4o.
    model_router = {
        "physical": deepseek_with_tools,
        "mental": gpt4o_with_tools,
        "spiritual": gpt4o_with_tools,
        "vocational": gpt4o_with_tools,
        "environmental": deepseek_with_tools,
        "financial": gpt4o_with_tools,
        "social": gpt4o_with_tools,
        "intellectual": gpt4o_with_tools,
        "main": gpt4o_mini_with_tools,
    }
    model = model_router.get(agent_type, gpt4o_with_tools)

    try:
        response = await model.ainvoke(lc_messages)
        if hasattr(response, "tool_calls") and response.tool_calls:
            tool_results = []
            for tool_call in response.tool_calls:
                result = await execute_tool_call(tool_call, user_id)
                tool_results.append(result)
            # Feed the tool outputs back so the model can compose a reply.
            lc_messages.append(response)
            for i, tool_call in enumerate(response.tool_calls):
                tool_result = tool_results[i]
                tool_message = ToolMessage(
                    content=str(tool_result),
                    tool_call_id=tool_call["id"]
                )
                lc_messages.append(tool_message)
            final_response = await model.ainvoke(lc_messages)
            if hasattr(final_response, 'content') and final_response.content:
                return final_response.content
            else:
                # No follow-up text from the model: summarize the tool outcome.
                # BUG FIX: the "trouble" message used to fire on SUCCESS
                # ("error" NOT in result); it now fires only on failure.
                if tool_results and isinstance(tool_results[0], dict):
                    if "error" in tool_results[0]:
                        return "I had trouble adding that goal: Could you clarify your goal or try again?"
                return "I've noted your goal request. What would you like to work on next?"
        if hasattr(response, 'content') and response.content:
            return response.content
        else:
            return "I'm here to help with your wellness journey! What would you like to work on today?"
    except Exception as model_error:
        print(f"Model invocation error: {model_error}")
        import traceback
        traceback.print_exc()
        print(f"[DEBUG] Using model: {model}")
        print(f"[DEBUG] Message history length: {len(lc_messages)}")
        print(f"[DEBUG] User data size: {len(str(user_data)) if user_data else 0}")
        return "I'm having trouble processing that right now. Could you try rephrasing your request?"
async def generate_chat_summary(messages):
    """
    Generate a short title (max 4 words, capped at 50 chars) from the
    opening messages of a chat; returns "Chat Summary" on failure.
    """
    role_to_cls = {"user": HumanMessage, "assistant": AIMessage}
    lc_messages = [
        SystemMessage(
            content=(
                "You're a helpful assistant that creates short, concise titles (max 4 words) "
                "to summarize a conversation. Respond with only the title text."
            )
        )
    ]
    # Only the first six messages (up to 3 user/assistant pairs) matter
    # for naming the conversation; other roles are skipped.
    for msg in messages[:6]:
        cls = role_to_cls.get(msg.get("role"))
        if cls is not None:
            lc_messages.append(cls(content=msg.get("content")))
    try:
        response = await gpt4o_with_tools.ainvoke(lc_messages)
        title = response.content.strip().strip('"')  # Remove extra quotes
        return title[:50] or "Chat Summary"
    except Exception as e:
        print("Summary generation failed:", e)
        return "Chat Summary"
|