import os
import re
import math
from datetime import datetime, timedelta
from typing import TypedDict, Annotated

import sympy as sp
from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.messages import HumanMessage, SystemMessage
from langgraph.graph import StateGraph, START, END
from langgraph.prebuilt import ToolNode

# Load environment variables
load_dotenv()

def read_system_prompt():
    """Read the system prompt from file"""
    try:
        with open('system_prompt.txt', 'r') as f:
            return f.read().strip()
    except FileNotFoundError:
        return """You are a helpful assistant tasked with answering questions using a set of tools.
Now, I will ask you a question. Report your thoughts, and finish your answer with the following template:
FINAL ANSWER: [YOUR FINAL ANSWER].
YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise. If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise. If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string.
Your answer should only start with "FINAL ANSWER: ", then follows with the answer."""

def math_calculator(expression: str) -> str:
    """
    Advanced mathematical calculator that can handle complex expressions,
    equations, symbolic math, calculus, and more using SymPy.
    """
    try:
        # Clean the expression
        expression = expression.strip()

        # Handle common mathematical notation
        expression = expression.replace('^', '**')          # Convert ^ to **
        expression = re.sub(r'\bln\b', 'log', expression)    # Natural log

        # Try to evaluate as a symbolic expression first
        try:
            result = sp.sympify(expression)
            # Simplify the symbolic expression if possible
            simplified = sp.simplify(result)
            # Try to get a numerical value
            try:
                numerical = float(simplified.evalf())
                return str(numerical)
            except Exception:
                return str(simplified)
        except Exception:
            # Fall back to basic evaluation with the math module,
            # prefixing known function names (whole words only)
            safe_expression = expression
            for func in ['sin', 'cos', 'tan', 'sqrt', 'log', 'exp', 'abs']:
                safe_expression = re.sub(rf'\b{func}\b', f'math.{func}', safe_expression)
            # Evaluate with builtins disabled
            result = eval(safe_expression, {"__builtins__": {}}, {
                "math": math,
                "pi": math.pi,
                "e": math.e
            })
            return str(result)
    except Exception as e:
        return f"Error calculating '{expression}': {str(e)}"

def date_time_processor(query: str) -> str:
    """
    Process date and time related queries, calculations, and conversions.
    """
    try:
        current_time = datetime.now()
        query_lower = query.lower()

        # More specific queries first, so they are not shadowed by the
        # generic "current date/time" branch below

        # Day of week queries
        if 'day of week' in query_lower or 'what day' in query_lower:
            return current_time.strftime('%A')

        # Year queries
        if 'year' in query_lower and 'current' in query_lower:
            return str(current_time.year)

        # Month queries
        if 'month' in query_lower and 'current' in query_lower:
            return current_time.strftime('%B')

        # Current date/time queries
        if 'current' in query_lower or 'today' in query_lower or 'now' in query_lower:
            if 'date' in query_lower:
                return current_time.strftime('%Y-%m-%d')
            elif 'time' in query_lower:
                return current_time.strftime('%H:%M:%S')
            else:
                return current_time.strftime('%Y-%m-%d %H:%M:%S')

        # Date arithmetic (simple cases)
        if 'days ago' in query_lower:
            days_match = re.search(r'(\d+)\s+days?\s+ago', query_lower)
            if days_match:
                days = int(days_match.group(1))
                past_date = current_time - timedelta(days=days)
                return past_date.strftime('%Y-%m-%d')

        if 'days from now' in query_lower or 'days later' in query_lower:
            days_match = re.search(r'(\d+)\s+days?\s+(?:from now|later)', query_lower)
            if days_match:
                days = int(days_match.group(1))
                future_date = current_time + timedelta(days=days)
                return future_date.strftime('%Y-%m-%d')

        # If no specific pattern matched, return the current datetime
        return f"Current date and time: {current_time.strftime('%Y-%m-%d %H:%M:%S')}"
    except Exception as e:
        return f"Error processing date/time query: {str(e)}"

# Define the agent state
class AgentState(TypedDict):
    messages: Annotated[list, "The messages in the conversation"]

class GAIAAgent:
    def __init__(self):
        # Check for required API keys
        openai_key = os.getenv("OPENAI_API_KEY")
        tavily_key = os.getenv("TAVILY_API_KEY")

        if not openai_key:
            raise ValueError("OPENAI_API_KEY environment variable is required")
        if not tavily_key:
            print("⚠️ TAVILY_API_KEY not found - web search will be disabled")
            self.has_search = False
        else:
            self.has_search = True

        print("✅ Initializing GAIA agent...")

        # Initialize the LLM (OpenAI GPT-4o-mini)
        self.llm = ChatOpenAI(
            model="gpt-4o-mini",
            temperature=0,
            openai_api_key=openai_key
        )

        # Initialize tools only if we have a Tavily key
        self.tools = []
        if self.has_search:
            self.search_tool = TavilySearchResults(
                max_results=5,
                tavily_api_key=tavily_key
            )
            self.tools = [self.search_tool]

        # Bind tools to the LLM (only if we have tools)
        if self.tools:
            self.llm_with_tools = self.llm.bind_tools(self.tools)
        else:
            self.llm_with_tools = self.llm

        # Build the graph
        self.graph = self._build_graph()
        self.system_prompt = read_system_prompt()

    def _build_graph(self):
        """Build the LangGraph workflow"""

        def agent_node(state: AgentState):
            """Main agent reasoning node"""
            messages = state["messages"]

            # Add the system message if not already present
            if not any(isinstance(msg, SystemMessage) for msg in messages):
                system_msg = SystemMessage(content=self.system_prompt)
                messages = [system_msg] + messages

            # Get the original question (the first HumanMessage)
            original_question = None
            for msg in messages:
                if isinstance(msg, HumanMessage):
                    original_question = msg.content
                    break

            # Check if this is a fresh question (not a follow-up after tool calls)
            last_msg = messages[-1]
            is_fresh_question = isinstance(last_msg, HumanMessage)

            # Only do special preprocessing for fresh questions
            if is_fresh_question and original_question:
                # Check if this is a math problem
                if self._is_math_problem(original_question):
                    try:
                        math_result = math_calculator(original_question)
                        enhanced_msg = (
                            f"Question: {original_question}\n\n"
                            f"Math calculation result: {math_result}\n\n"
                            "Based on this calculation, provide your final answer using the format: "
                            "FINAL ANSWER: [your answer]"
                        )
                        messages[-1] = HumanMessage(content=enhanced_msg)
                    except Exception as e:
                        print(f"Math calculation error: {e}")
                # Check if this is a date/time problem
                elif self._is_datetime_problem(original_question):
                    try:
                        datetime_result = date_time_processor(original_question)
                        enhanced_msg = (
                            f"Question: {original_question}\n\n"
                            f"Date/time processing result: {datetime_result}\n\n"
                            "Based on this information, provide your final answer using the format: "
                            "FINAL ANSWER: [your answer]"
                        )
                        messages[-1] = HumanMessage(content=enhanced_msg)
                    except Exception as e:
                        print(f"DateTime processing error: {e}")

            try:
                response = self.llm_with_tools.invoke(messages)
                return {"messages": messages + [response]}
            except Exception as e:
                print(f"LLM invocation error: {e}")
                # Return a simple fallback response on error
                error_response = HumanMessage(content=f"FINAL ANSWER: Error processing question: {str(e)}")
                return {"messages": messages + [error_response]}

        def tool_node(state: AgentState):
            """Tool execution node"""
            try:
                tool_node_instance = ToolNode(self.tools)
                result = tool_node_instance.invoke(state)
                return result
            except Exception as e:
                print(f"Tool execution error: {e}")
                # Add an error message and continue
                messages = state["messages"]
                error_msg = HumanMessage(content=f"Tool execution failed: {str(e)}. Please provide your best answer without tools.")
                return {"messages": messages + [error_msg]}

        def should_continue(state: AgentState):
            """Decide whether to continue to tools or end"""
            try:
                last_message = state["messages"][-1]

                # If we don't have tools, just end
                if not self.tools:
                    return "end"

                # If the last message has tool calls, continue to tools
                if hasattr(last_message, 'tool_calls') and last_message.tool_calls:
                    return "tools"

                # If we have a final answer, end
                if (hasattr(last_message, 'content') and
                        last_message.content and
                        "FINAL ANSWER:" in str(last_message.content)):
                    return "end"

                # Guard against runaway loops (too many iterations)
                if len(state["messages"]) > 10:
                    return "end"

                # Otherwise end (be conservative)
                return "end"
            except Exception as e:
                print(f"Should continue error: {e}")
                return "end"

        # Build the graph
        workflow = StateGraph(AgentState)

        # Add nodes
        workflow.add_node("agent", agent_node)
        workflow.add_node("tools", tool_node)

        # Add edges
        workflow.add_edge(START, "agent")
        workflow.add_conditional_edges("agent", should_continue, {
            "tools": "tools",
            "end": END
        })
        workflow.add_edge("tools", "agent")

        # Compile without a checkpointer to avoid state issues
        return workflow.compile()
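
    # Resulting topology, for reference: START -> agent -> (tools -> agent)* -> END,
    # where should_continue routes to "tools" only when the last message carries tool calls.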

    def _is_math_problem(self, text: str) -> bool:
        """Check if the text contains mathematical expressions"""
        math_indicators = [
            '+', '-', '*', '/', '^', '=', 'calculate', 'compute',
            'solve', 'equation', 'integral', 'derivative', 'sum',
            'sqrt', 'log', 'sin', 'cos', 'tan', 'exp'
        ]
        text_lower = text.lower()
        return any(indicator in text_lower for indicator in math_indicators) or \
            re.search(r'\d+[\+\-\*/\^]\d+', text) is not None

    def _is_datetime_problem(self, text: str) -> bool:
        """Check if the text contains date/time related queries"""
        datetime_indicators = [
            'date', 'time', 'day', 'month', 'year', 'today', 'yesterday',
            'tomorrow', 'current', 'now', 'ago', 'later', 'when'
        ]
        text_lower = text.lower()
        return any(indicator in text_lower for indicator in datetime_indicators)

    def __call__(self, question: str) -> str:
        """Process a question and return the answer"""
        try:
            print(f"Processing question: {question[:100]}...")

            # Check for file/media requirements that we can't handle
            if any(indicator in question.lower() for indicator in [
                'attached', 'audio', 'video', 'image', 'file', 'mp3', 'pdf',
                'excel', 'spreadsheet', 'listen to', 'watch', 'download'
            ]):
                return "Unable to process files or media attachments"

            # Create the initial state
            initial_state = {
                "messages": [HumanMessage(content=question)]
            }

            # Run the graph
            final_state = self.graph.invoke(initial_state)

            # Extract the final answer
            last_message = final_state["messages"][-1]
            response_content = last_message.content if hasattr(last_message, 'content') else str(last_message)

            # Extract just the final answer part
            final_answer = self._extract_final_answer(response_content)
            print(f"Final answer: {final_answer}")
            return final_answer
        except Exception as e:
            print(f"Error processing question: {e}")
            # Try to provide a meaningful fallback
            if "api" in str(e).lower() or "key" in str(e).lower():
                return "Error: API key configuration issue"
            elif "tool" in str(e).lower():
                return "Error: Tool execution issue"
            else:
                return "Unable to process question due to technical error"

    def _extract_final_answer(self, response: str) -> str:
        """Extract the final answer from the response"""
        if "FINAL ANSWER:" in response:
            # Find the final answer part
            parts = response.split("FINAL ANSWER:")
            if len(parts) > 1:
                answer = parts[-1].strip()
                # Keep only the first line, dropping any trailing explanation
                answer = answer.split('\n')[0].strip()
                return answer
        # If no FINAL ANSWER format found, return the whole response
        return response.strip()

# Create a function to get the agent (for use in app.py)
def create_agent():
    """Factory function to create the GAIA agent"""
    return GAIAAgent()
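
# Minimal usage sketch (not part of the original module): assumes OPENAI_API_KEY
# (and optionally TAVILY_API_KEY) are set in the environment or a .env file.
if __name__ == "__main__":
    agent = create_agent()
    # The agent is callable; it returns the extracted "FINAL ANSWER" string.
    print(agent("What is 15 * 23?"))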