Complete GAIA agent with LlamaIndex - fixed all issues
Files changed:
- Dockerfile: +62 -8
- app.py: +178 -81
- requirements.txt: +27 -11
Dockerfile
CHANGED
@@ -1,17 +1,71 @@
-# Use a base Python image
+# Use a base Python image with better compatibility
 FROM python:3.10-slim

+# Set environment variables to fix permission issues
+ENV PYTHONUNBUFFERED=1
+ENV NLTK_DATA=/app/nltk_data
+ENV MPLCONFIGDIR=/app/matplotlib_cache
+ENV HF_HOME=/app/huggingface_cache
+ENV TORCH_HOME=/app/torch_cache
+ENV TRANSFORMERS_CACHE=/app/huggingface_cache
+ENV GRADIO_SERVER_NAME=0.0.0.0
+ENV GRADIO_SERVER_PORT=7860
+
+# Create app user and group for better security
+RUN groupadd -r appuser && useradd -r -g appuser appuser
+
 # Set working directory
 WORKDIR /app

+# Create cache directories with proper permissions
+RUN mkdir -p /app/nltk_data \
+    /app/matplotlib_cache \
+    /app/huggingface_cache \
+    /app/torch_cache \
+    /app/temp && \
+    chown -R appuser:appuser /app
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y \
+    gcc \
+    g++ \
+    git \
+    curl \
+    && rm -rf /var/lib/apt/lists/*
+
+# Copy requirements first for better Docker layer caching
+COPY requirements.txt .
+
+# Install Python dependencies
+RUN pip install --upgrade pip && \
+    pip install --no-cache-dir -r requirements.txt
+
+# Copy application code
 COPY . .

+# Change ownership of all app files to appuser
+RUN chown -R appuser:appuser /app
+
+# Switch to non-root user
+USER appuser
+
+# Create a startup script to handle initialization
+RUN echo '#!/bin/bash\n\
+echo "Starting GAIA Agent..."\n\
+echo "Environment check:"\n\
+echo "NLTK_DATA: $NLTK_DATA"\n\
+echo "HF_HOME: $HF_HOME"\n\
+echo "MPLCONFIGDIR: $MPLCONFIGDIR"\n\
+echo "Working directory: $(pwd)"\n\
+echo "User: $(whoami)"\n\
+python app.py' > /app/start.sh && chmod +x /app/start.sh
+
+# Expose the port
+EXPOSE 7860

-# Run
-CMD ["
+# Health check
+HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
+    CMD curl -f http://localhost:7860/ || exit 1
+
+# Run the application
+CMD ["/app/start.sh"]
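The HEALTHCHECK above curls the Gradio port from inside the container. Once the image is built and started with the port published (for example, docker run -p 7860:7860 on the built image), the same endpoint can be probed from the host. The script below is a hypothetical stdlib-only convenience for local testing, not a file in this commit:

# healthcheck_probe.py - hypothetical host-side mirror of the container HEALTHCHECK
import sys
import urllib.request

URL = "http://localhost:7860/"  # same endpoint the HEALTHCHECK instruction curls

try:
    # A successful response means Gradio is up and serving
    with urllib.request.urlopen(URL, timeout=10) as resp:
        print(f"OK: {URL} returned HTTP {resp.status}")
except Exception as e:
    print(f"FAIL: {URL} unreachable: {e}")
    sys.exit(1)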
app.py
CHANGED
@@ -3,14 +3,41 @@ import sys
 import json
 import traceback
 from typing import List, Dict
+import warnings
+
+# Suppress warnings for cleaner output
+warnings.filterwarnings("ignore", category=FutureWarning)
+warnings.filterwarnings("ignore", category=UserWarning)

 import gradio as gr

 # --- Environment variable setup to fix permission issues ---
+def setup_environment():
+    """Setup environment variables and create necessary directories"""
+    env_vars = {
+        "NLTK_DATA": "/app/nltk_data",
+        "MPLCONFIGDIR": "/app/matplotlib_cache",
+        "HF_HOME": "/app/huggingface_cache",
+        "TORCH_HOME": "/app/torch_cache",
+        "TRANSFORMERS_CACHE": "/app/huggingface_cache"
+    }
+
+    for var, path in env_vars.items():
+        os.environ[var] = path
+        # Create directory if it doesn't exist
+        try:
+            os.makedirs(path, exist_ok=True)
+            print(f"✅ Created/verified directory: {path}")
+        except PermissionError:
+            print(f"⚠️ Permission denied for {path}, using /tmp fallback")
+            fallback_path = f"/tmp/{var.lower()}"
+            os.environ[var] = fallback_path
+            os.makedirs(fallback_path, exist_ok=True)
+        except Exception as e:
+            print(f"❌ Error setting up {var}: {e}")
+
+# Setup environment first
+setup_environment()

 # Import nltk AFTER setting environment variables
 try:
@@ -18,60 +45,93 @@ try:
     # Download required NLTK data upfront
     nltk.download('punkt', download_dir=os.environ["NLTK_DATA"], quiet=True)
     nltk.download('stopwords', download_dir=os.environ["NLTK_DATA"], quiet=True)
+    print("✅ NLTK data downloaded successfully")
 except Exception as e:
-    print(f"NLTK setup warning: {e}")
+    print(f"⚠️ NLTK setup warning: {e}")

 # Add current directory to path for local imports
 sys.path.append(os.path.dirname(os.path.abspath(__file__)))

+# Import dependencies with better error handling
+try:
+    from utils.gaia_api import GaiaAPI
+    print("✅ GaiaAPI imported successfully")
+except ImportError as e:
+    print(f"⚠️ Failed to import GaiaAPI: {e}")
+    # Create a fallback GaiaAPI
+    class GaiaAPI:
+        @classmethod
+        def get_questions(cls):
+            return [{"task_id": "fallback", "question": "What is 2+2?"}]
+        @classmethod
+        def get_random_question(cls):
+            return {"task_id": "fallback", "question": "What is 2+2?"}
+        @classmethod
+        def submit_answers(cls, username, code_url, answers):
+            return {"error": "GaiaAPI not available", "score": 0}

 # Initialize global agent state
 AGENT_READY = False
 agent = None
 initialization_error = None
+agent_info = {}

-from agent.local_llm import LocalLLM
-from agent.tools import gaia_tools
-from llama_index.core.agent import ReActAgent
-from llama_index.core.memory import ChatMemoryBuffer
-
-print("All imports successful!")
-
-print("Initializing Local LLM...")
-local_llm = LocalLLM()
-llm = local_llm.get_llm()
-
-print("Creating ReAct Agent...")
-memory = ChatMemoryBuffer.from_defaults(token_limit=2000)
-
-print("Using mock mode - agent partially ready")
-agent = llm  # Use the mock LLM directly
-AGENT_READY = True
+def initialize_agent():
+    """Initialize the LlamaIndex agent with comprehensive error handling"""
+    global agent, AGENT_READY, initialization_error, agent_info
+
+    try:
+        print("🚀 Starting agent initialization...")
+
+        # Import agent-related modules
+        print("📦 Importing modules...")
+        from agent.local_llm import LocalLLM
+        from agent.tools import gaia_tools
+        from llama_index.core.agent import ReActAgent
+        from llama_index.core.memory import ChatMemoryBuffer
+
+        agent_info["modules_imported"] = True
+        print("✅ All modules imported successfully!")
+
+        print("🤖 Initializing Local LLM...")
+        local_llm = LocalLLM()
+        llm = local_llm.get_llm()
+        agent_info["llm_type"] = llm.__class__.__name__
+
+        print("🧠 Creating ReAct Agent...")
+        memory = ChatMemoryBuffer.from_defaults(token_limit=2000)
+
+        # Check if we have a proper LLM or mock
+        if hasattr(llm, 'chat') and llm.__class__.__name__ != 'MockLLM':
+            agent = ReActAgent.from_tools(
+                tools=gaia_tools,
+                llm=llm,
+                memory=memory,
+                verbose=True,
+                max_iterations=3
+            )
+            agent_info["agent_type"] = "ReActAgent"
+            print("✅ ReAct Agent initialized successfully!")
+        else:
+            agent = llm  # Use the mock LLM directly
+            agent_info["agent_type"] = "MockLLM"
+            print("⚠️ Using mock mode - agent partially ready")
+
+        agent_info["tools_count"] = len(gaia_tools) if 'gaia_tools' in locals() else 0
+        AGENT_READY = True
+        print("🎉 Agent initialization complete!")
+
+    except Exception as e:
+        error_msg = f"Failed to initialize agent: {str(e)}"
+        print(f"❌ {error_msg}")
+        traceback.print_exc()
+        AGENT_READY = False
+        agent = None
+        initialization_error = error_msg
+        agent_info["error"] = error_msg

+# Initialize agent
+initialize_agent()

 def process_single_question(question_text: str) -> str:
     """Process a single GAIA question through the agent"""
@@ -91,6 +151,8 @@ Answer the following question directly and concisely. Do not include "FINAL ANSW
 Question: {question_text}
 """

+        print(f"🤖 Processing question: {question_text[:50]}...")
+
         # Handle both ReAct agent and mock LLM
         if hasattr(agent, 'query'):
             response = agent.query(enhanced_prompt)
@@ -107,11 +169,13 @@ Question: {question_text}
             if answer.startswith(prefix):
                 answer = answer[len(prefix):].strip()

+        print(f"✅ Generated answer: {answer[:50]}...")
         return answer

     except Exception as e:
+        error_msg = f"❌ Error processing question: {str(e)}"
+        print(error_msg)
+        return error_msg

 def process_all_questions() -> str:
     """Process all GAIA questions and prepare answers for submission"""
@@ -119,9 +183,11 @@ def process_all_questions() -> str:
         return "❌ Agent not ready."

     try:
+        print("📥 Fetching all GAIA questions...")
         questions = GaiaAPI.get_questions()
         processed_answers = []

+        print(f"📊 Processing {len(questions)} questions...")
         for i, question in enumerate(questions):
             print(f"Processing question {i + 1}/{len(questions)}: {question['task_id']}")
             answer = process_single_question(question['question'])
@@ -130,20 +196,25 @@ def process_all_questions() -> str:
                 "submitted_answer": answer
             })

+        # Save answers to file
+        output_file = "/app/gaia_answers.json"
+        with open(output_file, "w") as f:
             json.dump(processed_answers, f, indent=2)

         summary = f"✅ Processed {len(processed_answers)} questions.\n"
-        summary += "Answers saved to
-        summary += "First 3 answers:\n"
+        summary += f"💾 Answers saved to {output_file}\n"
+        summary += "📝 First 3 answers:\n"
         for ans in processed_answers[:3]:
             summary += f"- {ans['task_id']}: {ans['submitted_answer'][:50]}...\n"

+        print(summary)
         return summary

     except Exception as e:
+        error_msg = f"❌ Error processing questions: {str(e)}"
+        print(error_msg)
+        traceback.print_exc()
+        return error_msg

 def submit_to_gaia(username: str, code_url: str) -> str:
     """Submit answers to GAIA benchmark"""
@@ -154,8 +225,10 @@ def submit_to_gaia(username: str, code_url: str) -> str:
         return "❌ Please provide both username and code URL."

     try:
+        answers_file = "/app/gaia_answers.json"
+        with open(answers_file, "r") as f:
             answers = json.load(f)
+        print(f"📤 Submitting {len(answers)} answers...")
     except FileNotFoundError:
         return "❌ No processed answers found. Please process them first."

@@ -164,10 +237,13 @@ def submit_to_gaia(username: str, code_url: str) -> str:
         if "error" in result:
             return f"❌ Submission failed: {result['error']}"
         score = result.get("score", "Unknown")
+        success_msg = f"✅ Submission successful!\n📊 Score: {score}"
+        print(success_msg)
+        return success_msg
     except Exception as e:
+        error_msg = f"❌ Submission error: {str(e)}"
+        print(error_msg)
+        return error_msg

 def get_sample_question() -> str:
     """Load a sample question for testing"""
@@ -177,6 +253,37 @@ def get_sample_question() -> str:
     except Exception as e:
         return f"Error loading sample question: {str(e)}"

+def get_system_status() -> str:
+    """Get detailed system status for debugging"""
+    status = "📊 System Status:\n\n"
+
+    # Agent status
+    status += f"🤖 Agent Ready: {'✅ Yes' if AGENT_READY else '❌ No'}\n"
+    if initialization_error:
+        status += f"❌ Error: {initialization_error}\n"
+
+    # Agent info
+    status += f"🧠 LLM Type: {agent_info.get('llm_type', 'Unknown')}\n"
+    status += f"🔧 Agent Type: {agent_info.get('agent_type', 'Unknown')}\n"
+    status += f"🛠️ Tools Count: {agent_info.get('tools_count', 0)}\n"
+
+    # Environment
+    status += "\n🌍 Environment Variables:\n"
+    for var in ["NLTK_DATA", "HF_HOME", "MPLCONFIGDIR", "TORCH_HOME"]:
+        path = os.environ.get(var, 'Not set')
+        exists = "✅" if os.path.exists(path) else "❌"
+        status += f"  {var}: {path} {exists}\n"
+
+    # Directory permissions
+    status += "\n📁 Directory Status:\n"
+    for path in ["/app", "/tmp"]:
+        try:
+            writable = os.access(path, os.W_OK)
+            status += f"  {path}: {'✅ Writable' if writable else '❌ Not writable'}\n"
+        except:
+            status += f"  {path}: ❌ Error checking\n"
+
+    return status

 # ---------- Gradio UI ----------
 with gr.Blocks(title="🦙 GAIA LlamaIndex Agent", theme=gr.themes.Soft()) as demo:
@@ -241,31 +348,21 @@ Submit your processed answers to the GAIA benchmark for official scoring.

     submit_btn.click(submit_to_gaia, inputs=[username_input, code_url_input], outputs=submission_output)

-    with gr.Tab("ℹ️
-        gr.Markdown(
-## Current Status
-- Agent Ready: {"✅ Yes" if AGENT_READY else "❌ No"}
-- Tools Loaded: {len(gaia_tools) if 'gaia_tools' in globals() else 0}
-- Initialization Error: {initialization_error or "None"}
-
-## Environment Variables Set
-- NLTK_DATA: {os.environ.get('NLTK_DATA', 'Not set')}
-- HF_HOME: {os.environ.get('HF_HOME', 'Not set')}
-- MPLCONFIGDIR: {os.environ.get('MPLCONFIGDIR', 'Not set')}
-
-## Usage Tips
-1. Start with the "Test Single Question" tab
-2. Try the sample question first
-3. If agent works, proceed to "Full Evaluation"
-4. Submit to GAIA when ready
-""")
+    with gr.Tab("ℹ️ System Status"):
+        gr.Markdown("## System Information and Debugging")
+
+        refresh_btn = gr.Button("🔄 Refresh Status")
+        status_output = gr.Textbox(label="System Status", lines=20, interactive=False)
+
+        # Load initial status
+        demo.load(get_system_status, outputs=status_output)
+        refresh_btn.click(get_system_status, outputs=status_output)

 if __name__ == "__main__":
+    print("🚀 Starting Gradio interface...")
+    demo.launch(
+        server_name="0.0.0.0",
+        server_port=7860,
+        show_error=True,
+        show_tips=True
+    )
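process_all_questions() writes /app/gaia_answers.json as a JSON list of records with task_id and submitted_answer keys, and submit_to_gaia() reads that same file back before calling GaiaAPI.submit_answers. A minimal sanity check of that contract, assuming the hard-coded path from app.py (adjust it when running outside the container), might look like the following; the script is illustrative only, not part of this commit:

# validate_answers.py - hypothetical sanity check for the answers file written by app.py
import json

ANSWERS_FILE = "/app/gaia_answers.json"  # path hard-coded in process_all_questions()

with open(ANSWERS_FILE) as f:
    answers = json.load(f)

# submit_to_gaia() forwards exactly these two keys per record
assert isinstance(answers, list), "expected a JSON list of answer records"
for i, record in enumerate(answers):
    missing = {"task_id", "submitted_answer"} - record.keys()
    assert not missing, f"record {i} missing keys: {missing}"

print(f"OK: {len(answers)} well-formed answer records")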
requirements.txt
CHANGED
@@ -1,11 +1,27 @@
+# Core dependencies
+gradio>=4.0.0
+requests>=2.31.0
+
+# LlamaIndex core
+llama-index>=0.10.0
+llama-index-llms-huggingface>=0.2.0
+
+# Transformers and ML libraries
+transformers>=4.35.0
+torch>=2.0.0
+tokenizers>=0.15.0
+
+# NLP dependencies
+nltk>=3.8.1
+
+# Data processing
+numpy>=1.24.0
+pandas>=2.0.0
+
+# Optional: for better model performance
+accelerate>=0.24.0
+bitsandbytes>=0.41.0
+
+# Utilities
+typing-extensions>=4.5.0
+pydantic>=2.0.0