LamiaYT committed on
Commit
0fda38b
·
1 Parent(s): 4a42cc8

Update app.py with full Gaia LlamaIndex agent

Browse files
Files changed (1) hide show
  1. app.py +49 -37
app.py CHANGED
@@ -1,20 +1,31 @@
1
- import gradio as gr
2
  import os
3
  import sys
4
  import json
 
5
  from typing import List, Dict
6
 
7
- # Add the current directory to Python path
 
 
 
 
 
 
 
 
 
 
 
8
  sys.path.append(os.path.dirname(os.path.abspath(__file__)))
9
 
10
- # βœ… Ensure GaiaAPI is imported at the top level
11
  from utils.gaia_api import GaiaAPI
12
 
13
- # Initialize variables
14
  AGENT_READY = False
15
  agent = None
16
 
17
- # Import other agent modules
18
  try:
19
  print("Importing modules...")
20
  from agent.local_llm import LocalLLM
@@ -24,7 +35,6 @@ try:
24
 
25
  print("All imports successful!")
26
 
27
- # Initialize components
28
  print("Initializing Local LLM...")
29
  local_llm = LocalLLM()
30
  llm = local_llm.get_llm()
@@ -44,11 +54,11 @@ try:
44
 
45
  except Exception as e:
46
  print(f"Failed to initialize agent: {str(e)}")
47
- import traceback
48
  traceback.print_exc()
49
  AGENT_READY = False
50
  agent = None
51
 
 
52
  def process_single_question(question_text: str) -> str:
53
  """Process a single GAIA question through the agent"""
54
  if not AGENT_READY:
@@ -59,14 +69,14 @@ def process_single_question(question_text: str) -> str:
59
 
60
  try:
61
  enhanced_prompt = f"""
62
- Answer the following question directly and concisely. Do not include "FINAL ANSWER" or any other prefixes in your response. Just provide the answer.
63
 
64
- Question: {question_text}
65
- """
66
  response = agent.query(enhanced_prompt)
67
  answer = str(response).strip()
68
 
69
- # Remove common prefixes
70
  for prefix in ["FINAL ANSWER:", "Answer:", "The answer is:", "Final answer:"]:
71
  if answer.startswith(prefix):
72
  answer = answer[len(prefix):].strip()
@@ -74,9 +84,9 @@ def process_single_question(question_text: str) -> str:
74
  return answer
75
 
76
  except Exception as e:
77
- import traceback
78
  return f"❌ Error: {str(e)}\n\n{traceback.format_exc()}"
79
 
 
80
  def process_all_questions() -> str:
81
  """Process all GAIA questions and prepare answers for submission"""
82
  if not AGENT_READY:
@@ -87,7 +97,7 @@ def process_all_questions() -> str:
87
  processed_answers = []
88
 
89
  for i, question in enumerate(questions):
90
- print(f"Processing question {i+1}/{len(questions)}: {question['task_id']}")
91
  answer = process_single_question(question['question'])
92
  processed_answers.append({
93
  "task_id": question['task_id'],
@@ -105,9 +115,9 @@ def process_all_questions() -> str:
105
  return summary
106
 
107
  except Exception as e:
108
- import traceback
109
  return f"❌ Error: {str(e)}\n\n{traceback.format_exc()}"
110
 
 
111
  def submit_to_gaia(username: str, code_url: str) -> str:
112
  """Submit answers to GAIA benchmark"""
113
  if not AGENT_READY:
@@ -131,6 +141,7 @@ def submit_to_gaia(username: str, code_url: str) -> str:
131
  except Exception as e:
132
  return f"❌ Submission error: {str(e)}"
133
 
 
134
  def get_sample_question() -> str:
135
  """Load a sample question for testing"""
136
  try:
@@ -139,15 +150,16 @@ def get_sample_question() -> str:
139
  except Exception as e:
140
  return f"Error loading sample question: {str(e)}"
141
 
 
142
  # ---------- Gradio UI ----------
143
  with gr.Blocks(title="πŸ¦™ GAIA LlamaIndex Agent") as demo:
144
  gr.Markdown(f"""
145
- # πŸ¦™ GAIA Benchmark Agent with LlamaIndex
146
-
147
- This agent uses LlamaIndex with a local LLM to tackle GAIA benchmark questions.
148
-
149
- **Status:** {"βœ… Ready" if AGENT_READY else "❌ Not Ready"}
150
- """)
151
 
152
  with gr.Tab("πŸ”¬ Test Single Question"):
153
  gr.Markdown("Test the agent with individual questions")
@@ -183,12 +195,12 @@ with gr.Blocks(title="πŸ¦™ GAIA LlamaIndex Agent") as demo:
183
 
184
  with gr.Tab("πŸ† Submit to GAIA"):
185
  gr.Markdown("""
186
- Submit your processed answers to the GAIA benchmark for official scoring.
187
 
188
- **Requirements:**
189
- 1. Your Hugging Face username
190
- 2. Link to your Space code (e.g., https://huggingface.co/spaces/your-username/gaia-agent)
191
- """)
192
 
193
  with gr.Row():
194
  with gr.Column():
@@ -203,17 +215,17 @@ with gr.Blocks(title="πŸ¦™ GAIA LlamaIndex Agent") as demo:
203
 
204
  with gr.Tab("ℹ️ Info & Debug"):
205
  gr.Markdown(f"""
206
- ## About This Agent
207
-
208
- This agent uses:
209
- - **LlamaIndex** (ReAct Agent + Tools)
210
- - **Local LLM** (e.g., DialoGPT or fallback GPT2)
211
- - **GAIA Tools** (question fetch, file reader, math, etc.)
212
-
213
- ## Current Status
214
- - Agent Ready: {"βœ… Yes" if AGENT_READY else "❌ No"}
215
- - Tools Loaded: {len(gaia_tools) if 'gaia_tools' in globals() else 0}
216
- """)
217
 
218
  if __name__ == "__main__":
219
- demo.launch(show_error=True)
 
 
1
  import os
2
  import sys
3
  import json
4
+ import traceback
5
  from typing import List, Dict
6
 
7
+ import gradio as gr
8
+ import nltk
9
+
10
+ # --- Environment variable setup to fix permission issues in Spaces or restricted envs ---
11
+ os.environ["NLTK_DATA"] = "/tmp/nltk_data"
12
+ os.environ["MPLCONFIGDIR"] = "/tmp/matplotlib"
13
+ os.environ["TRANSFORMERS_CACHE"] = "/tmp/huggingface_cache"
14
+
15
+ # Download required NLTK data upfront
16
+ nltk.download('punkt', download_dir=os.environ["NLTK_DATA"])
17
+
18
+ # Add current directory to path for local imports
19
  sys.path.append(os.path.dirname(os.path.abspath(__file__)))
20
 
21
+ # Import GaiaAPI early
22
  from utils.gaia_api import GaiaAPI
23
 
24
+ # Initialize global agent state
25
  AGENT_READY = False
26
  agent = None
27
 
28
+ # Import agent-related modules and initialize
29
  try:
30
  print("Importing modules...")
31
  from agent.local_llm import LocalLLM
 
35
 
36
  print("All imports successful!")
37
 
 
38
  print("Initializing Local LLM...")
39
  local_llm = LocalLLM()
40
  llm = local_llm.get_llm()
 
54
 
55
  except Exception as e:
56
  print(f"Failed to initialize agent: {str(e)}")
 
57
  traceback.print_exc()
58
  AGENT_READY = False
59
  agent = None
60
 
61
+
62
  def process_single_question(question_text: str) -> str:
63
  """Process a single GAIA question through the agent"""
64
  if not AGENT_READY:
 
69
 
70
  try:
71
  enhanced_prompt = f"""
72
+ Answer the following question directly and concisely. Do not include "FINAL ANSWER" or any other prefixes in your response. Just provide the answer.
73
 
74
+ Question: {question_text}
75
+ """
76
  response = agent.query(enhanced_prompt)
77
  answer = str(response).strip()
78
 
79
+ # Remove common prefixes from the answer
80
  for prefix in ["FINAL ANSWER:", "Answer:", "The answer is:", "Final answer:"]:
81
  if answer.startswith(prefix):
82
  answer = answer[len(prefix):].strip()
 
84
  return answer
85
 
86
  except Exception as e:
 
87
  return f"❌ Error: {str(e)}\n\n{traceback.format_exc()}"
88
 
89
+
90
  def process_all_questions() -> str:
91
  """Process all GAIA questions and prepare answers for submission"""
92
  if not AGENT_READY:
 
97
  processed_answers = []
98
 
99
  for i, question in enumerate(questions):
100
+ print(f"Processing question {i + 1}/{len(questions)}: {question['task_id']}")
101
  answer = process_single_question(question['question'])
102
  processed_answers.append({
103
  "task_id": question['task_id'],
 
115
  return summary
116
 
117
  except Exception as e:
 
118
  return f"❌ Error: {str(e)}\n\n{traceback.format_exc()}"
119
 
120
+
121
  def submit_to_gaia(username: str, code_url: str) -> str:
122
  """Submit answers to GAIA benchmark"""
123
  if not AGENT_READY:
 
141
  except Exception as e:
142
  return f"❌ Submission error: {str(e)}"
143
 
144
+
145
  def get_sample_question() -> str:
146
  """Load a sample question for testing"""
147
  try:
 
150
  except Exception as e:
151
  return f"Error loading sample question: {str(e)}"
152
 
153
+
154
  # ---------- Gradio UI ----------
155
  with gr.Blocks(title="πŸ¦™ GAIA LlamaIndex Agent") as demo:
156
  gr.Markdown(f"""
157
+ # πŸ¦™ GAIA Benchmark Agent with LlamaIndex
158
+
159
+ This agent uses LlamaIndex with a local LLM to tackle GAIA benchmark questions.
160
+
161
+ **Status:** {"βœ… Ready" if AGENT_READY else "❌ Not Ready"}
162
+ """)
163
 
164
  with gr.Tab("πŸ”¬ Test Single Question"):
165
  gr.Markdown("Test the agent with individual questions")
 
195
 
196
  with gr.Tab("πŸ† Submit to GAIA"):
197
  gr.Markdown("""
198
+ Submit your processed answers to the GAIA benchmark for official scoring.
199
 
200
+ **Requirements:**
201
+ 1. Your Hugging Face username
202
+ 2. Link to your Space code (e.g., https://huggingface.co/spaces/your-username/gaia-agent)
203
+ """)
204
 
205
  with gr.Row():
206
  with gr.Column():
 
215
 
216
  with gr.Tab("ℹ️ Info & Debug"):
217
  gr.Markdown(f"""
218
+ ## About This Agent
219
+
220
+ This agent uses:
221
+ - **LlamaIndex** (ReAct Agent + Tools)
222
+ - **Local LLM** (e.g., DialoGPT or fallback GPT2)
223
+ - **GAIA Tools** (question fetch, file reader, math, etc.)
224
+
225
+ ## Current Status
226
+ - Agent Ready: {"βœ… Yes" if AGENT_READY else "❌ No"}
227
+ - Tools Loaded: {len(gaia_tools) if 'gaia_tools' in globals() else 0}
228
+ """)
229
 
230
  if __name__ == "__main__":
231
+ demo.launch(server_name="0.0.0.0", server_port=7860, show_error=True)