LamiaYT committed on
Commit
757ebd9
·
1 Parent(s): 00010f6

Initial commit with LlamaIndex-based agent

Browse files
Files changed (1) hide show
  1. app.py +51 -43
app.py CHANGED
@@ -1,4 +1,4 @@
1
- # Updated imports
2
  from llama_index.llms.huggingface import HuggingFaceLLM
3
  from llama_index.core.agent import ReActAgent
4
  from llama_index.core.tools import FunctionTool
@@ -8,6 +8,10 @@ import gradio as gr
8
  import requests
9
  import pandas as pd
10
 
 
 
 
 
11
  # --- Constants ---
12
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
13
 
@@ -15,7 +19,7 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
15
  class SmartAgent:
16
  def __init__(self):
17
  print("Initializing Local LLM Agent...")
18
-
19
  # Initialize Zephyr-7B model
20
  self.llm = HuggingFaceLLM(
21
  model_name="HuggingFaceH4/zephyr-7b-beta",
@@ -25,22 +29,22 @@ class SmartAgent:
25
  generate_kwargs={"temperature": 0.7, "do_sample": True},
26
  device_map="auto"
27
  )
28
-
29
- # Define tools
30
  self.tools = [
31
  FunctionTool.from_defaults(
32
  fn=self.web_search,
33
  name="web_search",
34
- description="Searches the web for current information when questions require up-to-date knowledge"
35
  ),
36
  FunctionTool.from_defaults(
37
  fn=self.math_calculator,
38
  name="math_calculator",
39
- description="Performs mathematical calculations when questions involve numbers or equations"
40
  )
41
  ]
42
-
43
- # Create agent
44
  self.agent = ReActAgent.from_tools(
45
  tools=self.tools,
46
  llm=self.llm,
@@ -49,18 +53,29 @@ class SmartAgent:
49
  print("Local LLM Agent initialized successfully.")
50
 
51
  def web_search(self, query: str) -> str:
52
- """Simulated web search tool (replace with actual API)"""
53
  print(f"Web search triggered for: {query[:50]}...")
54
- return f"Web results for: {query} (implement actual search API here)"
 
 
 
 
 
 
 
 
 
55
 
56
  def math_calculator(self, expression: str) -> str:
57
- """Simple math calculator"""
58
  print(f"Math calculation triggered for: {expression}")
59
  try:
60
- result = eval(expression) # Note: In production, use safer eval alternatives
61
  return str(result)
62
- except:
63
- return "Error: Could not evaluate the mathematical expression"
 
 
64
 
65
  def __call__(self, question: str) -> str:
66
  print(f"Processing question (first 50 chars): {question[:50]}...")
@@ -71,17 +86,17 @@ class SmartAgent:
71
  print(f"Agent error: {str(e)}")
72
  return f"Error processing question: {str(e)}"
73
 
74
- # --- Original Submission Logic (Keep unchanged) ---
 
75
  def run_and_submit_all(profile: gr.OAuthProfile | None):
76
  """
77
  Fetches all questions, runs the agent on them, submits all answers,
78
  and displays the results.
79
  """
80
- # --- Determine HF Space Runtime URL and Repo URL ---
81
- space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code
82
 
83
  if profile:
84
- username= f"{profile.username}"
85
  print(f"User logged in: {username}")
86
  else:
87
  print("User not logged in.")
@@ -91,38 +106,38 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
91
  questions_url = f"{api_url}/questions"
92
  submit_url = f"{api_url}/submit"
93
 
94
- # 1. Instantiate Agent
95
  try:
96
- agent = SmartAgent() # Using our new SmartAgent instead of BasicAgent
97
  except Exception as e:
98
  print(f"Error instantiating agent: {e}")
99
  return f"Error initializing agent: {e}", None
100
-
101
  agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
102
  print(agent_code)
103
 
104
- # 2. Fetch Questions
105
  print(f"Fetching questions from: {questions_url}")
106
  try:
107
  response = requests.get(questions_url, timeout=15)
108
  response.raise_for_status()
109
  questions_data = response.json()
110
  if not questions_data:
111
- print("Fetched questions list is empty.")
112
- return "Fetched questions list is empty or invalid format.", None
113
  print(f"Fetched {len(questions_data)} questions.")
114
  except requests.exceptions.RequestException as e:
115
  print(f"Error fetching questions: {e}")
116
  return f"Error fetching questions: {e}", None
117
  except requests.exceptions.JSONDecodeError as e:
118
- print(f"Error decoding JSON response from questions endpoint: {e}")
119
- print(f"Response text: {response.text[:500]}")
120
- return f"Error decoding server response for questions: {e}", None
121
  except Exception as e:
122
  print(f"An unexpected error occurred fetching questions: {e}")
123
  return f"An unexpected error occurred fetching questions: {e}", None
124
 
125
- # 3. Run your Agent
126
  results_log = []
127
  answers_payload = []
128
  print(f"Running agent on {len(questions_data)} questions...")
@@ -137,19 +152,19 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
137
  answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
138
  results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
139
  except Exception as e:
140
- print(f"Error running agent on task {task_id}: {e}")
141
- results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
142
 
143
  if not answers_payload:
144
  print("Agent did not produce any answers to submit.")
145
  return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
146
 
147
- # 4. Prepare Submission
148
  submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
149
  status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
150
  print(status_update)
151
 
152
- # 5. Submit
153
  print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
154
  try:
155
  response = requests.post(submit_url, json=submission_data, timeout=60)
@@ -192,7 +207,8 @@ def run_and_submit_all(profile: gr.OAuthProfile | None):
192
  results_df = pd.DataFrame(results_log)
193
  return status_message, results_df
194
 
195
- # --- Build Gradio Interface ---
 
196
  with gr.Blocks() as demo:
197
  gr.Markdown("# Local LLM Agent Evaluation Runner")
198
  gr.Markdown(
@@ -214,6 +230,7 @@ with gr.Blocks() as demo:
214
  outputs=[status_output, results_table]
215
  )
216
 
 
217
  if __name__ == "__main__":
218
  print("\n" + "-"*30 + " App Starting " + "-"*30)
219
  space_host_startup = os.getenv("SPACE_HOST")
@@ -221,17 +238,8 @@ if __name__ == "__main__":
221
 
222
  if space_host_startup:
223
  print(f"✅ SPACE_HOST found: {space_host_startup}")
224
- print(f" Runtime URL should be: https://{space_host_startup}.hf.space")
225
- else:
226
- print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
227
-
228
- if space_id_startup:
229
  print(f"✅ SPACE_ID found: {space_id_startup}")
230
- print(f" Repo URL: https://huggingface.co/spaces/{space_id_startup}")
231
- print(f" Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
232
  else:
233
- print("ℹ️ SPACE_ID environment variable not found (running locally?).")
234
 
235
- print("-"*(60 + len(" App Starting ")) + "\n")
236
- print("Launching Gradio Interface for Local LLM Agent Evaluation...")
237
  demo.launch()
 
1
+ # app.py
2
  from llama_index.llms.huggingface import HuggingFaceLLM
3
  from llama_index.core.agent import ReActAgent
4
  from llama_index.core.tools import FunctionTool
 
8
  import requests
9
  import pandas as pd
10
 
11
+ from duckduckgo_search import DDGS
12
+ from sympy import sympify
13
+ from sympy.core.sympify import SympifyError
14
+
15
  # --- Constants ---
16
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
17
 
 
19
  class SmartAgent:
20
  def __init__(self):
21
  print("Initializing Local LLM Agent...")
22
+
23
  # Initialize Zephyr-7B model
24
  self.llm = HuggingFaceLLM(
25
  model_name="HuggingFaceH4/zephyr-7b-beta",
 
29
  generate_kwargs={"temperature": 0.7, "do_sample": True},
30
  device_map="auto"
31
  )
32
+
33
+ # Define tools with real implementations
34
  self.tools = [
35
  FunctionTool.from_defaults(
36
  fn=self.web_search,
37
  name="web_search",
38
+ description="Searches the web for current information using DuckDuckGo."
39
  ),
40
  FunctionTool.from_defaults(
41
  fn=self.math_calculator,
42
  name="math_calculator",
43
+ description="Performs symbolic math using SymPy."
44
  )
45
  ]
46
+
47
+ # Create ReAct agent with tools
48
  self.agent = ReActAgent.from_tools(
49
  tools=self.tools,
50
  llm=self.llm,
 
53
  print("Local LLM Agent initialized successfully.")
54
 
55
  def web_search(self, query: str) -> str:
56
+ """Real web search using DuckDuckGo"""
57
  print(f"Web search triggered for: {query[:50]}...")
58
+ try:
59
+ with DDGS() as ddgs:
60
+ results = ddgs.text(query, max_results=3)
61
+ if results:
62
+ return "\n\n".join([f"{r['title']}: {r['href']}" for r in results])
63
+ else:
64
+ return "No results found."
65
+ except Exception as e:
66
+ print(f"Web search error: {e}")
67
+ return f"Error during web search: {e}"
68
 
69
  def math_calculator(self, expression: str) -> str:
70
+ """Safe math evaluation using SymPy"""
71
  print(f"Math calculation triggered for: {expression}")
72
  try:
73
+ result = sympify(expression).evalf()
74
  return str(result)
75
+ except SympifyError as e:
76
+ return f"Error: Could not parse the expression ({e})"
77
+ except Exception as e:
78
+ return f"Error: Calculation failed ({e})"
79
 
80
  def __call__(self, question: str) -> str:
81
  print(f"Processing question (first 50 chars): {question[:50]}...")
 
86
  print(f"Agent error: {str(e)}")
87
  return f"Error processing question: {str(e)}"
88
 
89
+
90
+ # --- Original Submission Logic ---
91
  def run_and_submit_all(profile: gr.OAuthProfile | None):
92
  """
93
  Fetches all questions, runs the agent on them, submits all answers,
94
  and displays the results.
95
  """
96
+ space_id = os.getenv("SPACE_ID")
 
97
 
98
  if profile:
99
+ username = f"{profile.username}"
100
  print(f"User logged in: {username}")
101
  else:
102
  print("User not logged in.")
 
106
  questions_url = f"{api_url}/questions"
107
  submit_url = f"{api_url}/submit"
108
 
109
+ # Instantiate Agent
110
  try:
111
+ agent = SmartAgent()
112
  except Exception as e:
113
  print(f"Error instantiating agent: {e}")
114
  return f"Error initializing agent: {e}", None
115
+
116
  agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
117
  print(agent_code)
118
 
119
+ # Fetch Questions
120
  print(f"Fetching questions from: {questions_url}")
121
  try:
122
  response = requests.get(questions_url, timeout=15)
123
  response.raise_for_status()
124
  questions_data = response.json()
125
  if not questions_data:
126
+ print("Fetched questions list is empty.")
127
+ return "Fetched questions list is empty or invalid format.", None
128
  print(f"Fetched {len(questions_data)} questions.")
129
  except requests.exceptions.RequestException as e:
130
  print(f"Error fetching questions: {e}")
131
  return f"Error fetching questions: {e}", None
132
  except requests.exceptions.JSONDecodeError as e:
133
+ print(f"Error decoding JSON response from questions endpoint: {e}")
134
+ print(f"Response text: {response.text[:500]}")
135
+ return f"Error decoding server response for questions: {e}", None
136
  except Exception as e:
137
  print(f"An unexpected error occurred fetching questions: {e}")
138
  return f"An unexpected error occurred fetching questions: {e}", None
139
 
140
+ # Run Agent on all questions
141
  results_log = []
142
  answers_payload = []
143
  print(f"Running agent on {len(questions_data)} questions...")
 
152
  answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
153
  results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
154
  except Exception as e:
155
+ print(f"Error running agent on task {task_id}: {e}")
156
+ results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
157
 
158
  if not answers_payload:
159
  print("Agent did not produce any answers to submit.")
160
  return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
161
 
162
+ # Prepare submission
163
  submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
164
  status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
165
  print(status_update)
166
 
167
+ # Submit
168
  print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
169
  try:
170
  response = requests.post(submit_url, json=submission_data, timeout=60)
 
207
  results_df = pd.DataFrame(results_log)
208
  return status_message, results_df
209
 
210
+
211
+ # --- Gradio UI ---
212
  with gr.Blocks() as demo:
213
  gr.Markdown("# Local LLM Agent Evaluation Runner")
214
  gr.Markdown(
 
230
  outputs=[status_output, results_table]
231
  )
232
 
233
+
234
  if __name__ == "__main__":
235
  print("\n" + "-"*30 + " App Starting " + "-"*30)
236
  space_host_startup = os.getenv("SPACE_HOST")
 
238
 
239
  if space_host_startup:
240
  print(f"✅ SPACE_HOST found: {space_host_startup}")
 
 
 
 
 
241
  print(f"✅ SPACE_ID found: {space_id_startup}")
 
 
242
  else:
243
+ print(" SPACE_HOST not found. Please set environment variables.")
244
 
 
 
245
  demo.launch()