ashishja committed on
Commit 581911f · verified · 1 Parent(s): 81917a3

updated agent code

Files changed (1)
  1. app.py +239 -190
app.py CHANGED
@@ -1,196 +1,245 @@
- import os
- import gradio as gr
  import requests
- import inspect
- import pandas as pd
-
- # (Keep Constants as is)
- # --- Constants ---
- DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
-
- # --- Basic Agent Definition ---
- # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
- class BasicAgent:
-     def __init__(self):
-         print("BasicAgent initialized.")
-     def __call__(self, question: str) -> str:
-         print(f"Agent received question (first 50 chars): {question[:50]}...")
-         fixed_answer = "This is a default answer."
-         print(f"Agent returning fixed answer: {fixed_answer}")
-         return fixed_answer
-
- def run_and_submit_all( profile: gr.OAuthProfile | None):
-     """
-     Fetches all questions, runs the BasicAgent on them, submits all answers,
-     and displays the results.
-     """
-     # --- Determine HF Space Runtime URL and Repo URL ---
-     space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code
-
-     if profile:
-         username= f"{profile.username}"
-         print(f"User logged in: {username}")
      else:
-         print("User not logged in.")
-         return "Please Login to Hugging Face with the button.", None
-
-     api_url = DEFAULT_API_URL
-     questions_url = f"{api_url}/questions"
-     submit_url = f"{api_url}/submit"
-
-     # 1. Instantiate Agent ( modify this part to create your agent)
-     try:
-         agent = BasicAgent()
-     except Exception as e:
-         print(f"Error instantiating agent: {e}")
-         return f"Error initializing agent: {e}", None
-     # In the case of an app running as a hugging Face space, this link points toward your codebase ( usefull for others so please keep it public)
-     agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
-     print(agent_code)
-
-     # 2. Fetch Questions
-     print(f"Fetching questions from: {questions_url}")
-     try:
-         response = requests.get(questions_url, timeout=15)
          response.raise_for_status()
-         questions_data = response.json()
-         if not questions_data:
-             print("Fetched questions list is empty.")
-             return "Fetched questions list is empty or invalid format.", None
-         print(f"Fetched {len(questions_data)} questions.")
-     except requests.exceptions.RequestException as e:
-         print(f"Error fetching questions: {e}")
-         return f"Error fetching questions: {e}", None
-     except requests.exceptions.JSONDecodeError as e:
-         print(f"Error decoding JSON response from questions endpoint: {e}")
-         print(f"Response text: {response.text[:500]}")
-         return f"Error decoding server response for questions: {e}", None
-     except Exception as e:
-         print(f"An unexpected error occurred fetching questions: {e}")
-         return f"An unexpected error occurred fetching questions: {e}", None
-
-     # 3. Run your Agent
-     results_log = []
-     answers_payload = []
-     print(f"Running agent on {len(questions_data)} questions...")
-     for item in questions_data:
-         task_id = item.get("task_id")
-         question_text = item.get("question")
-         if not task_id or question_text is None:
-             print(f"Skipping item with missing task_id or question: {item}")
-             continue
-         try:
-             submitted_answer = agent(question_text)
-             answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
-             results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
-         except Exception as e:
-             print(f"Error running agent on task {task_id}: {e}")
-             results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
-
-     if not answers_payload:
-         print("Agent did not produce any answers to submit.")
-         return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
-
-     # 4. Prepare Submission
-     submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
-     status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
-     print(status_update)
-
-     # 5. Submit
-     print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
-     try:
-         response = requests.post(submit_url, json=submission_data, timeout=60)
-         response.raise_for_status()
-         result_data = response.json()
-         final_status = (
-             f"Submission Successful!\n"
-             f"User: {result_data.get('username')}\n"
-             f"Overall Score: {result_data.get('score', 'N/A')}% "
-             f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
-             f"Message: {result_data.get('message', 'No message received.')}"
-         )
-         print("Submission successful.")
-         results_df = pd.DataFrame(results_log)
-         return final_status, results_df
-     except requests.exceptions.HTTPError as e:
-         error_detail = f"Server responded with status {e.response.status_code}."
-         try:
-             error_json = e.response.json()
-             error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
-         except requests.exceptions.JSONDecodeError:
-             error_detail += f" Response: {e.response.text[:500]}"
-         status_message = f"Submission Failed: {error_detail}"
-         print(status_message)
-         results_df = pd.DataFrame(results_log)
-         return status_message, results_df
-     except requests.exceptions.Timeout:
-         status_message = "Submission Failed: The request timed out."
-         print(status_message)
-         results_df = pd.DataFrame(results_log)
-         return status_message, results_df
-     except requests.exceptions.RequestException as e:
-         status_message = f"Submission Failed: Network error - {e}"
-         print(status_message)
-         results_df = pd.DataFrame(results_log)
-         return status_message, results_df
-     except Exception as e:
-         status_message = f"An unexpected error occurred during submission: {e}"
-         print(status_message)
-         results_df = pd.DataFrame(results_log)
-         return status_message, results_df
-
-
- # --- Build Gradio Interface using Blocks ---
- with gr.Blocks() as demo:
-     gr.Markdown("# Basic Agent Evaluation Runner")
-     gr.Markdown(
-         """
-         **Instructions:**
-
-         1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
-         2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
-         3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
-
-         ---
-         **Disclaimers:**
-         Once clicking on the "submit button, it can take quite some time ( this is the time for the agent to go through all the questions).
-         This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance for the delay process of the submit button, a solution could be to cache the answers and submit in a seperate action or even to answer the questions in async.
-         """
-     )
-
-     gr.LoginButton()
-
-     run_button = gr.Button("Run Evaluation & Submit All Answers")
-
-     status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
-     # Removed max_rows=10 from DataFrame constructor
-     results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
-
-     run_button.click(
-         fn=run_and_submit_all,
-         outputs=[status_output, results_table]
-     )
-
- if __name__ == "__main__":
-     print("\n" + "-"*30 + " App Starting " + "-"*30)
-     # Check for SPACE_HOST and SPACE_ID at startup for information
-     space_host_startup = os.getenv("SPACE_HOST")
-     space_id_startup = os.getenv("SPACE_ID") # Get SPACE_ID at startup
-
-     if space_host_startup:
-         print(f"✅ SPACE_HOST found: {space_host_startup}")
-         print(f" Runtime URL should be: https://{space_host_startup}.hf.space")
-     else:
-         print("ℹ️ SPACE_HOST environment variable not found (running locally?).")

-     if space_id_startup: # Print repo URLs if SPACE_ID is found
-         print(f"✅ SPACE_ID found: {space_id_startup}")
-         print(f" Repo URL: https://huggingface.co/spaces/{space_id_startup}")
-         print(f" Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
-     else:
-         print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")

-     print("-"*(60 + len(" App Starting ")) + "\n")

-     print("Launching Gradio Interface for Basic Agent Evaluation...")
-     demo.launch(debug=True, share=False)
+ from zoneinfo import ZoneInfo
+ from google.adk.agents import Agent, BaseAgent, LlmAgent
+ from google.adk.tools import google_search
+ from google.adk.runners import Runner
+ from google.adk.sessions import InMemorySessionService
+ from google.genai import types
+ import google.genai.types as types
  import requests
+ from google.adk.events import Event, EventActions
+ from google.adk.agents.invocation_context import InvocationContext
+ from typing import AsyncGenerator
+ from google.genai import types as genai_types
+ from google.adk.tools import ToolContext, FunctionTool
+ import logging
+ from google.adk.tools import built_in_code_execution
+ from google.adk.tools import agent_tool
+
+
+ logging.basicConfig(level=logging.ERROR)
+ #from google.adk.tools import agent_tool
+ # Fetched once at import time; the tool functions below fetch their own copy.
+ url = 'https://agents-course-unit4-scoring.hf.space/questions'
+ headers = {'accept': 'application/json'}
+ response = requests.get(url, headers=headers)
+
+ # class responses_api(BaseAgent):
+ #     async def _run_async_impl(self, ctx: InvocationContext) -> AsyncGenerator[Event, None]:
+ #         # This method is called when the agent is run
+ #         # You can implement your logic here
+ #         # For example, you can call an external API or perform some calculations
+ #         # and return the result
+ #         url = 'https://agents-course-unit4-scoring.hf.space/questions'
+ #         headers = {'accept': 'application/json'}
+ #         response = requests.get(url, headers=headers)
+ #         for i in response.json():
+ #             if i['file_name'] != '':
+ #                 url_file = f"https://agents-course-unit4-scoring.hf.space/files/{i['task_id']}"
+ #                 question = i['question']
+ #                 prompt = f"{question} and the file is {url_file}, give the final answer only"
+ #             else:
+ #                 question = i['question']
+ #                 prompt = f"{question} give the final answer only"
+ #             existing_responses = ctx.session.state.get("user:responses", [])
+ #             existing_responses.append(prompt)
+ #             ctx.session.state["user:responses"] = existing_responses
+
+ #         # Optionally, yield a single event to indicate completion or provide some output
+ #         yield Event(author=self.name, content=types.Content(parts=[types.Part(text=f"Fetched {len(existing_responses)} questions.")]))
+
+ def answer_questions():
+     url = 'https://agents-course-unit4-scoring.hf.space/questions'
+     headers = {'accept': 'application/json'}
+     response = requests.get(url, headers=headers)
+     prompts = []
+     for i in response.json():
+         task_id = i['task_id']
+         if i['file_name'] != '':
+             url_file = f"https://agents-course-unit4-scoring.hf.space/files/{i['task_id']}"
+             question = i['question']
+             prompt = f"{task_id}:{question} and the file is {url_file}, give the final answer only"
+         else:
+             question = i['question']
+             prompt = f"{task_id}:{question} give the final answer only"
+         prompts.append(prompt)
+     return prompts
+ #responses_api = responses_api(name='responses_api_1')
+ from typing import Dict, Any
+ def submit_questions(answers: list[str]) -> Dict[str, Any]:
+     url = 'https://agents-course-unit4-scoring.hf.space/submit'
+     payload = {
+         "username": "ashishja",
+         "agent_code": "your_agent_code",
+         "answers": answers}
+     headers = {'accept': 'application/json', "Content-Type": "application/json"}
+     response = requests.post(url, headers=headers, json=payload)
+     import json
+     print(json.dumps(payload, indent=2))
+     if response.status_code == 200:
+         return response.json()
      else:
          response.raise_for_status()
+
+ # Expose the two helper functions as ADK FunctionTools so the root agent can call them.
+ responses_api = FunctionTool(func=answer_questions)
+ submit_api = FunctionTool(func=submit_questions)
+
+ # class QuestionAnswerer(LlmAgent):
+ #     async def _run_async_impl(self, ctx: InvocationContext) -> AsyncGenerator[Event, None]:
+ #         questions_to_answer = ctx.session_service.get('fetched_questions', [])
+ #         for q in questions_to_answer:
+ #             answer = await self._llm(messages=[types.ChatMessage(role="user", parts=[types.Part(text=q)])])
+ #             yield Event(author=self.name, content=answer.content)
+
+ # qa = QuestionAnswerer(name='qa_1', model="gemini-2.0-flash", description="Question Answerer")
+
+
+ APP_NAME = "weather_sentiment_agent"
+ USER_ID = "user1234"
+ SESSION_ID = "1234"
+
+
+ code_agent = LlmAgent(
+     name='codegaiaAgent',
+     model="gemini-2.5-pro-preview-05-06",
+     description=(
+         "You are a smart agent that can write and execute code, access the given files, and answer any questions."
+     ),
+     instruction=(
+         "If the question references a .py file, get the code file and, depending on the question and the file provided, execute the code and provide the final answer. "
+         "If the question references a spreadsheet file such as .xlsx or .csv, get the file and, depending on the question and the file provided, execute code and provide the final answer. "
+         "Use code such as: import pandas as pd; file = pd.read_csv('file.csv'); then use the file to answer the question. "
+         "If the question references a .txt file, get the file and, depending on the question and the file provided, execute code and provide the final answer. "
+         "If the question references a .json file, get the file and, depending on the question and the file provided, execute code and provide the final answer. "
+         "If you are writing code or you get a code file, use the code execution tool to run the code and provide the final answer."
+     ),
+     tools=[built_in_code_execution],
+     # Add the responses_api agent as a tool
+     #sub_agents=[responses_api]
+ )
+
+
+ search_agent = LlmAgent(
+     name='searchgaiaAgent',
+     model="gemini-2.5-pro-preview-05-06",
+     description=(
+         "You are a smart agent that can search the web, access the given files, and answer any questions."
+     ),
+     instruction=(
+         "Get the associated URL, perform a search, consolidate the information found, and answer the provided question."
+     ),
+     tools=[google_search],
+     # Add the responses_api agent as a tool
+     #sub_agents=[responses_api]
+ )
+
+ image_agent = LlmAgent(
+     name='imagegaiaAgent',
+     model="gemini-2.5-pro-preview-05-06",
+     description=(
+         "You are a smart agent that, when given an image file, can answer any questions related to it."
+     ),
+     instruction=(
+         "Get the image file from the link given in the prompt, use Gemini to inspect the image, and answer the provided question."
+     ),
+     # tools=[google_search],
+     # Add the responses_api agent as a tool
+     #sub_agents=[responses_api]
+ )
+
+
+ youtube_agent = LlmAgent(
+     name='youtubegaiaAgent',
+     model="gemini-2.5-pro-preview-05-06",
+     description=(
+         "You are a smart agent that, when given a YouTube link, can watch it and answer any questions related to it."
+     ),
+     instruction=(
+         "Get the associated YouTube link, use Gemini to watch the video, and answer the provided question."
+     ),
+     # tools=[google_search],
+     # Add the responses_api agent as a tool
+     #sub_agents=[responses_api]
+ )
+
+ root_agent = LlmAgent(
+     name='basegaiaAgent',
+     model="gemini-2.5-pro-preview-05-06",
+     description=(
+         "You are a smart agent that can access the given files and answer any questions."
+     ),
+     instruction=(
+         "You are a helpful agent. When the user asks to get the questions or makes a similar request, "
+         "invoke your tool 'responses_api' to retrieve the questions. "
+         "Once you receive the list of questions, loop over each question and provide a concise answer for each based on the question and any provided file. "
+         "For every answer, return a dictionary with the keys task_id and submitted_answer, for example: "
+         "{'task_id': 'the-task-id', 'submitted_answer': 'your answer'}. "
+         "Collect all such dictionaries in a list (do not include any backslashes), and pass this list to the 'submit_api' tool to submit the answers."
+     ),
+     tools=[responses_api, submit_api, agent_tool.AgentTool(agent=code_agent),
+            agent_tool.AgentTool(agent=search_agent), agent_tool.AgentTool(agent=youtube_agent), agent_tool.AgentTool(agent=image_agent)],
+     # Add the responses_api agent as a tool
+     #sub_agents=[responses_api]
+ )
+
+ # root_agent = LlmAgent(
+ #     name='gaiaAgent',
+ #     model="gemini-2.5-pro-preview-05-06",
+ #     description=(
+ #         "You are a smart agent that can access the given files and answer any questions."
+ #     ),
+ #     instruction=(
+ #         "You are a helpful agent. When the user asks to get the questions or makes a similar request, "
+ #         "invoke the base agent. "
+ #         "Once you have the answers, check whether they are in the correct format. "
+ #         #"Collect all such dictionaries in a list (do not include any backslashes), and pass this list to the 'submit_api' tool to submit the answers."
+ #     ),
+ #     #tools=[submit_api],
+ #     # Add the responses_api agent as a tool
+ #     sub_agents=[base_agent]
+ # )
+
+ session_service = InMemorySessionService()
+ session = session_service.create_session(app_name=APP_NAME,
+                                           user_id=USER_ID,
+                                           session_id=SESSION_ID)
+
+ runner = Runner(agent=root_agent, app_name=APP_NAME, session_service=session_service)
+ # # def send_query_to_agent(root_agent, query, session):
+ # #     session = session
+ # #     content = types.Content(role='user', parts=[types.Part(text=query)])
+
+
+ # # async def main():
+ # #     await process_questions_and_answer()
+
+ # # if __name__ == "__main__":
+ # #     import asyncio
+ # #     asyncio.run(main())
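Note that the new app.py defines a root_agent and a Runner but leaves the code that would actually drive them commented out. A minimal sketch of one way to invoke it if appended to the end of the file, assuming the synchronous Runner.run generator API from google-adk; the run_agent helper and the trigger prompt are illustrative and not part of this commit:

from google.genai import types

def run_agent(query: str) -> str:
    # Wrap the user query in the Content/Part structure the ADK runner expects.
    content = types.Content(role='user', parts=[types.Part(text=query)])
    final_text = ""
    # runner.run(...) yields intermediate events (tool calls, sub-agent turns);
    # keep the text of the final response event.
    for event in runner.run(user_id=USER_ID, session_id=SESSION_ID, new_message=content):
        if event.is_final_response() and event.content and event.content.parts:
            final_text = event.content.parts[0].text
    return final_text

if __name__ == "__main__":
    # Illustrative trigger phrase: the root agent's instruction tells it to fetch
    # the questions via 'responses_api' and submit the answers via 'submit_api'.
    print(run_agent("Get the questions, answer them, and submit the answers."))

The async variant (runner.run_async driven by asyncio.run, as the commented-out skeleton above hints) would follow the same event loop over events.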