ashishja committed on
Commit
d9109f7
·
verified ·
1 Parent(s): 7cf8dc6

Update agent.py

Browse files
Files changed (1) hide show
  1. agent.py +239 -234
agent.py CHANGED
@@ -1,235 +1,240 @@
1
- from zoneinfo import ZoneInfo
2
- from google.adk.agents import Agent,BaseAgent,LlmAgent
3
- from google.adk.tools import google_search
4
- from google.adk.runners import Runner
5
- from google.adk.sessions import InMemorySessionService
6
- from google.genai import types
7
- import google.genai.types as types
8
- import requests
9
- from google.adk.events import Event, EventActions
10
- from google.adk.agents.invocation_context import InvocationContext
11
- from typing import AsyncGenerator
12
- from google.genai import types as genai_types
13
- from google.adk.tools import ToolContext, FunctionTool
14
- import logging
15
- #from google.adk.tools import built_in_code_execution
16
- from google.adk.tools import agent_tool
17
-
18
- logging.basicConfig(level=logging.ERROR)
19
- #from google.adk.tools import agent_tool
20
- url = 'https://agents-course-unit4-scoring.hf.space/questions'
21
- headers = {'accept': 'application/json'}
22
- response = requests.get(url, headers=headers)
23
-
24
- # class responses_api(BaseAgent):
25
- # async def _run_async_impl(self, ctx: InvocationContext)-> AsyncGenerator[Event, None]:
26
- # # This method is called when the agent is run
27
- # # You can implement your logic here
28
- # # For example, you can call an external API or perform some calculations
29
- # # and return the result
30
- # url = 'https://agents-course-unit4-scoring.hf.space/questions'
31
- # headers = {'accept': 'application/json'}
32
- # response = requests.get(url, headers=headers)
33
- # for i in response.json():
34
- # if i['file_name'] != '':
35
- # url_file = f"https://agents-course-unit4-scoring.hf.space/files/{i['task_id']}"
36
- # question = i['question']
37
- # prompt = f"{question} and the file is {url_file}, give the final answer only"
38
- # else:
39
- # question = i['question']
40
- # prompt = f"{question} give the final answer only"
41
- # existing_responses = ctx.session.state.get("user:responses", [])
42
- # existing_responses.append(prompt)
43
- # ctx.session_state["user:responses"] = existing_responses
44
-
45
- # # Optionally, yield a single event to indicate completion or provide some output
46
- # yield Event(author=self.name, content=types.Content(parts=[types.Part(text=f"Fetched {len(questions_data)} questions."))])
47
-
48
-
49
-
50
- def answer_questions():
51
- url = 'https://agents-course-unit4-scoring.hf.space/questions'
52
- headers = {'accept': 'application/json'}
53
- response = requests.get(url, headers=headers)
54
- prompts = []
55
- for i in response.json():
56
- task_id = i['task_id']
57
- if i['file_name'] != '':
58
- url_file = f"https://agents-course-unit4-scoring.hf.space/files/{i['task_id']}"
59
- question = i['question']
60
- prompt = f"{task_id}:{question} and the file is {url_file}, give the final answer only"
61
- else:
62
- question = i['question']
63
- prompt = f"{task_id}:{question} give the final answer only"
64
- prompts.append(prompt)
65
- return prompts
66
- #responses_api = responses_api(name= 'responses_api_1')
67
- from typing import Dict, Any
68
- def submit_questions(answers: list[str]) -> Dict[str, Any]:
69
- url = 'https://agents-course-unit4-scoring.hf.space/submit'
70
- payload = {
71
- "username": "ashishja",
72
- "agent_code": "https://huggingface.co/spaces/ashishja/Agents_Course_Final_Assignment_Ashish/tree/main",
73
- "answers": answers}
74
- headers = {'accept': 'application/json', "Content-Type": "application/json"}
75
- response = requests.post(url, headers=headers, json =payload)
76
- import json
77
- print(json.dumps(payload, indent=2))
78
- if response.status_code == 200:
79
- return response.json()
80
- else:
81
- response.raise_for_status()
82
-
83
-
84
-
85
-
86
- responses_api = FunctionTool(func= answer_questions)
87
- submit_api = FunctionTool(func=submit_questions)
88
-
89
- # class QuestionAnswerer(LlmAgent):
90
- # async def _run_async_impl(self, ctx: InvocationContext) -> AsyncGenerator[Event, None]:
91
- # questions_to_answer = ctx.session_service.get('fetched_questions', [])
92
- # for q in questions_to_answer:
93
- # answer = await self._llm(messages=[types.ChatMessage(role="user", parts=[types.Part(text=q)])])
94
- # yield Event(author=self.name, content=answer.content)
95
-
96
- # qa = QuestionAnswerer(name = 'qa_1', model="gemini-2.0-flash", description="Question Answerer")
97
-
98
-
99
-
100
-
101
-
102
-
103
-
104
-
105
- APP_NAME="weather_sentiment_agent"
106
- USER_ID="user1234"
107
- SESSION_ID="1234"
108
-
109
-
110
- code_agent = LlmAgent(
111
- name='codegaiaAgent',
112
- model="gemini-2.5-pro-preview-05-06",
113
- description=(
114
- "You are a smart agent that can write and execute code and answer any questions provided access the given files and answer"
115
- ),
116
- instruction = (
117
- "if the question contains a file with .py ,Get the code file and depending on the question and the file provided, execute the code and provide the final answer. "
118
- "If the question contains a spreadsheet file like .xlsx and .csv among others, get the file and depending on the question and the file provided, execute the code and provide the final answer. "
119
- "use code like import pandas as pd , file = pd.read_csv('file.csv') and then use the file to answer the question. "
120
- "if the question contains a file with .txt ,Get the code file and depending on the question and the file provided, execute the code and provide the final answer. "
121
- "if the question contains a file with .json ,Get the code file and depending on the question and the file provided, execute the code and provide the final answer. "
122
- "If you are writing code or if you get a code file, use the code execution tool to run the code and provide the final answer. "
123
- )
124
-
125
- ,
126
- # tools=[built_in_code_execution],
127
- # Add the responses_api agent as a tool
128
- #sub_agents=[responses_api]
129
- )
130
-
131
-
132
- search_agent = LlmAgent(
133
- name='searchgaiaAgent',
134
- model="gemini-2.5-pro-preview-05-06",
135
- description=(
136
- "You are a smart agent that can search the web and answer any questions provided access the given files and answer"
137
- ),
138
- instruction = (
139
- "Get the url associated perform a search and consolidate the information provided and answer the provided question "
140
- )
141
-
142
- ,
143
- tools=[google_search],
144
- # Add the responses_api agent as a tool
145
- #sub_agents=[responses_api]
146
- )
147
-
148
- image_agent = LlmAgent(
149
- name='imagegaiaAgent',
150
- model="gemini-2.5-pro-preview-05-06",
151
- description=(
152
- "You are a smart agent that can when given a image file and answer any questions related to it"
153
- ),
154
- instruction = (
155
- "Get the image file from the link associated in the prompt use Gemini to watch the video and answer the provided question ")
156
-
157
- ,
158
- # tools=[google_search],
159
- # Add the responses_api agent as a tool
160
- #sub_agents=[responses_api]
161
- )
162
-
163
-
164
- youtube_agent = LlmAgent(
165
- name='youtubegaiaAgent',
166
- model="gemini-2.5-pro-preview-05-06",
167
- description=(
168
- "You are a smart agent that can when given a youtube link watch it and answer any questions related to it"
169
- ),
170
- instruction = (
171
- "Get the youtube link associated use Gemini to watch the video and answer the provided question ")
172
-
173
- ,
174
- # tools=[google_search],
175
- # Add the responses_api agent as a tool
176
- #sub_agents=[responses_api]
177
- )
178
-
179
- root_agent = LlmAgent(
180
- name='basegaiaAgent',
181
- model="gemini-2.5-pro-preview-05-06",
182
- description=(
183
- "You are a smart agent that can answer any questions provided access the given files and answer"
184
- ),
185
- instruction = (
186
- "You are a helpful agent. When the user asks to get the questions or makes a similar request, "
187
- "invoke your tool 'responses_api' to retrieve the questions. "
188
- "Once you receive the list of questions, loop over each question and provide a concise answer for each based on the question and any provided file. "
189
- "For every answer, return a dictionary with the keys task_id and submitted_answer, for example: "
190
- "{'task_id': 'the-task-id', 'submitted_answer': 'your answer'}. "
191
- "Collect all such dictionaries in a list (do not include any backslashes), and pass this list to the 'submit_api' tool to submit the answers."
192
- )
193
-
194
- ,
195
- tools=[responses_api,submit_api,agent_tool.AgentTool(agent = code_agent),\
196
- agent_tool.AgentTool(agent = search_agent), agent_tool.AgentTool(youtube_agent), agent_tool.AgentTool(image_agent)],
197
- # Add the responses_api agent as a tool
198
- #sub_agents=[responses_api]
199
- )
200
-
201
- # root_agent = LlmAgent(
202
- # name='gaiaAgent',
203
- # model="gemini-2.5-pro-preview-05-06",
204
- # description=(
205
- # "You are a smart agent that can answer any questions provided access the given files and answer"
206
- # ),
207
- # instruction = (
208
- # "You are a helpful agent. When the user asks to get the questions or makes a similar request, "
209
- # "invoke base agent. "
210
- # "Once you the answers check if are in correct format. "
211
- # #"Collect all such dictionaries in a list (do not include any backslashes), and pass this list to the 'submit_api' tool to submit the answers."
212
- # )
213
-
214
- # ,
215
- # #tools=[submit_api],
216
- # # Add the responses_api agent as a tool
217
- # sub_agents=[base_agent]
218
- # )
219
-
220
- session_service = InMemorySessionService()
221
- session = session_service.create_session(app_name=APP_NAME, \
222
- user_id=USER_ID,\
223
- session_id=SESSION_ID)
224
-
225
- runner = Runner(agent=root_agent, app_name=APP_NAME, session_service=session_service)
226
- # # def send_query_to_agent(root_agent, query, session):
227
- # # session = session
228
- # # content = types.Content(role='user', parts=[types.Part(text=query)])
229
-
230
- # # async def main():
231
- # # await process_questions_and_answer()
232
-
233
- # # if __name__ == "__main__":
234
- # # import asyncio
 
 
 
 
 
235
  # # asyncio.run(main())
 
1
+ from zoneinfo import ZoneInfo
2
+ from google.adk.agents import Agent,BaseAgent,LlmAgent
3
+ from google.adk.tools import google_search
4
+ from google.adk.runners import Runner
5
+ from google.adk.sessions import InMemorySessionService
6
+ from google.genai import types
7
+ import google.genai.types as types
8
+ import requests
9
+ from google.adk.events import Event, EventActions
10
+ from google.adk.agents.invocation_context import InvocationContext
11
+ from typing import AsyncGenerator
12
+ from google.genai import types as genai_types
13
+ from google.adk.tools import ToolContext, FunctionTool
14
+ import logging
15
+ #from google.adk.tools import built_in_code_execution
16
+ from google.adk.tools import agent_tool
17
+
18
+ logging.basicConfig(level=logging.ERROR)
19
+ #from google.adk.tools import agent_tool
20
+ url = 'https://agents-course-unit4-scoring.hf.space/questions'
21
+ headers = {'accept': 'application/json'}
22
+ response = requests.get(url, headers=headers)
23
+
24
+ # class responses_api(BaseAgent):
25
+ # async def _run_async_impl(self, ctx: InvocationContext)-> AsyncGenerator[Event, None]:
26
+ # # This method is called when the agent is run
27
+ # # You can implement your logic here
28
+ # # For example, you can call an external API or perform some calculations
29
+ # # and return the result
30
+ # url = 'https://agents-course-unit4-scoring.hf.space/questions'
31
+ # headers = {'accept': 'application/json'}
32
+ # response = requests.get(url, headers=headers)
33
+ # for i in response.json():
34
+ # if i['file_name'] != '':
35
+ # url_file = f"https://agents-course-unit4-scoring.hf.space/files/{i['task_id']}"
36
+ # question = i['question']
37
+ # prompt = f"{question} and the file is {url_file}, give the final answer only"
38
+ # else:
39
+ # question = i['question']
40
+ # prompt = f"{question} give the final answer only"
41
+ # existing_responses = ctx.session.state.get("user:responses", [])
42
+ # existing_responses.append(prompt)
43
+ # ctx.session_state["user:responses"] = existing_responses
44
+
45
+ # # Optionally, yield a single event to indicate completion or provide some output
46
+ # yield Event(author=self.name, content=types.Content(parts=[types.Part(text=f"Fetched {len(questions_data)} questions."))])
47
+
48
+
49
+
50
def answer_questions():
    """Fetch the GAIA evaluation questions from the scoring API.

    Returns:
        list[dict]: Question records (each containing 'task_id', 'question'
        and 'file_name') on success, or a human-readable error string on
        HTTP failure so the calling LLM tool gets a message instead of an
        exception.
    """
    url = 'https://agents-course-unit4-scoring.hf.space/questions'
    headers = {'accept': 'application/json'}
    # Time-bound the request so the agent cannot hang indefinitely on a
    # slow or unreachable endpoint (the original call had no timeout).
    response = requests.get(url, headers=headers, timeout=30)

    if response.status_code != 200:
        return f"Error fetching questions: {response.status_code}"

    return response.json()
61
+ #responses_api = responses_api(name= 'responses_api_1')
62
+ from typing import Dict, Any
63
def submit_questions(answers: list[str]) -> Dict[str, Any]:
    """Submit the collected answers to the GAIA scoring endpoint.

    Args:
        answers: List of answer dicts, each shaped like
            {'task_id': ..., 'submitted_answer': ...}.

    Returns:
        The scoring API's JSON response on success.

    Raises:
        requests.HTTPError: If the submission request does not return 200.
    """
    # Hoisted to the top of the function (originally imported mid-body,
    # after the payload was built).
    import json

    url = 'https://agents-course-unit4-scoring.hf.space/submit'
    payload = {
        "username": "ashishja",
        "agent_code": "https://huggingface.co/spaces/ashishja/Agents_Course_Final_Assignment_Ashish/tree/main",
        "answers": answers,
    }
    # Log the outgoing payload to help debug rejected submissions.
    print(json.dumps(payload, indent=2))

    headers = {'accept': 'application/json', "Content-Type": "application/json"}
    # Time-bound the request; the original POST could hang forever.
    response = requests.post(url, headers=headers, json=payload, timeout=60)
    if response.status_code == 200:
        return response.json()
    response.raise_for_status()
77
+
78
+
79
+
80
+
81
# Expose the two HTTP helpers to the LLM as callable ADK function tools.
responses_api = FunctionTool(func=answer_questions)
submit_api = FunctionTool(func=submit_questions)
83
+
84
+ # class QuestionAnswerer(LlmAgent):
85
+ # async def _run_async_impl(self, ctx: InvocationContext) -> AsyncGenerator[Event, None]:
86
+ # questions_to_answer = ctx.session_service.get('fetched_questions', [])
87
+ # for q in questions_to_answer:
88
+ # answer = await self._llm(messages=[types.ChatMessage(role="user", parts=[types.Part(text=q)])])
89
+ # yield Event(author=self.name, content=answer.content)
90
+
91
+ # qa = QuestionAnswerer(name = 'qa_1', model="gemini-2.0-flash", description="Question Answerer")
92
+
93
+
94
+
95
+
96
+
97
+
98
+
99
+
100
+ APP_NAME="weather_sentiment_agent"
101
+ USER_ID="user1234"
102
+ SESSION_ID="1234"
103
+
104
+
105
# Sub-agent specialised in answering questions that come with attached
# files (.py, .xlsx/.csv, .txt, .json) by writing and running code.
code_agent = LlmAgent(
    name='codegaiaAgent',
    model="gemini-2.5-pro-preview-05-06",
    description=(
        "You are a smart agent that can write and execute code and answer any questions provided access the given files and answer"
    ),
    instruction=(
        "if the question contains a file with .py ,Get the code file and depending on the question and the file provided, execute the code and provide the final answer. "
        "If the question contains a spreadsheet file like .xlsx and .csv among others, get the file and depending on the question and the file provided, execute the code and provide the final answer. "
        "use code like import pandas as pd , file = pd.read_csv('file.csv') and then use the file to answer the question. "
        "if the question contains a file with .txt ,Get the code file and depending on the question and the file provided, execute the code and provide the final answer. "
        "if the question contains a file with .json ,Get the code file and depending on the question and the file provided, execute the code and provide the final answer. "
        "If you are writing code or if you get a code file, use the code execution tool to run the code and provide the final answer. "
    ),
    # tools=[built_in_code_execution],
    # NOTE(review): no code-execution tool is attached, yet the instruction
    # tells the model to "use the code execution tool" — confirm whether
    # built_in_code_execution should be re-enabled.
)
125
+
126
+
127
# Sub-agent that answers questions requiring live web search via the
# built-in google_search tool.
search_agent = LlmAgent(
    name='searchgaiaAgent',
    model="gemini-2.5-pro-preview-05-06",
    description=(
        "You are a smart agent that can search the web and answer any questions provided access the given files and answer"
    ),
    instruction=(
        "Get the url associated perform a search and consolidate the information provided and answer the provided question "
    ),
    tools=[google_search],
)
142
+
143
# Sub-agent that answers questions about an attached image file.
image_agent = LlmAgent(
    name='imagegaiaAgent',
    model="gemini-2.5-pro-preview-05-06",
    description=(
        "You are a smart agent that can when given a image file and answer any questions related to it"
    ),
    # FIX: the original instruction said "use Gemini to watch the video" —
    # a copy-paste from the YouTube agent. This agent handles images.
    instruction=(
        "Get the image file from the link associated in the prompt use Gemini to analyze the image and answer the provided question "
    ),
    # tools=[google_search],
)
157
+
158
+
159
# Sub-agent that answers questions about a linked YouTube video.
youtube_agent = LlmAgent(
    name='youtubegaiaAgent',
    model="gemini-2.5-pro-preview-05-06",
    description=(
        "You are a smart agent that can when given a youtube link watch it and answer any questions related to it"
    ),
    instruction=(
        "Get the youtube link associated use Gemini to watch the video and answer the provided question "
    ),
    # tools=[google_search],
)
173
+
174
# Orchestrating agent: fetches the GAIA questions, dispatches each one to
# the appropriate specialist sub-agent, then submits the collected answers.
root_agent = LlmAgent(
    name='basegaiaAgent',
    model="gemini-2.5-pro-preview-05-06",
    description=(
        "You are a smart agent that can answer any questions provided access the given files and answer"
    ),
    instruction=(
        "You are a helpful agent. When the user asks to get the questions or makes a similar request, "
        "invoke your tool 'responses_api' to retrieve the questions data. "
        "The questions data will be a list of dictionaries, each containing 'task_id', 'question', and 'file_name' fields. "
        "For each question in the data: "
        "1. If file_name is not empty, the file can be accessed at https://agents-course-unit4-scoring.hf.space/files/{task_id} "
        "2. Use appropriate sub-agents based on question type (code_agent for coding, search_agent for web search, etc.) "
        "3. Provide a concise, direct answer for each question "
        "4. Return a dictionary with keys 'task_id' and 'submitted_answer' for each answer "
        "5. Collect all dictionaries in a list and pass to 'submit_api' tool to submit the answers. "
        "Always provide direct, factual answers without prefixes like 'The answer is:' or 'Final answer:'"
    ),
    # CONSISTENCY FIX: the original mixed keyword (agent=...) and positional
    # AgentTool construction; all four now use the keyword form the file
    # itself uses for code_agent and search_agent.
    tools=[
        responses_api,
        submit_api,
        agent_tool.AgentTool(agent=code_agent),
        agent_tool.AgentTool(agent=search_agent),
        agent_tool.AgentTool(agent=youtube_agent),
        agent_tool.AgentTool(agent=image_agent),
    ],
)
199
+
200
+ # root_agent = LlmAgent(
201
+ # name='gaiaAgent',
202
+ # model="gemini-2.5-pro-preview-05-06",
203
+ # description=(
204
+ # "You are a smart agent that can answer any questions provided access the given files and answer"
205
+ # ),
206
+ # instruction = (
207
+ # "You are a helpful agent. When the user asks to get the questions or makes a similar request, "
208
+ # "invoke base agent. "
209
+ # "Once you the answers check if are in correct format. "
210
+ # #"Collect all such dictionaries in a list (do not include any backslashes), and pass this list to the 'submit_api' tool to submit the answers."
211
+ # )
212
+
213
+ # ,
214
+ # #tools=[submit_api],
215
+ # # Add the responses_api agent as a tool
216
+ # sub_agents=[base_agent]
217
+ # )
218
+
219
# Wire up an in-memory session store, create the default session, and
# register the root agent with the Runner.
session_service = InMemorySessionService()

try:
    # NOTE(review): in newer google-adk releases create_session is async and
    # returns a coroutine when called like this — confirm the installed
    # version before relying on `session` being a Session object.
    session = session_service.create_session(
        app_name=APP_NAME,
        user_id=USER_ID,
        session_id=SESSION_ID,
    )
    print(f"✅ Default session created: {SESSION_ID}")
except Exception as e:
    # Broad catch is deliberate: a session-setup failure should not crash
    # module import; downstream code must tolerate session being None.
    print(f"⚠️ Error creating default session: {e}")
    session = None

runner = Runner(agent=root_agent, app_name=APP_NAME, session_service=session_service)
231
+ # # def send_query_to_agent(root_agent, query, session):
232
+ # # session = session
233
+ # # content = types.Content(role='user', parts=[types.Part(text=query)])
234
+
235
+ # # async def main():
236
+ # # await process_questions_and_answer()
237
+
238
+ # # if __name__ == "__main__":
239
+ # # import asyncio
240
  # # asyncio.run(main())