ashishja committed on
Commit b4d1371 · verified · 1 Parent(s): 2cbcfd3

Upload agent.py

Files changed (1)
  1. agent.py +235 -0
agent.py ADDED
@@ -0,0 +1,235 @@
+ from zoneinfo import ZoneInfo
+ from typing import Any, AsyncGenerator, Dict
+ from google.adk.agents import Agent, BaseAgent, LlmAgent
+ from google.adk.tools import google_search
+ from google.adk.runners import Runner
+ from google.adk.sessions import InMemorySessionService
+ from google.genai import types
+ import requests
+ import json
+ import logging
+ from google.adk.events import Event, EventActions
+ from google.adk.agents.invocation_context import InvocationContext
+ from google.adk.tools import ToolContext, FunctionTool
+ #from google.adk.tools import built_in_code_execution
+ from google.adk.tools import agent_tool
+
+ logging.basicConfig(level=logging.ERROR)
+ # Questions endpoint for the Agents Course Unit 4 scoring API.
+ # Note: this import-time request is not reused below; answer_questions() fetches the list again.
+ url = 'https://agents-course-unit4-scoring.hf.space/questions'
+ headers = {'accept': 'application/json'}
+ response = requests.get(url, headers=headers)
+
+ # class responses_api(BaseAgent):
+ #     async def _run_async_impl(self, ctx: InvocationContext) -> AsyncGenerator[Event, None]:
+ #         # This method is called when the agent is run.
+ #         # You can implement your logic here, for example calling an external API
+ #         # or performing some calculations, and return the result.
+ #         url = 'https://agents-course-unit4-scoring.hf.space/questions'
+ #         headers = {'accept': 'application/json'}
+ #         response = requests.get(url, headers=headers)
+ #         for i in response.json():
+ #             if i['file_name'] != '':
+ #                 url_file = f"https://agents-course-unit4-scoring.hf.space/files/{i['task_id']}"
+ #                 question = i['question']
+ #                 prompt = f"{question} and the file is {url_file}, give the final answer only"
+ #             else:
+ #                 question = i['question']
+ #                 prompt = f"{question} give the final answer only"
+ #             existing_responses = ctx.session.state.get("user:responses", [])
+ #             existing_responses.append(prompt)
+ #             ctx.session.state["user:responses"] = existing_responses
+
+ #         # Optionally, yield a single event to indicate completion or provide some output
+ #         yield Event(author=self.name, content=types.Content(parts=[types.Part(text=f"Fetched {len(existing_responses)} questions.")]))
+
+
+ def answer_questions():
+     """Fetch all questions from the scoring API and format each one as a prompt string."""
+     url = 'https://agents-course-unit4-scoring.hf.space/questions'
+     headers = {'accept': 'application/json'}
+     response = requests.get(url, headers=headers)
+     prompts = []
+     for i in response.json():
+         task_id = i['task_id']
+         if i['file_name'] != '':
+             # The question has an attached file; include its download URL in the prompt.
+             url_file = f"https://agents-course-unit4-scoring.hf.space/files/{i['task_id']}"
+             question = i['question']
+             prompt = f"{task_id}:{question} and the file is {url_file}, give the final answer only"
+         else:
+             question = i['question']
+             prompt = f"{task_id}:{question} give the final answer only"
+         prompts.append(prompt)
+     return prompts
+ #responses_api = responses_api(name='responses_api_1')
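+ # Illustrative example (hypothetical task id and question) of a prompt produced by answer_questions():
+ # "some-task-id:How many studio albums did the band release? give the final answer only"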
+ def submit_questions(answers: list[Dict[str, str]]) -> Dict[str, Any]:
+     """Submit the list of {'task_id', 'submitted_answer'} dictionaries to the scoring API."""
+     url = 'https://agents-course-unit4-scoring.hf.space/submit'
+     payload = {
+         "username": "ashishja",
+         "agent_code": "https://huggingface.co/spaces/ashishja/Agents_Course_Final_Assignment_Ashish/tree/main",
+         "answers": answers}
+     headers = {'accept': 'application/json', "Content-Type": "application/json"}
+     print(json.dumps(payload, indent=2))  # debug: log the payload being submitted
+     response = requests.post(url, headers=headers, json=payload)
+     if response.status_code == 200:
+         return response.json()
+     else:
+         response.raise_for_status()
+
+
+ # Wrap the plain Python functions as ADK tools so the root agent can call them.
+ responses_api = FunctionTool(func=answer_questions)
+ submit_api = FunctionTool(func=submit_questions)
+
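+ # Illustrative call (hypothetical values) showing the answer shape described in the root agent's
+ # instruction below, i.e. a list of {'task_id', 'submitted_answer'} dictionaries:
+ # submit_questions([{"task_id": "some-task-id", "submitted_answer": "42"}])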
+ # class QuestionAnswerer(LlmAgent):
+ #     async def _run_async_impl(self, ctx: InvocationContext) -> AsyncGenerator[Event, None]:
+ #         questions_to_answer = ctx.session_service.get('fetched_questions', [])
+ #         for q in questions_to_answer:
+ #             answer = await self._llm(messages=[types.ChatMessage(role="user", parts=[types.Part(text=q)])])
+ #             yield Event(author=self.name, content=answer.content)
+
+ # qa = QuestionAnswerer(name='qa_1', model="gemini-2.0-flash", description="Question Answerer")
+
+
+ APP_NAME = "weather_sentiment_agent"
+ USER_ID = "user1234"
+ SESSION_ID = "1234"
+
+
+ code_agent = LlmAgent(
+     name='codegaiaAgent',
+     model="gemini-2.5-pro-preview-05-06",
+     description=(
+         "You are a smart agent that can write and execute code, access the given files, and answer any question."
+     ),
+     instruction=(
+         "If the question references a .py file, get the code file and, depending on the question and the file provided, execute the code and provide the final answer. "
+         "If the question references a spreadsheet file such as .xlsx or .csv, get the file and, depending on the question, write and execute code over it and provide the final answer. "
+         "Use code like: import pandas as pd; file = pd.read_csv('file.csv'), and then use the file to answer the question. "
+         "If the question references a .txt file, get the file and, depending on the question and the file provided, provide the final answer. "
+         "If the question references a .json file, get the file and, depending on the question and the file provided, provide the final answer. "
+         "If you are writing code or receive a code file, use the code execution tool to run the code and provide the final answer. "
+     ),
+     # tools=[built_in_code_execution],
+     # Add the responses_api agent as a tool
+     #sub_agents=[responses_api]
+ )
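+ # Note: no tools are currently attached to code_agent; the built_in_code_execution import at the top
+ # is commented out, so this agent answers with the model alone.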
+
+
+ search_agent = LlmAgent(
+     name='searchgaiaAgent',
+     model="gemini-2.5-pro-preview-05-06",
+     description=(
+         "You are a smart agent that can search the web, access the given files, and answer any question."
+     ),
+     instruction=(
+         "Take the URL associated with the question, perform a search, consolidate the information found, and answer the provided question. "
+     ),
+     tools=[google_search],
+     # Add the responses_api agent as a tool
+     #sub_agents=[responses_api]
+ )
+
+ image_agent = LlmAgent(
+     name='imagegaiaAgent',
+     model="gemini-2.5-pro-preview-05-06",
+     description=(
+         "You are a smart agent that, given an image file, can answer any question related to it."
+     ),
+     instruction=(
+         "Get the image file from the link in the prompt, use Gemini to analyze the image, and answer the provided question. "
+     ),
+     # tools=[google_search],
+     # Add the responses_api agent as a tool
+     #sub_agents=[responses_api]
+ )
+
+
+ youtube_agent = LlmAgent(
+     name='youtubegaiaAgent',
+     model="gemini-2.5-pro-preview-05-06",
+     description=(
+         "You are a smart agent that, given a YouTube link, can watch the video and answer any question related to it."
+     ),
+     instruction=(
+         "Get the YouTube link associated with the question, use Gemini to watch the video, and answer the provided question. "
+     ),
+     # tools=[google_search],
+     # Add the responses_api agent as a tool
+     #sub_agents=[responses_api]
+ )
+
+ root_agent = LlmAgent(
+     name='basegaiaAgent',
+     model="gemini-2.5-pro-preview-05-06",
+     description=(
+         "You are a smart agent that can answer any question, accessing the given files as needed."
+     ),
+     instruction=(
+         "You are a helpful agent. When the user asks to get the questions or makes a similar request, "
+         "invoke your tool 'responses_api' to retrieve the questions. "
+         "Once you receive the list of questions, loop over each question and provide a concise answer for each based on the question and any provided file. "
+         "For every answer, return a dictionary with the keys task_id and submitted_answer, for example: "
+         "{'task_id': 'the-task-id', 'submitted_answer': 'your answer'}. "
+         "Collect all such dictionaries in a list (do not include any backslashes), and pass this list to the 'submit_api' tool to submit the answers."
+     ),
+     tools=[responses_api, submit_api, agent_tool.AgentTool(agent=code_agent),
+            agent_tool.AgentTool(agent=search_agent), agent_tool.AgentTool(agent=youtube_agent),
+            agent_tool.AgentTool(agent=image_agent)],
+     # Add the responses_api agent as a tool
+     #sub_agents=[responses_api]
+ )
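+ # Orchestration summary: responses_api fetches the prompts, the AgentTool-wrapped specialists
+ # (code_agent, search_agent, youtube_agent, image_agent) handle code/web/video/image questions,
+ # and submit_api posts the collected answers back to the scoring API.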
+
+ # root_agent = LlmAgent(
+ #     name='gaiaAgent',
+ #     model="gemini-2.5-pro-preview-05-06",
+ #     description=(
+ #         "You are a smart agent that can answer any question, accessing the given files as needed."
+ #     ),
+ #     instruction=(
+ #         "You are a helpful agent. When the user asks to get the questions or makes a similar request, "
+ #         "invoke the base agent. "
+ #         "Once you have the answers, check that they are in the correct format. "
+ #         #"Collect all such dictionaries in a list (do not include any backslashes), and pass this list to the 'submit_api' tool to submit the answers."
+ #     ),
+ #     #tools=[submit_api],
+ #     # Add the responses_api agent as a tool
+ #     sub_agents=[base_agent]
+ # )
+
+ session_service = InMemorySessionService()
+ session = session_service.create_session(app_name=APP_NAME,
+                                           user_id=USER_ID,
+                                           session_id=SESSION_ID)
+
+ runner = Runner(agent=root_agent, app_name=APP_NAME, session_service=session_service)
+ # # def send_query_to_agent(root_agent, query, session):
+ # #     session = session
+ # #     content = types.Content(role='user', parts=[types.Part(text=query)])
+
+ # # async def main():
+ # #     await process_questions_and_answer()
+
+ # # if __name__ == "__main__":
+ # #     import asyncio
+ # #     asyncio.run(main())
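+ # A minimal sketch of how the root agent could be driven locally, assuming the synchronous
+ # Runner.run API; the prompt text below is only an illustrative placeholder.
+ # query = "Get the questions, answer each one, and submit the answers."
+ # content = types.Content(role='user', parts=[types.Part(text=query)])
+ # for event in runner.run(user_id=USER_ID, session_id=SESSION_ID, new_message=content):
+ #     if event.is_final_response() and event.content and event.content.parts:
+ #         print(event.content.parts[0].text)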