fixed built_in_code_executors bug
Browse files
app.py
CHANGED
@@ -12,7 +12,8 @@ from typing import AsyncGenerator
|
|
12 |
from google.genai import types as genai_types
|
13 |
from google.adk.tools import ToolContext, FunctionTool
|
14 |
import logging
|
15 |
-
from google.adk.tools import built_in_code_execution
|
|
|
16 |
from google.adk.tools import agent_tool
|
17 |
|
18 |
|
@@ -23,34 +24,34 @@ from google.adk.tools import agent_tool
|
|
23 |
|
24 |
|
25 |
logging.basicConfig(level=logging.ERROR)
|
26 |
-
|
27 |
url = 'https://agents-course-unit4-scoring.hf.space/questions'
|
28 |
headers = {'accept': 'application/json'}
|
29 |
response = requests.get(url, headers=headers)
|
30 |
|
31 |
-
|
32 |
-
|
33 |
-
#
|
34 |
-
#
|
35 |
-
#
|
36 |
-
#
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
#
|
53 |
-
|
54 |
|
55 |
|
56 |
|
@@ -93,14 +94,14 @@ def submit_questions(answers: list[str]) -> Dict[str, Any]:
|
|
93 |
responses_api = FunctionTool(func= answer_questions)
|
94 |
submit_api = FunctionTool(func=submit_questions)
|
95 |
|
96 |
-
|
97 |
-
|
98 |
-
|
99 |
-
|
100 |
-
|
101 |
-
|
102 |
|
103 |
-
|
104 |
|
105 |
|
106 |
|
@@ -117,6 +118,7 @@ SESSION_ID="1234"
|
|
117 |
code_agent = LlmAgent(
|
118 |
name='codegaiaAgent',
|
119 |
model="gemini-2.5-pro-preview-05-06",
|
|
|
120 |
description=(
|
121 |
"You are a smart agent that can write and execute code and answer any questions provided access the given files and answer"
|
122 |
),
|
@@ -130,9 +132,9 @@ code_agent = LlmAgent(
|
|
130 |
)
|
131 |
|
132 |
,
|
133 |
-
tools=[built_in_code_execution],
|
134 |
# Add the responses_api agent as a tool
|
135 |
-
|
136 |
)
|
137 |
|
138 |
|
@@ -149,7 +151,7 @@ search_agent = LlmAgent(
|
|
149 |
,
|
150 |
tools=[google_search],
|
151 |
# Add the responses_api agent as a tool
|
152 |
-
|
153 |
)
|
154 |
|
155 |
image_agent = LlmAgent(
|
@@ -160,11 +162,8 @@ image_agent = LlmAgent(
|
|
160 |
),
|
161 |
instruction = (
|
162 |
"Get the image file from the link associated in the prompt use Gemini to watch the video and answer the provided question ")
|
163 |
-
|
164 |
,
|
165 |
-
|
166 |
-
# Add the responses_api agent as a tool
|
167 |
-
#sub_agents=[responses_api]
|
168 |
)
|
169 |
|
170 |
|
@@ -178,9 +177,7 @@ youtube_agent = LlmAgent(
|
|
178 |
"Get the youtube link associated use Gemini to watch the video and answer the provided question ")
|
179 |
|
180 |
,
|
181 |
-
|
182 |
-
# Add the responses_api agent as a tool
|
183 |
-
#sub_agents=[responses_api]
|
184 |
)
|
185 |
|
186 |
root_agent = LlmAgent(
|
@@ -202,27 +199,27 @@ root_agent = LlmAgent(
|
|
202 |
tools=[responses_api,submit_api,agent_tool.AgentTool(agent = code_agent),\
|
203 |
agent_tool.AgentTool(agent = search_agent), agent_tool.AgentTool(youtube_agent), agent_tool.AgentTool(image_agent)],
|
204 |
# Add the responses_api agent as a tool
|
205 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
206 |
)
|
207 |
|
208 |
-
|
209 |
-
|
210 |
-
#
|
211 |
-
|
212 |
-
|
213 |
-
# ),
|
214 |
-
# instruction = (
|
215 |
-
# "You are a helpful agent. When the user asks to get the questions or makes a similar request, "
|
216 |
-
# "invoke base agent. "
|
217 |
-
# "Once you the answers check if are in correct format. "
|
218 |
-
# #"Collect all such dictionaries in a list (do not include any backslashes), and pass this list to the 'submit_api' tool to submit the answers."
|
219 |
-
# )
|
220 |
-
|
221 |
-
# ,
|
222 |
-
# #tools=[submit_api],
|
223 |
-
# # Add the responses_api agent as a tool
|
224 |
-
# sub_agents=[base_agent]
|
225 |
-
# )
|
226 |
|
227 |
session_service = InMemorySessionService()
|
228 |
session = session_service.create_session(app_name=APP_NAME, \
|
@@ -230,16 +227,16 @@ session = session_service.create_session(app_name=APP_NAME, \
|
|
230 |
session_id=SESSION_ID)
|
231 |
|
232 |
runner = Runner(agent=root_agent, app_name=APP_NAME, session_service=session_service)
|
233 |
-
|
234 |
-
|
235 |
-
|
236 |
|
237 |
|
238 |
|
239 |
|
240 |
-
|
241 |
-
|
242 |
|
243 |
-
|
244 |
-
|
245 |
-
|
|
|
12 |
from google.genai import types as genai_types
|
13 |
from google.adk.tools import ToolContext, FunctionTool
|
14 |
import logging
|
15 |
+
#from google.adk.tools import built_in_code_execution
|
16 |
+
from google.adk.code_executors import BuiltInCodeExecutor
|
17 |
from google.adk.tools import agent_tool
|
18 |
|
19 |
|
|
|
24 |
|
25 |
|
26 |
logging.basicConfig(level=logging.ERROR)
|
27 |
+
from google.adk.tools import agent_tool
|
28 |
url = 'https://agents-course-unit4-scoring.hf.space/questions'
|
29 |
headers = {'accept': 'application/json'}
|
30 |
response = requests.get(url, headers=headers)
|
31 |
|
32 |
+
class responses_api(BaseAgent):
    """Custom agent that fetches the scoring-service question list and stores
    a ready-to-send prompt for each question in session state under
    'user:responses'.
    """

    async def _run_async_impl(self, ctx: InvocationContext) -> AsyncGenerator[Event, None]:
        # Fetch the question list from the GAIA scoring service.
        url = 'https://agents-course-unit4-scoring.hf.space/questions'
        headers = {'accept': 'application/json'}
        response = requests.get(url, headers=headers)

        prompts = []
        for item in response.json():
            if item['file_name'] != '':
                # Question references an attached file; include its download URL.
                url_file = f"https://agents-course-unit4-scoring.hf.space/files/{item['task_id']}"
                question = item['question']
                prompt = f"{question} and the file is {url_file}, give the final answer only"
            else:
                question = item['question']
                prompt = f"{question} give the final answer only"
            prompts.append(prompt)

        # Accumulate prompts in session state.
        # FIX: the original wrote to the nonexistent attribute
        # ctx.session_state, while reading from ctx.session.state — writes
        # were lost (or raised AttributeError).
        existing_responses = ctx.session.state.get("user:responses", [])
        existing_responses.extend(prompts)
        ctx.session.state["user:responses"] = existing_responses

        # Yield a single completion event.
        # FIX: the original yield had unbalanced parentheses and referenced
        # the undefined name 'questions_data'.
        # NOTE(review): assumes `types` here is google.genai types — the file
        # imports it as `genai_types`; confirm the alias in scope.
        yield Event(
            author=self.name,
            content=types.Content(
                parts=[types.Part(text=f"Fetched {len(prompts)} questions.")]
            ),
        )
|
55 |
|
56 |
|
57 |
|
|
|
94 |
responses_api = FunctionTool(func= answer_questions)
|
95 |
submit_api = FunctionTool(func=submit_questions)
|
96 |
|
97 |
+
class QuestionAnswerer(LlmAgent):
    """LlmAgent subclass that answers each previously fetched question and
    yields one Event per answer."""

    async def _run_async_impl(self, ctx: InvocationContext) -> AsyncGenerator[Event, None]:
        # FIX: fetched questions live in session *state*; the original called
        # ctx.session_service.get('fetched_questions', []), which is a session
        # service, not a state mapping, and would fail or return nothing.
        questions_to_answer = ctx.session.state.get('fetched_questions', [])
        for question in questions_to_answer:
            # NOTE(review): self._llm is not a documented LlmAgent API and
            # types.ChatMessage may not exist in google.genai — confirm the
            # intended model-invocation helper before relying on this path.
            answer = await self._llm(
                messages=[types.ChatMessage(role="user", parts=[types.Part(text=question)])]
            )
            yield Event(author=self.name, content=answer.content)


# Module-level instance used by the rest of the script.
qa = QuestionAnswerer(name='qa_1', model="gemini-2.0-flash", description="Question Answerer")
|
105 |
|
106 |
|
107 |
|
|
|
118 |
code_agent = LlmAgent(
|
119 |
name='codegaiaAgent',
|
120 |
model="gemini-2.5-pro-preview-05-06",
|
121 |
+
code_executor=BuiltInCodeExecutor(),
|
122 |
description=(
|
123 |
"You are a smart agent that can write and execute code and answer any questions provided access the given files and answer"
|
124 |
),
|
|
|
132 |
)
|
133 |
|
134 |
,
|
135 |
+
#tools=[built_in_code_execution],
|
136 |
# Add the responses_api agent as a tool
|
137 |
+
sub_agents=[responses_api]
|
138 |
)
|
139 |
|
140 |
|
|
|
151 |
,
|
152 |
tools=[google_search],
|
153 |
# Add the responses_api agent as a tool
|
154 |
+
sub_agents=[responses_api]
|
155 |
)
|
156 |
|
157 |
image_agent = LlmAgent(
|
|
|
162 |
),
|
163 |
instruction = (
|
164 |
"Get the image file from the link associated in the prompt use Gemini to watch the video and answer the provided question ")
|
|
|
165 |
,
|
166 |
+
tools=[google_search], sub_agents=[responses_api]
|
|
|
|
|
167 |
)
|
168 |
|
169 |
|
|
|
177 |
"Get the youtube link associated use Gemini to watch the video and answer the provided question ")
|
178 |
|
179 |
,
|
180 |
+
tools=[google_search], sub_agents=[responses_api]
|
|
|
|
|
181 |
)
|
182 |
|
183 |
root_agent = LlmAgent(
|
|
|
199 |
tools=[responses_api,submit_api,agent_tool.AgentTool(agent = code_agent),\
|
200 |
agent_tool.AgentTool(agent = search_agent), agent_tool.AgentTool(youtube_agent), agent_tool.AgentTool(image_agent)],
|
201 |
# Add the responses_api agent as a tool
|
202 |
+
sub_agents=[responses_api]
|
203 |
+
)
|
204 |
+
|
205 |
+
# NOTE(review): this REDEFINES root_agent — an earlier root_agent (wired with
# responses_api, submit_api and the agent_tool wrappers) is assigned just
# above, so this assignment silently shadows it and only this one reaches the
# Runner. Confirm which definition is intended before deleting either.
root_agent = LlmAgent(
    name='gaiaAgent',
    model="gemini-2.5-pro-preview-05-06",
    description=(
        "You are a smart agent that can answer any questions provided access the given files and answer"
    ),
    instruction=(
        "You are a helpful agent. When the user asks to get the questions or makes a similar request, "
        "invoke base agent. "
        "Once you the answers check if are in correct format. "
        #"Collect all such dictionaries in a list (do not include any backslashes), and pass this list to the 'submit_api' tool to submit the answers."
    ),
    tools=[submit_api],
    # Add the responses_api agent as a tool
    sub_agents=[base_agent],
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
223 |
|
224 |
session_service = InMemorySessionService()
|
225 |
session = session_service.create_session(app_name=APP_NAME, \
|
|
|
227 |
session_id=SESSION_ID)
|
228 |
|
229 |
runner = Runner(agent=root_agent, app_name=APP_NAME, session_service=session_service)
|
230 |
+
def send_query_to_agent(root_agent, query, session):
    """Build a user-role Content message for *query*.

    NOTE(review): in this revision the function only constructs the message —
    it never invokes the runner, so it appears truncated; confirm against the
    full file. Returning the built content (instead of discarding it) is
    backward-compatible for callers that ignored the previous None return.
    The pointless self-assignment ``session = session`` was removed.
    """
    content = types.Content(role='user', parts=[types.Part(text=query)])
    return content
|
233 |
|
234 |
|
235 |
|
236 |
|
237 |
+
async def main():
    """Script entry point: run the full fetch-and-answer pipeline.

    NOTE(review): process_questions_and_answer is not defined in this portion
    of the file — confirm it exists elsewhere before running.
    """
    await process_questions_and_answer()


if __name__ == "__main__":
    import asyncio

    asyncio.run(main())
|