ashishja committed on
Commit
cfee38a
·
verified ·
1 Parent(s): 7f7e4c1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +44 -44
app.py CHANGED
@@ -28,29 +28,29 @@ url = 'https://agents-course-unit4-scoring.hf.space/questions'
28
  headers = {'accept': 'application/json'}
29
  response = requests.get(url, headers=headers)
30
 
31
- class responses_api(BaseAgent):
32
- async def _run_async_impl(self, ctx: InvocationContext)-> AsyncGenerator[Event, None]:
33
- # This method is called when the agent is run
34
- # You can implement your logic here
35
- # For example, you can call an external API or perform some calculations
36
- # and return the result
37
- url = 'https://agents-course-unit4-scoring.hf.space/questions'
38
- headers = {'accept': 'application/json'}
39
- response = requests.get(url, headers=headers)
40
- for i in response.json():
41
- if i['file_name'] != '':
42
- url_file = f"https://agents-course-unit4-scoring.hf.space/files/{i['task_id']}"
43
- question = i['question']
44
- prompt = f"{question} and the file is {url_file}, give the final answer only"
45
- else:
46
- question = i['question']
47
- prompt = f"{question} give the final answer only"
48
- existing_responses = ctx.session.state.get("user:responses", [])
49
- existing_responses.append(prompt)
50
- ctx.session_state["user:responses"] = existing_responses
51
-
52
- # Optionally, yield a single event to indicate completion or provide some output
53
- yield Event(author=self.name, content=types.Content(parts=[types.Part(text=f"Fetched {len(questions_data)} questions.")]))
54
 
55
 
56
 
@@ -162,9 +162,9 @@ image_agent = LlmAgent(
162
  "Get the image file from the link associated in the prompt use Gemini to watch the video and answer the provided question ")
163
 
164
  ,
165
- tools=[google_search],
166
- # Add the responses_api agent as a tool
167
- sub_agents=[responses_api]
168
  )
169
 
170
 
@@ -205,24 +205,24 @@ root_agent = LlmAgent(
205
  sub_agents=[responses_api]
206
  )
207
 
208
- root_agent = LlmAgent(
209
- name='gaiaAgent',
210
- model="gemini-2.5-pro-preview-05-06",
211
- description=(
212
- "You are a smart agent that can answer any questions provided access the given files and answer"
213
- ),
214
- instruction = (
215
- "You are a helpful agent. When the user asks to get the questions or makes a similar request, "
216
- "invoke base agent. "
217
- "Once you the answers check if are in correct format. "
218
- #"Collect all such dictionaries in a list (do not include any backslashes), and pass this list to the 'submit_api' tool to submit the answers."
219
- )
220
-
221
- ,
222
- #tools=[submit_api],
223
- # Add the responses_api agent as a tool
224
- sub_agents=[base_agent]
225
- )
226
 
227
  session_service = InMemorySessionService()
228
  session = session_service.create_session(app_name=APP_NAME, \
 
28
  headers = {'accept': 'application/json'}
29
  response = requests.get(url, headers=headers)
30
 
31
+ # class responses_api(BaseAgent):
32
+ # async def _run_async_impl(self, ctx: InvocationContext)-> AsyncGenerator[Event, None]:
33
+ # # This method is called when the agent is run
34
+ # # You can implement your logic here
35
+ # # For example, you can call an external API or perform some calculations
36
+ # # and return the result
37
+ # url = 'https://agents-course-unit4-scoring.hf.space/questions'
38
+ # headers = {'accept': 'application/json'}
39
+ # response = requests.get(url, headers=headers)
40
+ # for i in response.json():
41
+ # if i['file_name'] != '':
42
+ # url_file = f"https://agents-course-unit4-scoring.hf.space/files/{i['task_id']}"
43
+ # question = i['question']
44
+ # prompt = f"{question} and the file is {url_file}, give the final answer only"
45
+ # else:
46
+ # question = i['question']
47
+ # prompt = f"{question} give the final answer only"
48
+ # existing_responses = ctx.session.state.get("user:responses", [])
49
+ # existing_responses.append(prompt)
50
+ # ctx.session_state["user:responses"] = existing_responses
51
+
52
+ # # Optionally, yield a single event to indicate completion or provide some output
53
+ # yield Event(author=self.name, content=types.Content(parts=[types.Part(text=f"Fetched {len(questions_data)} questions.")]))
54
 
55
 
56
 
 
162
  "Get the image file from the link associated in the prompt use Gemini to watch the video and answer the provided question ")
163
 
164
  ,
165
+ # tools=[google_search],
166
+ # # Add the responses_api agent as a tool
167
+ # sub_agents=[responses_api]
168
  )
169
 
170
 
 
205
  sub_agents=[responses_api]
206
  )
207
 
208
+ # root_agent = LlmAgent(
209
+ # name='gaiaAgent',
210
+ # model="gemini-2.5-pro-preview-05-06",
211
+ # description=(
212
+ # "You are a smart agent that can answer any questions provided access the given files and answer"
213
+ # ),
214
+ # instruction = (
215
+ # "You are a helpful agent. When the user asks to get the questions or makes a similar request, "
216
+ # "invoke base agent. "
217
+ # "Once you the answers check if are in correct format. "
218
+ # #"Collect all such dictionaries in a list (do not include any backslashes), and pass this list to the 'submit_api' tool to submit the answers."
219
+ # )
220
+
221
+ # ,
222
+ # #tools=[submit_api],
223
+ # # Add the responses_api agent as a tool
224
+ # sub_agents=[base_agent]
225
+ # )
226
 
227
  session_service = InMemorySessionService()
228
  session = session_service.create_session(app_name=APP_NAME, \