Nathan Brake
committed on
Make location/framework/date/distance be inputs rather than config controlled by default (#55)
* Simplify configs so that selecting framework, time, location, etc. is a game-time decision with defaults (a minimal sketch of the new flow follows the file list below)
* Fix a bad getter
* More cleanup and handling of MCP params
- examples/langchain_single_agent.yaml +0 -8
- examples/langchain_single_agent_vertical.yaml +0 -17
- examples/llama_index_single_agent.yaml +0 -8
- examples/llama_index_single_agent_vertical.yaml +0 -17
- examples/{openai_multi_agent.yaml → multi_agent.yaml} +7 -14
- examples/openai_single_agent.yaml +0 -8
- examples/openai_single_agent_user_confirmation.yaml +0 -36
- examples/{langchain_single_agent_user_confirmation.yaml → single_agent_user_confirmation.yaml} +4 -9
- examples/{openai_single_agent_vertical.yaml → single_agent_with_tools.yaml} +1 -7
- examples/smolagents_single_agent.yaml +0 -8
- examples/smolagents_single_agent_user_confirmation.yaml +0 -37
- examples/smolagents_single_agent_vertical.yaml +0 -19
- pyproject.toml +2 -1
- src/surf_spot_finder/cli.py +20 -6
- src/surf_spot_finder/config.py +169 -16
- src/surf_spot_finder/no_framework.py +1 -2
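
With those changes an example YAML may omit location, date, max_driving_hours, framework, and even model_id; anything missing is collected interactively when the finder runs. A minimal sketch of that flow, assuming the surf_spot_finder package from this repo is installed (the attribute access in the last line is illustrative):

from surf_spot_finder.config import Config

# An empty dict means every field is resolved at run time:
# model_id (validated via LiteLLM), framework, location, max driving hours, and date.
config = Config.from_dict({})
print(config.framework, config.location, config.date, config.max_driving_hours)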
examples/langchain_single_agent.yaml
DELETED
@@ -1,8 +0,0 @@
-location: Pontevedra
-date: 2025-04-02 12:00
-max_driving_hours: 2
-
-framework: langchain
-
-main_agent:
-  model_id: o3-mini
examples/langchain_single_agent_vertical.yaml
DELETED
@@ -1,17 +0,0 @@
-location: Pontevedra
-date: 2025-04-10 12:00
-max_driving_hours: 2
-
-framework: langchain
-
-main_agent:
-  model_id: o3-mini
-  tools:
-    - "surf_spot_finder.tools.driving_hours_to_meters"
-    - "surf_spot_finder.tools.get_area_lat_lon"
-    - "surf_spot_finder.tools.get_surfing_spots"
-    - "surf_spot_finder.tools.get_wave_forecast"
-    - "surf_spot_finder.tools.get_wind_forecast"
-    - "any_agent.tools.search_web"
-    - "any_agent.tools.show_plan"
-    - "any_agent.tools.visit_webpage"
examples/llama_index_single_agent.yaml
DELETED
@@ -1,8 +0,0 @@
-location: Pontevedra
-date: 2025-04-02 12:00
-max_driving_hours: 2
-
-framework: llama_index
-
-main_agent:
-  model_id: o3-mini
examples/llama_index_single_agent_vertical.yaml
DELETED
@@ -1,17 +0,0 @@
-location: Pontevedra
-date: 2025-04-10 12:00
-max_driving_hours: 2
-
-framework: llama_index
-
-main_agent:
-  model_id: o3-mini
-  tools:
-    - "surf_spot_finder.tools.driving_hours_to_meters"
-    - "surf_spot_finder.tools.get_area_lat_lon"
-    - "surf_spot_finder.tools.get_surfing_spots"
-    - "surf_spot_finder.tools.get_wave_forecast"
-    - "surf_spot_finder.tools.get_wind_forecast"
-    - "any_agent.tools.search_web"
-    - "any_agent.tools.show_plan"
-    - "any_agent.tools.visit_webpage"
examples/{openai_multi_agent.yaml → multi_agent.yaml}
RENAMED
@@ -1,15 +1,9 @@
-location: Pontevedra
-date: 2025-04-02 12:00
-max_driving_hours: 2
-
-framework: openai
-
 main_agent:
   name: main_agent
-  model_id:
+  model_id: # optional, will prompt for it if not provided
   instructions: >
     # System context\n
-    You are part of a multi-agent system
+    You are part of a multi-agent system, designed to make agent coordination and execution easy.
     Agents uses two primary abstraction: **Agents** and **Handoffs**.
     An agent encompasses instructions and tools and can hand off a conversation to another agent when appropriate.
     Handoffs are achieved by calling a handoff function, generally named `transfer_to_<agent_name>`.
@@ -17,22 +11,21 @@ main_agent:
 
 managed_agents:
 
-  - name: user-
-    model_id:
+  - name: user-verification-agent
+    model_id: # optional, will prompt for it if not provided
     instructions: Ask users to verify a step, plan or answer.
     tools:
       - any_agent.tools.ask_user_verification
 
   - name: general-web-search-agent
-    model_id:
+    model_id: # optional, will prompt for it if not provided
     instructions: Search the web and visit webpages to find answers.
     tools:
       - any_agent.tools.search_web
       - any_agent.tools.visit_webpage
 
   - name: user-communication-agent
-    model_id:
+    model_id: # optional, will prompt for it if not provided
     instructions: Communicates to the user
-    handoff: True
     tools:
-      - any_agent.tools.
+      - any_agent.tools.show_final_output
examples/openai_single_agent.yaml
DELETED
@@ -1,8 +0,0 @@
-location: Pontevedra
-date: 2025-04-02 12:00
-max_driving_hours: 2
-
-framework: openai
-
-main_agent:
-  model_id: o3-mini
examples/openai_single_agent_user_confirmation.yaml
DELETED
@@ -1,36 +0,0 @@
-
-location: Pontevedra
-date: 2025-04-10 12:00
-max_driving_hours: 2
-input_prompt_template: |
-  According to the forecast, what will be the best spot to surf around {LOCATION},
-  in a {MAX_DRIVING_HOURS} hour driving radius, at {DATE}?
-  Find a few options and then discuss it with David de la Iglesia Castro. You should recommend him some choices,
-  and then confirm the final selection with him.
-  Once he gives the final selection, save a detailed description of the weather at the chosen location into a file
-  named "final_answer.txt". Also save a file called "history.txt" which has a list of your thought process in the choice.
-framework: openai
-
-main_agent:
-  model_id: gpt-4o
-  tools:
-    - "surf_spot_finder.tools.driving_hours_to_meters"
-    - "surf_spot_finder.tools.get_area_lat_lon"
-    - "surf_spot_finder.tools.get_surfing_spots"
-    - "surf_spot_finder.tools.get_wave_forecast"
-    - "surf_spot_finder.tools.get_wind_forecast"
-    - "any_agent.tools.send_console_message"
-    - command: "docker"
-      args:
-        - "run"
-        - "-i"
-        - "--rm"
-        - "--mount"
-        - "type=bind,src=/tmp/surf-spot-finder,dst=/projects"
-        - "mcp/filesystem"
-        - "/projects"
-      tools:
-        - "read_file"
-        - "write_file"
-        - "directory_tree"
-        - "list_allowed_directories"
examples/{langchain_single_agent_user_confirmation.yaml → single_agent_user_confirmation.yaml}
RENAMED
@@ -1,19 +1,14 @@
 
-location: Pontevedra
-date: 2025-04-05 12:00
-max_driving_hours: 2
 input_prompt_template: |
   According to the forecast, what will be the best spot to surf around {LOCATION},
   in a {MAX_DRIVING_HOURS} hour driving radius, at {DATE}?
-  Find a few options and then discuss it with
-  and then confirm the final selection with him.
+  Find a few options and then discuss it with a friend using the send_console_message any_agent.tools.ask_user_verification
+  You should recommend some choices to them and then confirm the final selection with him.
   Once he gives the final selection, save a detailed description of the weather at the chosen location into a file
   named "final_answer.txt". Also save a file called "history.txt" which has a list of your thought process in the choice.
 
-framework: langchain
-
 main_agent:
-  model_id:
+  model_id: # optional, will prompt for it if not provided
   tools:
     - "surf_spot_finder.tools.driving_hours_to_meters"
     - "surf_spot_finder.tools.get_area_lat_lon"
@@ -27,7 +22,7 @@ main_agent:
         - "-i"
        - "--rm"
        - "--mount"
-        - "type=bind,src
+        - "type=bind,src={{ path_variable }},dst=/projects" # this is the custom part where for demo purposes we will check this in config.py
        - "mcp/filesystem"
        - "/projects"
      tools:
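
The {{ path_variable }} placeholder in the mount argument above is filled in at run time by set_mcp_settings (see the config.py diff below), which prompts for the directory the filesystem MCP may access. A standalone sketch of that substitution, reusing the docker args from this example and hard-coding the prompted default path:

import os
import tempfile

tool = {
    "command": "docker",
    "args": [
        "run", "-i", "--rm", "--mount",
        "type=bind,src={{ path_variable }},dst=/projects",
        "mcp/filesystem", "/projects",
    ],
}
# config.py asks the user for this path; here we just take the default it offers.
chosen_path = os.path.join(tempfile.gettempdir(), "surf_spot_finder")
os.makedirs(chosen_path, exist_ok=True)
tool["args"] = [arg.replace("{{ path_variable }}", chosen_path) for arg in tool["args"]]
print(tool["args"][4])  # e.g. type=bind,src=/tmp/surf_spot_finder,dst=/projects on Linux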
examples/{openai_single_agent_vertical.yaml → single_agent_with_tools.yaml}
RENAMED
@@ -1,11 +1,5 @@
-location: Lisbon
-date: 2025-04-08 19:00
-max_driving_hours: 1
-
-framework: openai
-
 main_agent:
-  model_id:
+  model_id: # optional, will prompt for it if not provided
   tools:
     - "surf_spot_finder.tools.driving_hours_to_meters"
     - "surf_spot_finder.tools.get_area_lat_lon"
examples/smolagents_single_agent.yaml
DELETED
@@ -1,8 +0,0 @@
-location: Pontevedra
-date: 2025-04-02 12:00
-max_driving_hours: 2
-
-framework: smolagents
-
-main_agent:
-  model_id: openai/o3-mini
examples/smolagents_single_agent_user_confirmation.yaml
DELETED
@@ -1,37 +0,0 @@
-location: Pontevedra
-date: 2025-04-05 12:00
-max_driving_hours: 2
-input_prompt_template: |
-  According to the forecast, what will be the best spot to surf around {LOCATION},
-  in a {MAX_DRIVING_HOURS} hour driving radius, at {DATE}?
-  Find a few options and then discuss it with David de la Iglesia Castro. You should recommend him some choices,
-  and then confirm the final selection with him.
-  Once he gives the final selection, save a detailed description of the weather at the chosen location into a file
-  named "final_answer.txt". Also save a file called "history.txt" which has a list of your thought process in the choice.
-
-framework: smolagents
-
-main_agent:
-  model_id: openai/gpt-4o
-  tools:
-    - "surf_spot_finder.tools.driving_hours_to_meters"
-    - "surf_spot_finder.tools.get_area_lat_lon"
-    - "surf_spot_finder.tools.get_surfing_spots"
-    - "surf_spot_finder.tools.get_wave_forecast"
-    - "surf_spot_finder.tools.get_wind_forecast"
-    - "any_agent.tools.send_console_message"
-    - "smolagents.FinalAnswerTool"
-    - command: "docker"
-      args:
-        - "run"
-        - "-i"
-        - "--rm"
-        - "--mount"
-        - "type=bind,src=/tmp/surf-spot-finder,dst=/projects"
-        - "mcp/filesystem"
-        - "/projects"
-      tools:
-        - "read_file"
-        - "write_file"
-        - "directory_tree"
-        - "list_allowed_directories"
examples/smolagents_single_agent_vertical.yaml
DELETED
@@ -1,19 +0,0 @@
-location: Pontevedra
-date: 2025-04-10 12:00
-max_driving_hours: 2
-
-framework: smolagents
-
-main_agent:
-  model_id: openai/o3-mini
-  tools:
-    - "surf_spot_finder.tools.driving_hours_to_meters"
-    - "surf_spot_finder.tools.get_area_lat_lon"
-    - "surf_spot_finder.tools.get_surfing_spots"
-    - "surf_spot_finder.tools.get_wave_forecast"
-    - "surf_spot_finder.tools.get_wind_forecast"
-    - "any_agent.tools.search_web"
-    - "any_agent.tools.show_plan"
-    - "any_agent.tools.visit_webpage"
-    - "smolagents.PythonInterpreterTool"
-    - "smolagents.FinalAnswerTool"
pyproject.toml
CHANGED
@@ -11,10 +11,11 @@ dynamic = ["version"]
 dependencies = [
     "any-agent[all]",
     "fire",
-    "loguru",
     "pydantic",
     "pyyaml",
     "litellm",
+    "geocoder>=1.38.1",
+    "rich>=14.0.0",
 ]
 
 [project.optional-dependencies]
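
The two new dependencies back the interactive defaults: rich provides the console prompts and geocoder supplies an IP-based default location, while loguru is dropped in favour of any_agent's logger. A small sketch of how they combine, mirroring location_picker in the config.py diff below (geocoder may return empty fields without network access):

import geocoder
from rich.prompt import Prompt

g = geocoder.ip("me")  # best-effort IP geolocation
default_location = f"{g.city} {g.state}, {g.country}"
location = Prompt.ask("Enter a location", default=default_location)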
src/surf_spot_finder/cli.py
CHANGED
@@ -1,6 +1,10 @@
+import datetime
+import os
+from pathlib import Path
+
 from any_agent import AgentFramework, AnyAgent, TracingConfig
 from fire import Fire
-from
+from any_agent.logging import logger
 
 from surf_spot_finder.config import (
     Config,
@@ -10,9 +14,8 @@ from surf_spot_finder.instructions.openai import SINGLE_AGENT_SYSTEM_PROMPT
 from surf_spot_finder.instructions.smolagents import SYSTEM_PROMPT
 
 
-@logger.catch(reraise=True)
 async def find_surf_spot(
-    config_file: str,
+    config_file: str | None = None,
 ) -> str:
     """Find the best surf spot based on the given criteria.
 
@@ -21,8 +24,11 @@ async def find_surf_spot(
     See [Config][surf_spot_finder.config.Config]
 
     """
-
-
+    if config_file is None:
+        config = Config.from_dict({})
+    else:
+        logger.info(f"Loading {config_file}")
+        config = Config.from_yaml(config_file)
 
     if not config.main_agent.instructions:
         if config.framework == AgentFramework.SMOLAGENTS:
@@ -47,7 +53,15 @@ async def find_surf_spot(
     logger.info(f"Running agent with query:\n{query}")
     agent_trace = await agent.run_async(query)
 
-    logger.info(f"Final
+    logger.info(f"Final output from agent:\n{agent_trace.final_output}")
+
+    # dump the trace in the "output" directory
+    output_dir = "output"
+    os.makedirs(output_dir, exist_ok=True)
+    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
+    file_path = Path(output_dir) / f"{timestamp}_trace.json"
+    with open(file_path, "w") as f:
+        f.write(agent_trace.model_dump_json(indent=2))
 
 
 def main():
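
Besides logging the final output, find_surf_spot now writes the whole agent trace to output/<timestamp>_trace.json. A small helper for reading the most recent dump back, assuming only the file layout shown above (the helper name is ours; standard library only):

import json
from pathlib import Path

def load_latest_trace(output_dir: str = "output") -> dict:
    """Return the most recently written <timestamp>_trace.json as a dict."""
    traces = sorted(Path(output_dir).glob("*_trace.json"))
    if not traces:
        raise FileNotFoundError(f"no trace files in {output_dir}")
    return json.loads(traces[-1].read_text())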
src/surf_spot_finder/config.py
CHANGED
@@ -1,10 +1,17 @@
+import os
+import tempfile
 from typing import Annotated
-
+from datetime import datetime, timedelta
 from any_agent import AgentFramework
 from any_agent.config import AgentConfig
 from pydantic import AfterValidator, BaseModel, ConfigDict, FutureDatetime, PositiveInt
 import yaml
-
+from rich.prompt import Prompt
+from any_agent.logging import logger
+import geocoder
+from litellm.litellm_core_utils.get_llm_provider_logic import (
+    get_llm_provider,
+)
 
 INPUT_PROMPT_TEMPLATE = """
 According to the forecast, what will be the best spot to surf around {LOCATION},
@@ -20,6 +27,112 @@ def validate_prompt(value) -> str:
     return value
 
 
+def ask_framework() -> AgentFramework:
+    """
+    Ask the user which framework they would like to use. They must select one of the Agent Frameworks
+    """
+    frameworks = [framework.name for framework in AgentFramework]
+    frameworks_str = "\n".join(
+        [f"{i}: {framework}" for i, framework in enumerate(frameworks)]
+    )
+    prompt = f"Select the agent framework to use:\n{frameworks_str}\n"
+    choice = Prompt.ask(prompt, default="3")
+    try:
+        choice = int(choice)
+        if choice < 0 or choice >= len(frameworks):
+            raise ValueError("Invalid choice")
+        return AgentFramework[frameworks[choice]]
+    except ValueError:
+        raise ValueError("Invalid choice")
+
+
+def date_picker() -> FutureDatetime:
+    """
+    Ask the user to select a date in the future. The date must be at least 1 day in the future.
+    """
+    prompt = "Select a date in the future (YYYY-MM-DD-HH)"
+    # the default should be the current date + 1 day
+    now = datetime.now()
+    default_val = (now + timedelta(days=1)).strftime("%Y-%m-%d-%H")
+    date_str = Prompt.ask(prompt, default=default_val)
+    try:
+        year, month, day, hour = map(int, date_str.split("-"))
+        date = datetime(year, month, day, hour)
+        return date
+    except ValueError:
+        raise ValueError("Invalid date format. Please use YYYY-MM-DD-HH.")
+
+
+def location_picker() -> str:
+    """
+    Ask the user to input a location. By default use the current location based on the IP address.
+    """
+    prompt = "Enter a location"
+    g = geocoder.ip("me")
+    default_val = f"{g.city} {g.state}, {g.country}"
+    location = Prompt.ask(prompt, default=default_val)
+    if not location:
+        raise ValueError("location cannot be empty")
+    return location
+
+
+def max_driving_hours_picker() -> int:
+    """
+    Ask the user to input the maximum driving hours. The default is 2 hours.
+    """
+    prompt = "Enter the maximum driving hours"
+    default_val = str(2)
+    max_driving_hours = Prompt.ask(prompt, default=default_val)
+    try:
+        max_driving_hours = int(max_driving_hours)
+        if max_driving_hours <= 0:
+            raise ValueError("Invalid choice")
+        return max_driving_hours
+    except ValueError:
+        raise ValueError("Invalid choice")
+
+
+def get_litellm_model_id(agent_name) -> str:
+    """
+    Ask the user to input a model_id string. Validate it using the litellm.validate_environment function
+    """
+    from litellm.utils import validate_environment
+
+    prompt = f"Enter a valid model_id for agent {agent_name} using LiteLLM syntax"
+    default_val = "openai/gpt-4o"
+    model_id = Prompt.ask(prompt, default=default_val)
+    # make a call to validate the model id: this will throw an error if the model id is not valid
+    get_llm_provider(model=model_id)
+    # make a call to validate that the environment is correct for the model id
+    env_check = validate_environment(model_id)
+    if not env_check["keys_in_environment"]:
+        msg = f"{env_check['missing_keys']} needed for {model_id}"
+        raise ValueError(msg)
+    return model_id
+
+
+def set_mcp_settings(tool):
+    logger.info(
+        f"This MCP uses {tool['command']}. If you don't have this set up this will not work"
+    )
+    if "mcp/filesystem" not in tool["args"]:
+        msg = "The only MCP that this demo supports is the filesystem MCP"
+        raise ValueError(msg)
+    if not any("{{ path_variable }}" in arg for arg in tool["args"]):
+        msg = "The filesystem MCP must have { path_variable } in the args list"
+        raise ValueError(msg)
+    for idx, item in enumerate(tool["args"]):
+        if "{{ path_variable }}" in item:
+            default_val = os.path.join(tempfile.gettempdir(), "surf_spot_finder")
+            answer = Prompt.ask(
+                "Please enter the path you'd like the Filesystem MCP to access",
+                default=default_val,
+            )
+            os.makedirs(answer, exist_ok=True)
+            tool["args"][idx] = item.replace("{{ path_variable }}", answer)
+    return tool
+
+
 class Config(BaseModel):
     model_config = ConfigDict(extra="forbid")
 
@@ -36,41 +149,81 @@
     managed_agents: list[AgentConfig] | None = None
 
     @classmethod
-    def
+    def from_dict(cls, data: dict) -> "Config":
         """
-
-
-
+        Create a Config instance from a dictionary.
+
+        Args:
+            data (dict): A dictionary containing the configuration data.
 
         Returns:
-            Config: A new Config instance populated with values from the
+            Config: A new Config instance populated with values from the dictionary.
         """
-        with open(yaml_path, "r") as f:
-            data = yaml.safe_load(f)
         # for each tool listed in main_agent.tools, use import lib to import it and replace the str with the callable
         callables = []
-
+        if data.get("main_agent") is None:
+            data["main_agent"] = {}
+        if not data["main_agent"].get("model_id"):
+            data["main_agent"]["model_id"] = get_litellm_model_id("main_agent")
+        else:
+            logger.info(f"Main agent using model_id {data['main_agent']['model_id']}")
+        for tool in data["main_agent"].get("tools", []):
             if isinstance(tool, str):
                 module_name, func_name = tool.rsplit(".", 1)
                 module = __import__(module_name, fromlist=[func_name])
-                print(f"Importing {tool}")
                 callables.append(getattr(module, func_name))
             else:
                 # this means it must be an MCPStdioParams
-
+                # For the purposes of this demo, currently we just look for the filesystem MCP which we have a placeholder
+                # for the path variable (which controls which dirs the MCP will have access to).
+                mcp_tool = set_mcp_settings(tool)
+                callables.append(mcp_tool)
         data["main_agent"]["tools"] = callables
         for agent in data.get("managed_agents", []):
+            if agent.get("model_id") is None:
+                agent["model_id"] = get_litellm_model_id(
+                    agent.get("name", "managed_agent")
+                )
+            else:
+                logger.info(f"Agent {agent['name']} using model_id {agent['model_id']}")
             callables = []
             for tool in agent.get("tools", []):
                 if isinstance(tool, str):
                     module_name, func_name = tool.rsplit(".", 1)
                     module = __import__(module_name, fromlist=[func_name])
-                    print(f"Importing {tool}")
                     callables.append(getattr(module, func_name))
                 else:
                     # this means it must be an MCPStdioParams
-
+                    mcp_tool = set_mcp_settings(tool)
+                    callables.append(mcp_tool)
            agent["tools"] = callables
-
-
+        if not data.get("framework"):
+            data["framework"] = ask_framework()
+        else:
+            logger.info(f"Using framework {data['framework']}")
+        if not data.get("location"):
+            data["location"] = location_picker()
+        else:
+            logger.info(f"Using location {data['location']}")
+        if not data.get("max_driving_hours"):
+            data["max_driving_hours"] = max_driving_hours_picker()
+        else:
+            logger.info(f"Using max driving hours {data['max_driving_hours']}")
+        if not data.get("date"):
+            data["date"] = date_picker()
+        else:
+            logger.info(f"Using date {data['date']}")
         return cls(**data)
+
+    @classmethod
+    def from_yaml(cls, yaml_path: str) -> "Config":
+        """
+        Create a Config instance from a YAML file.
+
+        Args:
+            yaml_path: Path to the YAML configuration file
+
+        Returns:
+            Config: A new Config instance populated with values from the YAML file
+        """
+        with open(yaml_path, "r") as f:
+            data = yaml.safe_load(f)
+        return cls.from_dict(data)
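
get_litellm_model_id accepts any LiteLLM-style model string and fails fast when the provider cannot be resolved or the required API key is not in the environment. A trimmed sketch of just that validation step, using the same litellm helpers the diff imports:

from litellm.litellm_core_utils.get_llm_provider_logic import get_llm_provider
from litellm.utils import validate_environment

model_id = "openai/gpt-4o"
get_llm_provider(model=model_id)  # raises if the provider prefix is unknown
env_check = validate_environment(model_id)
if not env_check["keys_in_environment"]:
    raise ValueError(f"{env_check['missing_keys']} needed for {model_id}")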
src/surf_spot_finder/no_framework.py
CHANGED
@@ -3,7 +3,7 @@ from datetime import datetime
 
 from fire import Fire
 from litellm import completion
-from
+from any_agent.logging import logger
 from pydantic import BaseModel
 
 from any_agent.tools.web_browsing import search_web, visit_webpage
@@ -23,7 +23,6 @@ class SpotScore(BaseModel):
     reason: str
 
 
-@logger.catch(reraise=True)
 def find_surf_spot_no_framework(
     location: str, max_driving_hours: int, date: datetime, model_id: str
 ) -> list[SpotScore]: