Update app.py
app.py CHANGED
@@ -1,81 +1,182 @@
-import
 import datetime
 import requests
 import pytz
 import yaml
-from bs4 import BeautifulSoup  # For web scraping
-from PIL import Image  # For image processing
-from io import BytesIO
-from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
 from tools.final_answer import FinalAnswerTool
 from Gradio_UI import GradioUI

-#
-image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
-
-# Weather Tool (using wttr.in API, no API key needed)
 @tool
-def get_weather(
-    """
     Args:
     """
     try:
         response = requests.get(url)
     except Exception as e:
-        return f"Error fetching weather: {str(e)}"

-# Web
 @tool
-def
-    """
     Args:
-        url: The URL of the
     """
     try:
-        headers = {
         response = requests.get(url, headers=headers)
-        soup = BeautifulSoup(response.text, 'html.parser')

-        articles = [p.get_text() for p in soup.find_all('p')][:5]
     except Exception as e:
-        return f"Error scraping

-# Image
 @tool
-def
-    """
     Args:
     """
     try:
     except Exception as e:
-        return f"Error

-# Timezone Tool (already in your code)
 @tool
 def get_current_time_in_timezone(timezone: str) -> str:
-    """
     Args:
-        timezone: A valid timezone (e.g., 'America/New_York').
     """
     try:
         tz = pytz.timezone(timezone)
         local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
         return f"The current local time in {timezone} is: {local_time}"
     except Exception as e:
-        return f"Error fetching time: {str(e)}"

 final_answer = FinalAnswerTool()

 model = HfApiModel(
     max_tokens=2096,
     temperature=0.5,
@@ -83,13 +184,23 @@ model = HfApiModel(
     custom_role_conversions=None,
 )

-#
 with open("prompts.yaml", 'r') as stream:
     prompt_templates = yaml.safe_load(stream)

 agent = CodeAgent(
     model=model,
-    tools=[
     max_steps=6,
     verbosity_level=1,
     grammar=None,
@@ -99,4 +210,4 @@ agent = CodeAgent(
     prompt_templates=prompt_templates
 )

-GradioUI(agent).launch()

The updated app.py:

from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
import datetime
import requests
import pytz
import yaml
from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI
import bs4
from PIL import Image
import io
import base64

# Weather tool
@tool
def get_weather(location: str) -> str:
    """Fetch current weather information for a specified location.

    Args:
        location: A string representing the city, state, or country.
    """
    try:
        # Using OpenWeatherMap API (you'll need an API key)
        api_key = "YOUR_OPENWEATHERMAP_API_KEY"  # Replace with your API key
        url = f"https://api.openweathermap.org/data/2.5/weather?q={location}&appid={api_key}&units=metric"
        response = requests.get(url)
        data = response.json()

        if response.status_code == 200:
            weather_description = data['weather'][0]['description']
            temperature = data['main']['temp']
            humidity = data['main']['humidity']
            wind_speed = data['wind']['speed']

            return f"Weather in {location}:\n" \
                   f"Description: {weather_description}\n" \
                   f"Temperature: {temperature}°C\n" \
                   f"Humidity: {humidity}%\n" \
                   f"Wind Speed: {wind_speed} m/s"
        else:
            return f"Error fetching weather for {location}: {data.get('message', 'Unknown error')}"
    except Exception as e:
        return f"Error fetching weather for {location}: {str(e)}"

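# Illustrative sketch, not part of the tool above: rather than hardcoding the key,
# it could be read from the environment (the variable name here is only an example):
#   import os
#   api_key = os.environ.get("OPENWEATHERMAP_API_KEY", "")
# With a real key in place, the tool can be smoke-tested directly:
#   print(get_weather("Berlin"))
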
# Web scraping tool
@tool
def web_scrape(url: str, selector: str = None) -> str:
    """Scrape content from a webpage.

    Args:
        url: The URL of the webpage to scrape.
        selector: Optional CSS selector to extract specific elements (default: None, returns full page text).
    """
    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
        response = requests.get(url, headers=headers)

        if response.status_code != 200:
            return f"Failed to access the URL. Status code: {response.status_code}"

        soup = bs4.BeautifulSoup(response.text, 'html.parser')

        # Remove script and style elements
        for script in soup(["script", "style"]):
            script.extract()

        if selector:
            elements = soup.select(selector)
            if not elements:
                return f"No elements found matching selector: {selector}"
            content = "\n".join([elem.get_text(strip=True) for elem in elements])
        else:
            # Get all text
            content = soup.get_text(separator='\n', strip=True)

        # Truncate if too long
        if len(content) > 5000:
            content = content[:5000] + "... (content truncated)"

        return content
    except Exception as e:
        return f"Error scraping {url}: {str(e)}"

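# Illustrative usage sketch (URL and selector are placeholders):
#   print(web_scrape("https://example.com"))                  # full page text
#   print(web_scrape("https://example.com", selector="h1"))   # only <h1> elements
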
# Image processing tool
@tool
def process_image(image_url: str, operation: str = "info") -> str:
    """Process an image with various operations.

    Args:
        image_url: URL of the image to process.
        operation: The operation to perform (options: "info", "resize", "grayscale", "blur").
    """
    try:
        response = requests.get(image_url)
        if response.status_code != 200:
            return f"Failed to download image. Status code: {response.status_code}"

        image = Image.open(io.BytesIO(response.content))

        if operation == "info":
            info = {
                "format": image.format,
                "mode": image.mode,
                "width": image.width,
                "height": image.height,
                "size_kb": len(response.content) / 1024
            }
            return f"Image information:\n" + "\n".join([f"{k}: {v}" for k, v in info.items()])

        elif operation == "resize":
            # Resize to 50% of original size
            new_size = (image.width // 2, image.height // 2)
            resized = image.resize(new_size)

            # Convert to base64 for return
            buffered = io.BytesIO()
            resized.save(buffered, format=image.format if image.format else "JPEG")
            img_str = base64.b64encode(buffered.getvalue()).decode()

            return f"Resized image (now {new_size[0]}x{new_size[1]}):\ndata:image/{image.format.lower() if image.format else 'jpeg'};base64,{img_str}"

        elif operation == "grayscale":
            grayscale = image.convert('L')

            # Convert to base64 for return
            buffered = io.BytesIO()
            grayscale.save(buffered, format=image.format if image.format else "JPEG")
            img_str = base64.b64encode(buffered.getvalue()).decode()

            return f"Grayscale image:\ndata:image/{image.format.lower() if image.format else 'jpeg'};base64,{img_str}"

        elif operation == "blur":
            from PIL import ImageFilter
            blurred = image.filter(ImageFilter.GaussianBlur(radius=5))

            # Convert to base64 for return
            buffered = io.BytesIO()
            blurred.save(buffered, format=image.format if image.format else "JPEG")
            img_str = base64.b64encode(buffered.getvalue()).decode()

            return f"Blurred image:\ndata:image/{image.format.lower() if image.format else 'jpeg'};base64,{img_str}"

        else:
            return f"Unknown operation: {operation}. Available operations: info, resize, grayscale, blur"

    except Exception as e:
        return f"Error processing image: {str(e)}"

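# Illustrative usage sketch (the image URL is a placeholder); resize/grayscale/blur
# return the result inline as a base64 data URI string:
#   print(process_image("https://example.com/photo.jpg"))                         # info
#   print(process_image("https://example.com/photo.jpg", operation="grayscale"))
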
# Keeping your existing custom tools
@tool
def my_custom_tool(arg1: str, arg2: int) -> str:
    """A tool that does nothing yet

    Args:
        arg1: the first argument
        arg2: the second argument
    """
    return "What magic will you build ?"

@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """A tool that fetches the current local time in a specified timezone.

    Args:
        timezone: A string representing a valid timezone (e.g., 'America/New_York').
    """
    try:
        # Create timezone object
        tz = pytz.timezone(timezone)
        # Get current time in that timezone
        local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
        return f"The current local time in {timezone} is: {local_time}"
    except Exception as e:
        return f"Error fetching time for timezone '{timezone}': {str(e)}"

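# Illustrative usage sketch: print(get_current_time_in_timezone("Europe/Paris"))
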
final_answer = FinalAnswerTool()

# Model setup
model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    # ... (unchanged model settings not shown in the diff)
    custom_role_conversions=None,
)

# Import tool from Hub
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)

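# The Hub tool is wired into the agent below; assuming it follows the course's
# text-to-image interface, it can also be invoked directly with a prompt, e.g.
#   image_generation_tool("A sunset over snowy mountains")
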
with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)

agent = CodeAgent(
    model=model,
    tools=[
        final_answer,
        get_weather,
        web_scrape,
        process_image,
        get_current_time_in_timezone,
        my_custom_tool,
        image_generation_tool
    ],  # Added the new tools here
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    # ... (unchanged agent settings not shown in the diff)
    prompt_templates=prompt_templates
)

GradioUI(agent).launch()
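
As a quick local check (a sketch; the query string is only an example), the agent can also be exercised from a Python session without launching the Gradio UI, since CodeAgent exposes run():

    # Run a single task through the agent and print the final answer
    result = agent.run("What is the current weather and local time in Tokyo?")
    print(result)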