# Installation of required packages
# pip install smolagents requests pytz pyyaml beautifulsoup4 Pillow

from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
import datetime
import requests
import pytz
import yaml
from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI
import bs4
from PIL import Image
import io
import base64
# Weather tool using Open-Meteo (completely free, no API key required)
@tool
def get_weather(location: str) -> str:
    """Fetch current weather information for a specified location.
    Args:
        location: A string representing the city name.
    """
    try:
        # First, geocode the location to get coordinates
        geocoding_url = "https://nominatim.openstreetmap.org/search"
        params = {
            'q': location,
            'format': 'json',
            'limit': 1
        }
        headers = {
            'User-Agent': 'WeatherToolAgent/1.0'  # OpenStreetMap requires a user agent
        }
        geo_response = requests.get(geocoding_url, params=params, headers=headers)
        geo_data = geo_response.json()
        if not geo_data:
            return f"Could not find coordinates for location: {location}"

        # Extract latitude and longitude
        lat = float(geo_data[0]['lat'])
        lon = float(geo_data[0]['lon'])
        display_name = geo_data[0]['display_name']

        # Now get weather data from Open-Meteo
        weather_url = "https://api.open-meteo.com/v1/forecast"
        weather_params = {
            'latitude': lat,
            'longitude': lon,
            'current': 'temperature_2m,relative_humidity_2m,apparent_temperature,precipitation,weather_code,wind_speed_10m',
            'timezone': 'auto'
        }
        weather_response = requests.get(weather_url, params=weather_params)
        weather_data = weather_response.json()
        if 'current' not in weather_data:
            return f"Error fetching weather data for {location}"

        # WMO Weather interpretation codes (https://open-meteo.com/en/docs)
        weather_codes = {
            0: "Clear sky",
            1: "Mainly clear", 2: "Partly cloudy", 3: "Overcast",
            45: "Fog", 48: "Depositing rime fog",
            51: "Light drizzle", 53: "Moderate drizzle", 55: "Dense drizzle",
            56: "Light freezing drizzle", 57: "Dense freezing drizzle",
            61: "Slight rain", 63: "Moderate rain", 65: "Heavy rain",
            66: "Light freezing rain", 67: "Heavy freezing rain",
            71: "Slight snow fall", 73: "Moderate snow fall", 75: "Heavy snow fall",
            77: "Snow grains",
            80: "Slight rain showers", 81: "Moderate rain showers", 82: "Violent rain showers",
            85: "Slight snow showers", 86: "Heavy snow showers",
            95: "Thunderstorm", 96: "Thunderstorm with slight hail", 99: "Thunderstorm with heavy hail"
        }
        current = weather_data['current']
        weather_description = weather_codes.get(current['weather_code'], "Unknown")
        weather_info = (
            f"Weather in {display_name}:\n"
            f"Condition: {weather_description}\n"
            f"Temperature: {current['temperature_2m']}°C\n"
            f"Feels like: {current['apparent_temperature']}°C\n"
            f"Humidity: {current['relative_humidity_2m']}%\n"
            f"Wind Speed: {current['wind_speed_10m']} km/h\n"
            f"Precipitation: {current['precipitation']} mm"
        )
        return weather_info
    except Exception as e:
        return f"Error fetching weather for {location}: {str(e)}"
# Web scraping tool
@tool
def web_scrape(url: str, selector: str = None) -> str:
    """Scrape content from a webpage.
    Args:
        url: The URL of the webpage to scrape.
        selector: Optional CSS selector to extract specific elements (default: None, returns full page text).
    """
    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
        response = requests.get(url, headers=headers)
        if response.status_code != 200:
            return f"Failed to access the URL. Status code: {response.status_code}"

        soup = bs4.BeautifulSoup(response.text, 'html.parser')
        # Remove script and style elements
        for script in soup(["script", "style"]):
            script.extract()

        if selector:
            elements = soup.select(selector)
            if not elements:
                return f"No elements found matching selector: {selector}"
            content = "\n".join([elem.get_text(strip=True) for elem in elements])
        else:
            # Get all text
            content = soup.get_text(separator='\n', strip=True)

        # Truncate if too long
        if len(content) > 5000:
            content = content[:5000] + "... (content truncated)"
        return content
    except Exception as e:
        return f"Error scraping {url}: {str(e)}"
# Image processing tool
@tool
def process_image(image_url: str, operation: str = "info") -> str:
    """Process an image with various operations.
    Args:
        image_url: URL of the image to process.
        operation: The operation to perform (options: "info", "resize", "grayscale", "blur").
    """
    try:
        response = requests.get(image_url)
        if response.status_code != 200:
            return f"Failed to download image. Status code: {response.status_code}"

        image = Image.open(io.BytesIO(response.content))
        if operation == "info":
            info = {
                "format": image.format,
                "mode": image.mode,
                "width": image.width,
                "height": image.height,
                "size_kb": len(response.content) / 1024
            }
            return "Image information:\n" + "\n".join([f"{k}: {v}" for k, v in info.items()])
        elif operation == "resize":
            # Resize to 50% of original size
            new_size = (image.width // 2, image.height // 2)
            resized = image.resize(new_size)
            # Convert to base64 for return
            buffered = io.BytesIO()
            resized.save(buffered, format=image.format if image.format else "JPEG")
            img_str = base64.b64encode(buffered.getvalue()).decode()
            return f"Resized image (now {new_size[0]}x{new_size[1]}):\ndata:image/{image.format.lower() if image.format else 'jpeg'};base64,{img_str}"
        elif operation == "grayscale":
            grayscale = image.convert('L')
            # Convert to base64 for return
            buffered = io.BytesIO()
            grayscale.save(buffered, format=image.format if image.format else "JPEG")
            img_str = base64.b64encode(buffered.getvalue()).decode()
            return f"Grayscale image:\ndata:image/{image.format.lower() if image.format else 'jpeg'};base64,{img_str}"
        elif operation == "blur":
            from PIL import ImageFilter
            blurred = image.filter(ImageFilter.GaussianBlur(radius=5))
            # Convert to base64 for return
            buffered = io.BytesIO()
            blurred.save(buffered, format=image.format if image.format else "JPEG")
            img_str = base64.b64encode(buffered.getvalue()).decode()
            return f"Blurred image:\ndata:image/{image.format.lower() if image.format else 'jpeg'};base64,{img_str}"
        else:
            return f"Unknown operation: {operation}. Available operations: info, resize, grayscale, blur"
    except Exception as e:
        return f"Error processing image: {str(e)}"
# Keeping your existing custom tools
@tool
def my_custom_tool(arg1: str, arg2: int) -> str:
    """A tool that does nothing yet
    Args:
        arg1: the first argument
        arg2: the second argument
    """
    return "What magic will you build ?"

@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """A tool that fetches the current local time in a specified timezone.
    Args:
        timezone: A string representing a valid timezone (e.g., 'America/New_York').
    """
    try:
        # Create timezone object
        tz = pytz.timezone(timezone)
        # Get current time in that timezone
        local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
        return f"The current local time in {timezone} is: {local_time}"
    except Exception as e:
        return f"Error fetching time for timezone '{timezone}': {str(e)}"
final_answer = FinalAnswerTool()

# Model setup
model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
    custom_role_conversions=None,
)

# Import tool from Hub
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)

with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)

agent = CodeAgent(
    model=model,
    tools=[
        final_answer,
        get_weather,
        web_scrape,
        process_image,
        get_current_time_in_timezone,
        my_custom_tool,
        image_generation_tool
    ],  # Added the new tools here
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates
)

GradioUI(agent).launch()
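
# Once the Gradio UI is running, a prompt such as "What's the weather in Paris right now?"
# goes through the CodeAgent loop, which would typically call get_weather(...) and then
# wrap the result with final_answer. The exact tool sequence is chosen by the model at run time.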