# =====================================================================
# Local AI Agent using smolagents with LiteLLM and Ollama
# =====================================================================
# This application creates a local AI agent that can:
# - Search the web using DuckDuckGo
# - Generate images using Hugging Face's text-to-image model
# - Get the current time in different timezones
# - Check if a number is prime
# - And more!
#
# IMPORTANT: This agent requires local execution with Ollama running on port 11434.
# Remote models from Hugging Face (like Qwen2.5-Coder or Mistral-7B) are often overloaded
# and may return 'Payment Required' errors or be paused by their providers.
#
# Setup requirements:
# 1. Install Ollama (https://ollama.ai/)
# 2. Pull the llama3.2 model: `ollama pull llama3.2`
# 3. Ensure Ollama is running before starting this application
# 4. Install Python dependencies from requirements.txt
# =====================================================================
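# Note: requirements.txt is not shown here. Based on the imports below, it
# likely needs at least smolagents, litellm, gradio, duckduckgo-search,
# requests, pytz, and pyyaml (exact package names and pins are an assumption).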
from smolagents import CodeAgent, DuckDuckGoSearchTool, load_tool, tool
from smolagents.models import LiteLLMModel
import os
import datetime
import requests
import pytz
import yaml
from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI
# =====================================================================
# Tool Definitions
# =====================================================================
# Each tool is defined with the @tool decorator and includes docstrings
# that help the agent understand when and how to use each tool.
# =====================================================================

# Simple example tool that can be customized
@tool
def my_custom_tool(arg1: str, arg2: int) -> str:  # it's important to specify the return type
    # Keep this format for the description / args / args description, but feel free to modify the tool
    """A tool that does nothing yet
    Args:
        arg1: the first argument
        arg2: the second argument
    """
    return "What magic will you build ?"
@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """A tool that fetches the current local time in a specified timezone.
    Args:
        timezone: A string representing a valid timezone (e.g., 'America/New_York').
    """
    try:
        # Create timezone object
        tz = pytz.timezone(timezone)
        # Get current time in that timezone
        local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
        return f"The current local time in {timezone} is: {local_time}"
    except Exception as e:
        return f"Error fetching time for timezone '{timezone}': {str(e)}"
# Import image generation tool from Hugging Face Hub
# This allows the agent to create images based on text prompts
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)

@tool
def generate_image(prompt: str) -> str:
    """Generate image(s) from a text prompt via HF text-to-image.
    Args:
        prompt: description of the image
    """
    return image_generation_tool(prompt)
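# Example (illustrative): generate_image("a watercolor lighthouse at dusk")
# The exact return value depends on the agents-course/text-to-image tool.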
@tool
def duckduckgo_search(query: str, max_results: int = 5) -> str:
    """Search DuckDuckGo for a query and return the top N results.
    Args:
        query: The search query string to look up on DuckDuckGo
        max_results: Maximum number of search results to return (default is 5)
    Returns:
        A string containing the search results
    """
    searcher = DuckDuckGoSearchTool(max_results=max_results)
    return searcher(query)
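# Example (illustrative): duckduckgo_search("smolagents documentation", max_results=3)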
@tool
def is_prime(number: int) -> bool:
    """
    Check if a number is prime using an optimized 6k±1 algorithm.
    Args:
        number: The integer to check for primality.
    Returns:
        True if `number` is prime, False otherwise.
    """
    # Numbers less than 2 are not prime
    if number <= 1:
        return False
    # 2 and 3 are prime
    if number <= 3:
        return True
    # Eliminate multiples of 2 and 3
    if number % 2 == 0 or number % 3 == 0:
        return False
    # Test 6k ± 1 factors up to sqrt(number)
    i = 5
    while i * i <= number:
        if number % i == 0 or number % (i + 2) == 0:
            return False
        i += 6
    return True
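# Quick sanity check (illustrative): is_prime(2) and is_prime(97) are True,
# while is_prime(1) and is_prime(91) are False (91 = 7 * 13).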
# =====================================================================
# Agent Configuration
# =====================================================================
# The FinalAnswerTool is used to provide final responses to the user
final_answer = FinalAnswerTool()

# IMPORTANT: Remote models are often overloaded or require payment.
# Previous attempts to use these models resulted in errors:
# - Qwen/Qwen2.5-Coder-32B-Instruct: "Payment Required" error
# - mistralai/Mistral-7B-Instruct-v0.2: Model was paused
#
# Alternative HF endpoint (if needed):
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
#
# Instead, we use a local Ollama model, which is more reliable.
# Point LiteLLM at the local Ollama instance. (The same base URL is also
# passed explicitly via `api_base` when constructing LiteLLMModel below.)
os.environ["OLLAMA_API_BASE"] = "http://localhost:11434"
# Initialize the model with appropriate parameters
model = LiteLLMModel(
    model_id="ollama/llama3.2",         # Explicitly set model_id to the local llama3.2 model (avoids the library default)
    api_base="http://localhost:11434",  # Local Ollama server
    max_tokens=1024,                    # Maximum tokens in the response
    temperature=0.7,                    # Controls randomness (higher = more creative)
)
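# Optional sanity check (a minimal sketch, not part of the original agent):
# verify that the local Ollama server is reachable before building the agent.
# Assumes Ollama's root endpoint answers a plain HTTP GET on port 11434.
def _ollama_is_reachable(base_url: str = "http://localhost:11434") -> bool:
    try:
        return requests.get(base_url, timeout=2).status_code == 200
    except requests.RequestException:
        return False

if not _ollama_is_reachable():
    print(
        "Warning: could not reach Ollama at http://localhost:11434. "
        "Make sure `ollama serve` is running and llama3.2 has been pulled."
    )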
# =====================================================================
# Agent Initialization
# =====================================================================
# Initialize the agent with the configured model and tools
agent = CodeAgent(
    model=model,
    # List of tools available to the agent - add new tools here
    tools=[
        final_answer,                  # Required for providing final answers
        duckduckgo_search,             # Web search capability
        get_current_time_in_timezone,  # Time-related functions
        my_custom_tool,                # Example custom tool
        generate_image,                # Image generation
        is_prime,                      # Prime number checker
    ],
    max_steps=6,              # Maximum reasoning steps per query
    verbosity_level=1,        # Controls logging detail
    grammar=None,             # Optional grammar constraints
    planning_interval=None,   # How often to insert a planning step (None disables planning)
    name=None,
    description=None,
    # Not specifying prompt_templates will use the default ones from smolagents
)
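# Optional: try the agent from the command line before launching the UI
# (illustrative; CodeAgent.run returns the agent's final answer).
# print(agent.run("What time is it in Europe/Paris right now?"))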
# =====================================================================
# Launch the Gradio Web Interface
# =====================================================================
# This creates a user-friendly web interface for interacting with the agent
# Accessible at http://127.0.0.1:7860 by default
GradioUI(agent).launch()