# NOTE(review): the original capture of this file began with Hugging Face
# Spaces page chrome picked up by extraction — a "Spaces:" banner, the build
# status ("Runtime error"), the file size (6,872 Bytes), a commit-hash gutter
# (a401e2a, 9b5b26a, c19d193, 6aae614, 8fe992b, 3d1237b, 8c01ffb, f45da72,
# ac30d64, ae7a494, 13d500a, ...), and a 1-184 line-number gutter. None of
# that is Python source; it is preserved here as comments so the file parses.
# =====================================================================
# Local AI Agent using smolagents with LiteLLM and Ollama
# =====================================================================
# This application creates a local AI agent that can:
# - Search the web using DuckDuckGo
# - Generate images using Hugging Face's text-to-image model
# - Get current time in different timezones
# - Check if a number is prime
# - And more!
#
# IMPORTANT: This agent requires local execution with Ollama running on port 11434
# Remote models from Hugging Face (like Qwen2.5-Coder or Mistral-7B) are often overloaded
# and may return 'Payment Required' errors or be paused by their providers.
#
# Setup requirements:
# 1. Install Ollama (https://ollama.ai/)
# 2. Pull the llama3.2 model: `ollama pull llama3.2`
# 3. Ensure Ollama is running before starting this application
# 4. Install Python dependencies from requirements.txt
# =====================================================================
from smolagents import CodeAgent, DuckDuckGoSearchTool, load_tool, tool
from smolagents.models import LiteLLMModel
import os
import datetime
import requests
import pytz
import yaml
from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI
# =====================================================================
# Tool Definitions
# =====================================================================
# Each tool is defined with the @tool decorator and includes docstrings
# that help the agent understand when and how to use each tool.
# =====================================================================
# Template tool for users to customize with their own logic.
@tool
def my_custom_tool(arg1: str, arg2: int) -> str:
    # NOTE: smolagents parses the docstring (description + Args section) to
    # build the tool schema, so keep that format when repurposing this stub.
    # The return annotation is likewise required by the @tool decorator.
    """A tool that does nothing yet
    Args:
        arg1: the first argument
        arg2: the second argument
    """
    return "What magic will you build ?"
@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """A tool that fetches the current local time in a specified timezone.
    Args:
        timezone: A string representing a valid timezone (e.g., 'America/New_York').
    """
    try:
        # Resolve the IANA name and take "now" in that zone in a single step;
        # pytz raises (e.g. UnknownTimeZoneError) for names it cannot resolve.
        now = datetime.datetime.now(pytz.timezone(timezone))
        stamp = now.strftime("%Y-%m-%d %H:%M:%S")
        return f"The current local time in {timezone} is: {stamp}"
    except Exception as e:
        # Best-effort tool: report the failure as text instead of crashing the
        # agent's reasoning loop.
        return f"Error fetching time for timezone '{timezone}': {str(e)}"
# Import image generation tool from Hugging Face Hub
# This allows the agent to create images based on text prompts
# NOTE(review): trust_remote_code=True executes code shipped with the Hub
# repo "agents-course/text-to-image" — acceptable only because the source is
# trusted; do not flip this on for arbitrary repos.
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
@tool
def generate_image(prompt: str) -> str:
    """Generate image(s) from a text prompt via HF text-to-image.
    Args:
        prompt: description of the image
    """
    # Thin wrapper: exposes the Hub-loaded tool to the agent under a stable
    # local name with a parseable docstring.
    result = image_generation_tool(prompt)
    return result
@tool
def duckduckgo_search(query: str, max_results: int = 5) -> str:
    """Search DuckDuckGo for a query and return the top N results.
    Args:
        query: The search query string to look up on DuckDuckGo
        max_results: Maximum number of search results to return (default is 5)
    Returns:
        A string containing the search results
    """
    # Build the search tool per call so each query can carry its own
    # max_results cap, then run the query immediately.
    return DuckDuckGoSearchTool(max_results=max_results)(query)
@tool
def is_prime(number: int) -> bool:
    """
    Check if a number is prime using an optimized 6k±1 algorithm.
    Args:
        number: The integer to check for primality.
    Returns:
        True if `number` is prime, False otherwise.
    """
    # Guard clauses: below 2 is composite-by-definition territory; 2 and 3
    # are the only primes not of the form 6k±1, and they also screen out
    # every multiple of 2 or 3 in one pass.
    if number < 2:
        return False
    for small in (2, 3):
        if number == small:
            return True
        if number % small == 0:
            return False
    # Every remaining prime candidate is 6k-1 or 6k+1; trial-divide by those
    # pairs up to sqrt(number).
    divisor = 5
    while divisor * divisor <= number:
        if number % divisor == 0 or number % (divisor + 2) == 0:
            return False
        divisor += 6
    return True
# =====================================================================
# Agent Configuration
# =====================================================================
# The FinalAnswerTool is used to provide final responses to the user
final_answer = FinalAnswerTool()
# IMPORTANT: Remote models are often overloaded or require payment
# Previous attempts to use these models resulted in errors:
#  - Qwen/Qwen2.5-Coder-32B-Instruct: "Payment Required" error
#  - mistralai/Mistral-7B-Instruct-v0.2: Model was paused
#
# Alternative HF endpoint (if needed):
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
#
# Instead, we use a local Ollama model which is more reliable.
# The env var is kept as a belt-and-braces fallback; the supported way to
# point LiteLLM at a local Ollama server is the api_base argument below.
os.environ["LITELLM_OLLAMA_API_BASE"] = "http://localhost:11434"
# Initialize the model. FIX(review): the original passed both model_name and
# model_id — LiteLLMModel's parameter is model_id, and the unrecognized
# model_name kwarg gets forwarded into litellm.completion calls, where it can
# cause errors. Only model_id is passed now.
model = LiteLLMModel(
    model_id="ollama/llama3.2",          # locally available model (`ollama pull llama3.2`)
    api_base="http://localhost:11434",   # local Ollama endpoint
    max_tokens=1024,                     # maximum tokens in the response
    temperature=0.7,                     # randomness (higher = more creative)
)
# =====================================================================
# Agent Initialization
# =====================================================================
# Wire the configured model and the tool belt into a CodeAgent.
# Add new tools to this list to extend the agent's capabilities.
toolbox = [
    final_answer,                  # required: emits the final answer to the user
    duckduckgo_search,             # web search capability
    get_current_time_in_timezone,  # timezone-aware clock
    my_custom_tool,                # customizable example tool
    generate_image,                # HF text-to-image wrapper
    is_prime,                      # primality checker
]
agent = CodeAgent(
    model=model,
    tools=toolbox,
    max_steps=6,             # maximum reasoning steps per query
    verbosity_level=1,       # controls logging detail
    grammar=None,            # no output-grammar constraints
    planning_interval=None,  # no periodic re-planning
    name=None,
    description=None,
    # prompt_templates omitted -> smolagents' defaults are used
)
# =====================================================================
# Launch the Gradio Web Interface
# =====================================================================
# This creates a user-friendly web interface for interacting with the agent
# Accessible at http://127.0.0.1:7860 by default
# FIX(review): removed a stray trailing " |" (table-scrape artifact from the
# Spaces page capture) that made this line a syntax error.
GradioUI(agent).launch()