import datetime
from typing import Optional

import pytz
import requests
import yaml
from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool

from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI
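
# Tool: search the public Hugging Face Hub model listing API and return the top matches.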
@tool
def search_hf_models(query: str, task: Optional[str] = None) -> str:
    """Searches Hugging Face Hub models matching a query, optionally filtered by task.

    Args:
        query: Free-text search string, e.g. 'sentence similarity'.
        task: Optional task tag to filter results by, e.g. 'text-classification'.
    """
    try:
        params = {"search": query, "filter": task} if task else {"search": query}
        response = requests.get(
            "https://huggingface.co/api/models",
            params=params,
            headers={"Accept": "application/json"},
        )
        response.raise_for_status()
        results = []
        for model in response.json()[:5]:  # keep only the top 5 hits
            model_info = f"""
🧠 **{model['modelId']}**
- Author: {model.get('author', 'unknown')}
- Downloads: {model.get('downloads', 0):,}
- Tags: {', '.join(model.get('tags', []))}
- URL: https://huggingface.co/{model['modelId']}
"""
            results.append(model_info)
        return "\n\n".join(results) if results else "No models found"
    except Exception as e:
        return f"Search error: {str(e)}"
@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """Fetches the current local time in a specified timezone.

    Args:
        timezone: A valid IANA timezone name, e.g. 'America/New_York'.
    """
    try:
        tz = pytz.timezone(timezone)
        local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
        return f"Current time in {timezone}: {local_time}"
    except Exception as e:
        return f"Timezone error: {str(e)}"
final_answer = FinalAnswerTool()
model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
    custom_role_conversions=None,
)
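
# Pull the course's text-to-image tool from the Hub and load the agent's prompt templates from prompts.yaml.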
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
with open("prompts.yaml", 'r') as stream:
prompt_templates = yaml.safe_load(stream)
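
# Assemble the CodeAgent with all tools and serve it through the Gradio chat UI.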
agent = CodeAgent(
    model=model,
    tools=[final_answer, search_hf_models, get_current_time_in_timezone, image_generation_tool],
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates,
)

GradioUI(agent).launch()