from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
# Optional alternative model backend: Google Gemini via LiteLLM
# from litellm import LiteLLMModel, RateLimitError
import datetime
import requests
import yaml
import os
try:
    import pytz
    print("pytz is imported.")
except ImportError:
    print("pytz is not installed; get_current_time_in_timezone will not work.")
# Tools
from tools.final_answer import FinalAnswerTool
from tools.polite_guard import PoliteGuardTool
# from tools.web_search import DuckDuckGoSearchTool
from Gradio_UI import GradioUI
# Custom @tool: returns the current local time in a specified timezone.
@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """A tool that fetches the current local time in a specified timezone.
    Args:
        timezone: A string representing a valid timezone (e.g., 'America/New_York').
    """
    try:
        # Create the timezone object
        tz = pytz.timezone(timezone)
        # Get the current time in that timezone (time only, 12-hour clock with AM/PM)
        local_time = datetime.datetime.now(tz).strftime("%I:%M %p")
        # local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
        return f"The current local time in {timezone} is: {local_time}"
    except Exception as e:
        return f"Error fetching time for timezone '{timezone}': {str(e)}"
final_answer = FinalAnswerTool()
polite_guard = PoliteGuardTool()
model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
    # It is possible that this model may be overloaded at times.
    custom_role_conversions=None,
)
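# Note (assumption): HfApiModel calls the Hugging Face Inference API, so the Space
# typically needs a valid Hugging Face token (e.g., HF_TOKEN) available in its environment.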
# model = LiteLLMModel(
#     model_id="gemini/gemini-2.0-flash-exp",
#     max_tokens=2096,
#     temperature=0.6,
#     api_key=os.getenv("LITELLM_API_KEY")
# )
# Load the text-to-image tool from the Hub
image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
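# Note: image_generation_tool is loaded here but not added to the agent's tools list
# below; append it to that list if the agent should be able to generate images.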
# Load prompts.yaml
with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)

# Load contentprompts.yml
with open("contentprompts.yml", 'r') as stream:
    content_prompts = yaml.safe_load(stream)
combined_prompts = {**prompt_templates, **content_prompts}
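# Note: with dict unpacking, keys in content_prompts override any duplicates from prompt_templates.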
# Additional tools that could be added: web_search, visit_webpage
agent = CodeAgent(
    model=model,
    tools=[final_answer, polite_guard, get_current_time_in_timezone],  # add your tools here (don't remove final_answer)
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=combined_prompts,
)
GradioUI(agent).launch()