"""Agent setup: imports, custom tools, model selection, and endpoint validation."""

import datetime
import os
import sys  # FIX: was missing — sys.exit() below raised NameError on the failure path

import pytz  # Had to give it permission in Code agent
import requests
import yaml

from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
# Google Gemini
# from litellm import LiteLLMModel, RateLimitError

# Tools
from tools.final_answer import FinalAnswerTool
from tools.polite_guard import PoliteGuardTool
# NOTE(review): this shadows the smolagents DuckDuckGoSearchTool imported above —
# the local tools.web_search version is the one actually instantiated below.
from tools.web_search import DuckDuckGoSearchTool
from check_endpoint import is_huggingface_endpoint
from Gradio_UI import GradioUI


@tool
def get_the_current_time_in_timezone(timezone: str) -> str:
    """A tool that fetches the current local time in a specified timezone.

    Args:
        timezone: A string representing a valid timezone (e.g., 'America/New_York').

    Returns:
        string: A sentence that provides the time using the 12-hour clock including AM/PM
    """
    try:
        # Create timezone object; raises pytz.UnknownTimeZoneError for bad names,
        # which the broad except converts into a readable error string for the agent.
        tz = pytz.timezone(timezone)
        # %I for 12-hour clock, %M for minutes, %p for AM/PM
        local_time = datetime.datetime.now(tz).strftime("%I:%M %p")
        return f"The current local time in {timezone} is: {local_time}"
    except Exception as e:
        return f"Error fetching time for timezone '{timezone}': {str(e)}"


# Tool instances handed to the agent below.
final_answer = FinalAnswerTool()
polite_guard = PoliteGuardTool()
web_search = DuckDuckGoSearchTool()

# Switch to Gemini if this model gets overloaded
my_id = 'Qwen/Qwen2.5-Coder-32B-Instruct'
# example of an end point
#my_id="https://ntqicork29enjoy5.us-east4.gcp.endpoints.huggingface.cloud"
# Overrides the default model id above; will be None if QWEN_URI is unset,
# in which case the endpoint check below fails and the script exits.
my_id = os.getenv("QWEN_URI")

# Test the endpoint
if is_huggingface_endpoint(my_id):
    print("This is a Hugging Face Inference Endpoint.")
else:
    print("This is NOT a Hugging Face Inference Endpoint.")
    sys.exit(1)  # Stop execution if the endpoint is not valid

'''
model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    model_id=my_id,
    custom_role_conversions=None,
)
'''

'''
model = LiteLLMModel(
    model_id="gemini/gemini-2.0-flash-exp",
    max_tokens=2096,
    temperature=0.6,
    api_key=os.getenv("LITELLM_API_KEY")
)
'''

# Import tool from Hub
#image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)

# Load prompts.yaml (base prompt templates for the agent).
# FIX: read YAML explicitly as UTF-8 instead of the platform-dependent default encoding.
with open("prompts.yaml", 'r', encoding="utf-8") as stream:
    prompt_templates = yaml.safe_load(stream)

# Load contentprompts.yml (content-moderation specific prompts).
with open("contentprompts.yml", 'r', encoding="utf-8") as stream:
    content_prompts = yaml.safe_load(stream)

# Merge the two mappings; on duplicate keys, content_prompts wins.
combined_prompts = {**prompt_templates, **content_prompts}

# web_search, visit_webpage
'''
agent = CodeAgent(
    model=model,
    tools=[final_answer, polite_guard, web_search, get_the_current_time_in_timezone],  ## add your tools here (don't remove final answer)
    max_steps=6,
    verbosity_level=3,
    grammar=None,
    planning_interval=None,
    name="Content Agent",
    description="Evaluates whether text is polite or impolite. ",
    prompt_templates=combined_prompts,
    additional_authorized_imports=["pytz"]
)

GradioUI(agent).launch()
'''