from smolagents import CodeAgent, tool, Tool, LiteLLMModel, OpenAIServerModel
from tools.final_answer import FinalAnswerTool
import requests
import pytz
import yaml
import pandas as pd
import os
import json
from PIL import Image
from Gradio_UI import GradioUI


# Below is an example of a tool that does nothing. Amaze us with your creativity!
@tool
def my_custom_tool(arg1: str, arg2: int) -> str:
    # It's important to specify the return type: smolagents' @tool decorator
    # parses the annotations and docstring to build the tool schema.
    # Keep this format for the description / args / args description,
    # but feel free to modify the tool.
    """A tool that does nothing yet

    Args:
        arg1: the first argument
        arg2: the second argument
    """
    return "What magic will you build ?"


@tool
def get_g5_data() -> dict:
    """Returns G5 dataset in a dictionary format.

    G5 dataset contains informations relating to the movements of ships in
    space and time, mostly between the mid-18th and mid-19th centuries
    """
    # Semicolon-separated CSV expected next to this script.
    df = pd.read_csv("g5data.csv", sep=";", encoding="utf-8")
    return df.to_dict()


@tool
def save_figure() -> Image.Image:
    """Save the plot chart in a image.png file.
    Returns the image displayed"""
    # FIX: the original annotation was `-> None`, but the function returns a
    # PIL image; smolagents reads this annotation to type the tool's output.
    # NOTE(review): this only re-opens an existing ./image.png — the agent is
    # presumably expected to have written the figure there first (e.g. via
    # plt.savefig); confirm against the prompt templates.
    image_path = './image.png'
    return Image.open(image_path)


final_answer = FinalAnswerTool()

# If the agent does not answer, the model is overloaded, please use another
# model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
model = LiteLLMModel(
    model_id="gemini/gemini-1.5-flash",
    api_key=os.getenv('GOOGLE_API_KEY'),
)

# Import tool from Hub
# image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)

with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)

agent = CodeAgent(
    model=model,
    # FIX: the comment said "don't remove final answer" but `final_answer`
    # was never added to the tools list — include it so the agent can
    # actually emit its final answer.
    tools=[final_answer, get_g5_data, save_figure],
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    # prompt_templates=prompt_templates,
    add_base_tools=True,
    additional_authorized_imports=['numpy', 'pandas', 'matplotlib.pyplot'],
)

GradioUI(agent).launch()