from smolagents import CodeAgent, tool, Tool, LiteLLMModel, OpenAIServerModel
from tools.final_answer import FinalAnswerTool
import requests
import pytz
import yaml
import pandas as pd
import os, json
from PIL import Image

from Gradio_UI import GradioUI

# Below is an example of a tool that does nothing. Amaze us with your creativity!
@tool
def my_custom_tool(arg1: str, arg2: int) -> str:  # it's important to specify the return type
    # Keep this format for the description / args / args description, but feel free to modify the tool
    """A tool that does nothing yet.
    Args:
        arg1: the first argument
        arg2: the second argument
    """
    return "What magic will you build?"

@tool
def get_g5_data() -> dict:
    """Returns the G5 dataset as a dictionary.
    The G5 dataset contains information about the movements of ships in space and time,
    mostly between the mid-18th and mid-19th centuries.
    """
    df = pd.read_csv("g5data.csv", sep=";", encoding="utf-8")
    return df.to_dict()

@tool
def save_figure() -> Image.Image:
    """Opens the chart saved as an image.png file at the "/content/gdrive/MyDrive/ColabNotebooks/Portic" path
    and returns the image so it can be displayed."""
    image_path = '/content/gdrive/MyDrive/ColabNotebooks/Portic/image.png'
    return Image.open(image_path)
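
# Note: save_figure assumes the agent's own generated plotting code has already written the
# figure to that Drive path. A hypothetical snippet the agent might produce (not part of this
# app; column names are made up) would look like:
#   import matplotlib.pyplot as plt
#   plt.plot(df["x"], df["y"])
#   plt.savefig('/content/gdrive/MyDrive/ColabNotebooks/Portic/image.png')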

final_answer = FinalAnswerTool()

# If the agent does not answer, the model is overloaded; use another model or the following
# Hugging Face endpoint, which also serves Qwen2.5 Coder:
# model_id = 'https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
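#
# A possible fallback sketch (not the configured default): OpenAIServerModel is already
# imported above and can target an OpenAI-compatible server. The "/v1" route, the model name,
# and the HF_TOKEN environment variable below are assumptions to adapt to your own deployment:
# model = OpenAIServerModel(
#     model_id="Qwen/Qwen2.5-Coder-32B-Instruct",
#     api_base="https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud/v1",
#     api_key=os.environ.get("HF_TOKEN", ""),
# )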

model = LiteLLMModel(model_id="gemini/gemini-1.5-flash", api_key=os.environ["GOOGLE_API_KEY"])

# Import tool from Hub
#image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)

with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)
    
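# Agent configuration: max_steps caps the reasoning loop, verbosity_level controls how much is
# logged, and planning_interval=None disables the periodic planning step.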
agent = CodeAgent(
    model=model,
    tools=[final_answer, get_g5_data, save_figure],  # add your tools here (don't remove final_answer)
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates
)


GradioUI(agent).launch()