Geraldine committed · Commit e0cbe47 · verified · 1 parent: ae7a494

Update app.py

Files changed (1):
  1. app.py +19 -24
app.py CHANGED
@@ -1,9 +1,11 @@
-from smolagents import CodeAgent,DuckDuckGoSearchTool, HfApiModel,load_tool,tool
-import datetime
+from smolagents import CodeAgent, tool, Tool,LiteLLMModel, OpenAIServerModel
+from tools.final_answer import FinalAnswerTool
 import requests
 import pytz
 import yaml
-from tools.final_answer import FinalAnswerTool
+import pandas as pd
+import os, json
+from PIL import Image
 
 from Gradio_UI import GradioUI
 
@@ -19,43 +21,36 @@ def my_custom_tool(arg1:str, arg2:int)-> str: #it's import to specify the return
     return "What magic will you build ?"
 
 @tool
-def get_current_time_in_timezone(timezone: str) -> str:
-    """A tool that fetches the current local time in a specified timezone.
-    Args:
-        timezone: A string representing a valid timezone (e.g., 'America/New_York').
+def get_g5_data() -> dict:
+    """Returns G5 dataset in a dictionary format.
+    G5 dataset contains informations relating to the movements of ships in space and time, mostly between the mid-18th and mid-19th centuries
     """
-    try:
-        # Create timezone object
-        tz = pytz.timezone(timezone)
-        # Get current time in that timezone
-        local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
-        return f"The current local time in {timezone} is: {local_time}"
-    except Exception as e:
-        return f"Error fetching time for timezone '{timezone}': {str(e)}"
+    df = pd.read_csv("g5data.csv", sep=";", encoding="utf-8")
+    return df.to_dict()
 
+@tool
+def save_figure()-> None:
+    """Save the chart in a image.png file located at "/content/gdrive/MyDrive/ColabNotebooks/Portic" path
+    Returns the image displayed"""
+    image_path = '/content/gdrive/MyDrive/ColabNotebooks/Portic/image.png'
+    return Image.open(image_path)
 
 final_answer = FinalAnswerTool()
 
 # If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
 # model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
 
-model = HfApiModel(
-max_tokens=2096,
-temperature=0.5,
-model_id='Qwen/Qwen2.5-Coder-32B-Instruct',# it is possible that this model may be overloaded
-custom_role_conversions=None,
-)
-
+model = LiteLLMModel(model_id="gemini/gemini-1.5-flash", api_key=os.environ["GOOGLE_API_KEY"])
 
 # Import tool from Hub
-image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
+#image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
 
 with open("prompts.yaml", 'r') as stream:
     prompt_templates = yaml.safe_load(stream)
 
 agent = CodeAgent(
     model=model,
-    tools=[final_answer], ## add your tools here (don't remove final answer)
+    tools=[final_answer,get_g5_data,save_figure], ## add your tools here (don't remove final answer)
     max_steps=6,
     verbosity_level=1,
     grammar=None,
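
A quick, hypothetical smoke test for what the two new tools and the model setup depend on (a sketch under assumptions, not part of this commit): it presumes g5data.csv sits next to app.py, that image.png has already been written to the hard-coded Google Drive path (so a mounted Colab/Drive environment), and that GOOGLE_API_KEY is exported before app.py constructs LiteLLMModel, since os.environ["GOOGLE_API_KEY"] raises KeyError when the variable is missing.

# smoke_test.py -- hypothetical helper, not committed to the Space
import os
import pandas as pd
from PIL import Image

# Same read as get_g5_data(): semicolon-separated, UTF-8 (assumes g5data.csv is present)
df = pd.read_csv("g5data.csv", sep=";", encoding="utf-8")
print(df.shape)           # rows x columns of the G5 ship-movement data
print(len(df.to_dict()))  # to_dict() defaults to {column: {row_index: value}}, one key per column

# Same path as save_figure(): only resolvable with Google Drive mounted under /content/gdrive (assumption)
image_path = "/content/gdrive/MyDrive/ColabNotebooks/Portic/image.png"
if os.path.exists(image_path):
    print(Image.open(image_path).size)

# LiteLLMModel is built with os.environ["GOOGLE_API_KEY"]; fail early if it is unset
assert "GOOGLE_API_KEY" in os.environ, "set GOOGLE_API_KEY before launching app.py"

Note also that save_figure is annotated -> None and its docstring describes saving a chart, while the body actually opens and returns the existing PIL image; aligning the annotation and docstring with that behaviour would be a natural follow-up.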