Spaces:
Sleeping
Sleeping
Update file
Browse files
app.py
CHANGED
@@ -8,17 +8,19 @@ import os
|
|
8 |
from huggingface_hub import InferenceClient
|
9 |
from Gradio_UI import GradioUI
|
10 |
from dotenv import load_dotenv
|
|
|
|
|
11 |
load_dotenv()
|
12 |
|
13 |
hf_token = os.getenv("HF_TOKEN")
|
|
|
|
|
14 |
|
15 |
-
#
|
16 |
|
17 |
|
18 |
@tool
|
19 |
-
# it's import to specify the return type
|
20 |
def my_custom_tool(arg1: str, arg2: int) -> str:
|
21 |
-
# Keep this format for the description / args / args description but feel free to modify the tool
|
22 |
"""A tool that does nothing yet
|
23 |
Args:
|
24 |
arg1: the first argument
|
@@ -28,7 +30,7 @@ def my_custom_tool(arg1: str, arg2: int) -> str:
|
|
28 |
|
29 |
|
30 |
def get_weather_report_at_coordinates(coordinates, date_time):
|
31 |
-
# Dummy function, returns
|
32 |
return [28.0, 0.35, 0.85]
|
33 |
|
34 |
|
@@ -41,13 +43,12 @@ def convert_location_to_coordinates(location):
|
|
41 |
def get_weather_api(location: str, date_time: str) -> str:
|
42 |
"""
|
43 |
Returns the weather report.
|
44 |
-
|
45 |
Args:
|
46 |
location: the name of the place that you want the weather for.
|
47 |
date_time: the date and time for which you want the report.
|
48 |
"""
|
49 |
lon, lat = convert_location_to_coordinates(location)
|
50 |
-
date_time = datetime.strptime(date_time)
|
51 |
return str(get_weather_report_at_coordinates((lon, lat), date_time))
|
52 |
|
53 |
|
@@ -55,14 +56,10 @@ user_data = {}
|
|
55 |
|
56 |
|
57 |
def update_personality(name: str, personality: str) -> str:
|
58 |
-
"""Asks the user about his personality before predicting his future
|
59 |
-
"""
|
60 |
user_data[name] = personality
|
61 |
-
return f"Great! Thanks {name} I
|
62 |
-
|
63 |
|
64 |
-
'''
|
65 |
-
I would like to use an AI model that takes the name and personality and predicts number of kids, career etc '''
|
66 |
|
67 |
client = InferenceClient(model="Qwen/Qwen2.5-Coder-32B-Instruct")
|
68 |
|
@@ -70,28 +67,20 @@ client = InferenceClient(model="Qwen/Qwen2.5-Coder-32B-Instruct")
|
|
70 |
@tool
|
71 |
def predict_future_with_model(name: str, personality: str) -> str:
|
72 |
"""
|
73 |
-
|
74 |
-
Returns:
|
75 |
-
str: A fun and futuristic AI-generated prediction.
|
76 |
Args:
|
77 |
name: The user's name.
|
78 |
personality: A description of the user's personality traits.
|
79 |
-
|
80 |
-
|
81 |
"""
|
82 |
-
|
83 |
prompt = f"""
|
84 |
Given the name '{name}' and personality traits '{personality}', generate a fun, futuristic prediction for their life.
|
85 |
-
|
86 |
Your response should include:
|
87 |
- A career path
|
88 |
- A major life event
|
89 |
- The number of kids they might have
|
90 |
- A quirky or funny twist related to their personality
|
91 |
-
|
92 |
Keep it engaging, futuristic, and a little humorous!
|
93 |
"""
|
94 |
-
|
95 |
try:
|
96 |
response = client.text_generation(prompt, max_new_tokens=100)
|
97 |
return f"🔮 **Future Prediction for {name}:**\n{response}"
|
@@ -106,42 +95,75 @@ def get_current_time_in_timezone(timezone: str) -> str:
|
|
106 |
timezone: A string representing a valid timezone (e.g., 'America/New_York').
|
107 |
"""
|
108 |
try:
|
109 |
-
# Create timezone object
|
110 |
tz = pytz.timezone(timezone)
|
111 |
-
# Get current time in that timezone
|
112 |
local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
|
113 |
return f"The current local time in {timezone} is: {local_time}"
|
114 |
except Exception as e:
|
115 |
return f"Error fetching time for timezone '{timezone}': {str(e)}"
|
116 |
|
117 |
|
118 |
-
|
119 |
-
|
120 |
"""
|
121 |
-
|
122 |
-
|
123 |
-
|
124 |
-
|
125 |
-
|
126 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
127 |
return random.choice(jokes)
|
128 |
|
129 |
|
130 |
final_answer = FinalAnswerTool()
|
131 |
|
132 |
-
# If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
|
133 |
-
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
|
134 |
-
|
135 |
model = HfApiModel(
|
136 |
max_tokens=2096,
|
137 |
temperature=0.5,
|
138 |
-
# it is possible that this model may be overloaded
|
139 |
model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
|
140 |
custom_role_conversions=None,
|
141 |
)
|
142 |
|
143 |
-
|
144 |
-
# Import tool from Hub
|
145 |
image_generation_tool = load_tool(
|
146 |
"agents-course/text-to-image", trust_remote_code=True)
|
147 |
|
@@ -150,7 +172,15 @@ with open("prompts.yaml", 'r') as stream:
|
|
150 |
|
151 |
agent = CodeAgent(
|
152 |
model=model,
|
153 |
-
tools=[
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
154 |
max_steps=6,
|
155 |
verbosity_level=1,
|
156 |
grammar=None,
|
@@ -160,5 +190,4 @@ agent = CodeAgent(
|
|
160 |
prompt_templates=prompt_templates
|
161 |
)
|
162 |
|
163 |
-
|
164 |
GradioUI(agent).launch()
|
|
|
8 |
from huggingface_hub import InferenceClient
|
9 |
from Gradio_UI import GradioUI
|
10 |
from dotenv import load_dotenv
|
11 |
+
import random # Added for tell_joke()
|
12 |
+
|
13 |
load_dotenv()
|
14 |
|
15 |
hf_token = os.getenv("HF_TOKEN")
|
16 |
+
alpha_vantage_api_key = os.getenv(
|
17 |
+
"ALPHA_VANTAGE_API_KEY") # Load Alpha Vantage API key
|
18 |
|
19 |
+
# Custom tool example
|
20 |
|
21 |
|
22 |
@tool
|
|
|
23 |
def my_custom_tool(arg1: str, arg2: int) -> str:
|
|
|
24 |
"""A tool that does nothing yet
|
25 |
Args:
|
26 |
arg1: the first argument
|
|
|
30 |
|
31 |
|
32 |
def get_weather_report_at_coordinates(coordinates, date_time):
    """Return a placeholder weather report for the given coordinates and time.

    Args:
        coordinates: (longitude, latitude) pair — currently ignored by the stub.
        date_time: requested report time — currently ignored by the stub.

    Returns:
        A fixed dummy report: [temperature in °C, risk of rain 0-1, wave height in m].
    """
    # Stub implementation: real weather lookup not wired in yet, so the
    # report is hard-coded regardless of the inputs.
    dummy_report = [28.0, 0.35, 0.85]
    return dummy_report
|
35 |
|
36 |
|
|
|
43 |
def get_weather_api(location: str, date_time: str) -> str:
    """
    Returns the weather report.

    Args:
        location: the name of the place that you want the weather for.
        date_time: the date and time for which you want the report,
            formatted as 'YYYY-MM-DD HH:MM:SS' (e.g. '2024-01-31 14:00:00').
    """
    lon, lat = convert_location_to_coordinates(location)
    try:
        # Reject malformed timestamps with a readable message instead of an
        # uncaught ValueError, matching the error-string style of the other tools.
        parsed_dt = datetime.datetime.strptime(date_time, "%Y-%m-%d %H:%M:%S")
    except ValueError:
        return f"Error: date_time '{date_time}' must match the format 'YYYY-MM-DD HH:MM:SS'."
    return str(get_weather_report_at_coordinates((lon, lat), parsed_dt))
|
53 |
|
54 |
|
|
|
56 |
|
57 |
|
58 |
def update_personality(name: str, personality: str) -> str:
    """Asks the user about his personality before predicting his future"""
    # Record the traits in the module-level store, keyed by user name.
    user_data[name] = personality
    confirmation = f"Great! Thanks {name}, I've updated your personality traits. Now ask me about your future."
    return confirmation
|
|
|
62 |
|
|
|
|
|
63 |
|
64 |
client = InferenceClient(model="Qwen/Qwen2.5-Coder-32B-Instruct")
|
65 |
|
|
|
67 |
@tool
|
68 |
def predict_future_with_model(name: str, personality: str) -> str:
|
69 |
"""
|
70 |
+
Returns a fun and futuristic AI-generated prediction.
|
|
|
|
|
71 |
Args:
|
72 |
name: The user's name.
|
73 |
personality: A description of the user's personality traits.
|
|
|
|
|
74 |
"""
|
|
|
75 |
prompt = f"""
|
76 |
Given the name '{name}' and personality traits '{personality}', generate a fun, futuristic prediction for their life.
|
|
|
77 |
Your response should include:
|
78 |
- A career path
|
79 |
- A major life event
|
80 |
- The number of kids they might have
|
81 |
- A quirky or funny twist related to their personality
|
|
|
82 |
Keep it engaging, futuristic, and a little humorous!
|
83 |
"""
|
|
|
84 |
try:
|
85 |
response = client.text_generation(prompt, max_new_tokens=100)
|
86 |
return f"🔮 **Future Prediction for {name}:**\n{response}"
|
|
|
95 |
timezone: A string representing a valid timezone (e.g., 'America/New_York').
|
96 |
"""
|
97 |
try:
|
|
|
98 |
tz = pytz.timezone(timezone)
|
|
|
99 |
local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
|
100 |
return f"The current local time in {timezone} is: {local_time}"
|
101 |
except Exception as e:
|
102 |
return f"Error fetching time for timezone '{timezone}': {str(e)}"
|
103 |
|
104 |
|
105 |
+
@tool
|
106 |
+
def get_financial_price(ticker: str) -> str:
    """
    Fetches the real-time price of a stock, cryptocurrency, or financial product using Alpha Vantage API.

    Args:
        ticker: The ticker symbol (e.g., 'AAPL' for Apple stock, 'BTCUSD' for Bitcoin/USD).
    """
    if not alpha_vantage_api_key:
        return "Error: Alpha Vantage API key not found. Please set ALPHA_VANTAGE_API_KEY in your .env file."

    # Determine if it's a crypto or stock based on ticker format (simplified logic):
    # crypto pairs are assumed to be 6+ chars ending in a known quote currency,
    # e.g. BTCUSD, ETHBTC. NOTE(review): 3-letter base is assumed — verify for
    # longer symbols like DOGEUSD.
    is_crypto = len(ticker) > 5 and ticker.endswith(("USD", "BTC", "ETH"))
    if is_crypto:
        url = f"https://www.alphavantage.co/query?function=CURRENCY_EXCHANGE_RATE&from_currency={ticker[:3]}&to_currency={ticker[3:]}&apikey={alpha_vantage_api_key}"
    else:
        url = f"https://www.alphavantage.co/query?function=GLOBAL_QUOTE&symbol={ticker}&apikey={alpha_vantage_api_key}"

    try:
        # timeout keeps the agent from hanging forever if Alpha Vantage stalls;
        # a timeout surfaces through the generic except below as an error string.
        response = requests.get(url, timeout=10)
        data = response.json()

        if is_crypto:
            if "Realtime Currency Exchange Rate" in data:
                price = data["Realtime Currency Exchange Rate"]["5. Exchange Rate"]
                return f"The current price of {ticker[:3]} in {ticker[3:]} is {float(price):.2f} {ticker[3:]}."
            else:
                return f"Error: Could not fetch crypto price for {ticker}. Check the ticker symbol."
        else:
            if "Global Quote" in data and "05. price" in data["Global Quote"]:
                price = data["Global Quote"]["05. price"]
                return f"The current price of {ticker} is ${float(price):.2f} USD."
            else:
                return f"Error: Could not fetch stock price for {ticker}. Check the ticker symbol or API limits."

    except Exception as e:
        return f"Error fetching price for {ticker}: {str(e)}"
|
142 |
+
|
143 |
+
|
144 |
+
@tool
|
145 |
+
def tell_joke() -> str:
    """Returns a random stored joke."""
    # Fixed pool of one-liners; one is picked uniformly at random per call.
    jokes = (
        "Why do we tell actors to 'break a leg?' Because every play has a cast.",
        "I told my wife she should embrace her mistakes. She gave me a hug.",
        "I'm reading a book on the history of glue. I just can't seem to put it down.",
        "I would tell you a joke about an elevator, but it's an uplifting experience.",
        "I told my computer I needed a break and now it won't stop sending me vacation ads.",
        "I used to play piano by ear, but now I use my hands",
    )
    return random.choice(jokes)
|
156 |
|
157 |
|
158 |
final_answer = FinalAnswerTool()
|
159 |
|
|
|
|
|
|
|
160 |
model = HfApiModel(
|
161 |
max_tokens=2096,
|
162 |
temperature=0.5,
|
|
|
163 |
model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
|
164 |
custom_role_conversions=None,
|
165 |
)
|
166 |
|
|
|
|
|
167 |
image_generation_tool = load_tool(
|
168 |
"agents-course/text-to-image", trust_remote_code=True)
|
169 |
|
|
|
172 |
|
173 |
agent = CodeAgent(
|
174 |
model=model,
|
175 |
+
tools=[
|
176 |
+
my_custom_tool,
|
177 |
+
get_weather_api,
|
178 |
+
predict_future_with_model,
|
179 |
+
get_current_time_in_timezone,
|
180 |
+
get_financial_price, # New tool added here
|
181 |
+
tell_joke,
|
182 |
+
final_answer
|
183 |
+
],
|
184 |
max_steps=6,
|
185 |
verbosity_level=1,
|
186 |
grammar=None,
|
|
|
190 |
prompt_templates=prompt_templates
|
191 |
)
|
192 |
|
|
|
193 |
GradioUI(agent).launch()
|