Michel Mesquita committed · a401e2a
Parent(s): ac30d64

Running locally
Files changed:
- .gradio/certificate.pem +31 -0
- Gradio_UI.py +3 -2
- __pycache__/Gradio_UI.cpython-310.pyc +0 -0
- app.py +94 -27
- tools/__pycache__/final_answer.cpython-310.pyc +0 -0
.gradio/certificate.pem
ADDED
@@ -0,0 +1,31 @@
+-----BEGIN CERTIFICATE-----
+MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
+TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
+cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
+WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
+ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
+MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
+h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
+0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
+A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
+T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
+B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
+B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
+KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
+OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
+jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
+qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
+rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
+hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
+ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
+3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
+NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
+ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
+TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
+jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
+oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
+4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
+mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
+emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
+-----END CERTIFICATE-----
Gradio_UI.py
CHANGED
@@ -141,9 +141,10 @@ def stream_to_gradio(
 
     for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
         # Track tokens if model provides them
-        if hasattr(agent.model, "last_input_token_count"):
+        if hasattr(agent.model, "last_input_token_count") and agent.model.last_input_token_count is not None:
             total_input_tokens += agent.model.last_input_token_count
-
+            if agent.model.last_output_token_count is not None:
+                total_output_tokens += agent.model.last_output_token_count
         if isinstance(step_log, ActionStep):
             step_log.input_token_count = agent.model.last_input_token_count
             step_log.output_token_count = agent.model.last_output_token_count
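Note on the change above: the new guards handle models that expose the token-count attributes but leave them set to None, which local backends served through LiteLLM/Ollama commonly do. A minimal standalone sketch of the same pattern follows; the variable and attribute names mirror stream_to_gradio, but it is illustrative and not part of the commit.

# Illustrative only: None-guarded token accounting in the style of the fix above.
total_input_tokens = 0
total_output_tokens = 0

def accumulate_token_counts(model) -> None:
    """Add the model's last token counts to the running totals, if reported."""
    global total_input_tokens, total_output_tokens
    # Some backends expose the attributes but set them to None, so check both
    # presence and value before accumulating.
    if getattr(model, "last_input_token_count", None) is not None:
        total_input_tokens += model.last_input_token_count
    if getattr(model, "last_output_token_count", None) is not None:
        total_output_tokens += model.last_output_token_count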
__pycache__/Gradio_UI.cpython-310.pyc
ADDED
Binary file (6.86 kB).
app.py
CHANGED
@@ -1,4 +1,27 @@
-
+# =====================================================================
+# Local AI Agent using smolagents with LiteLLM and Ollama
+# =====================================================================
+# This application creates a local AI agent that can:
+# - Search the web using DuckDuckGo
+# - Generate images using Hugging Face's text-to-image model
+# - Get current time in different timezones
+# - Check if a number is prime
+# - And more!
+#
+# IMPORTANT: This agent requires local execution with Ollama running on port 11434
+# Remote models from Hugging Face (like Qwen2.5-Coder or Mistral-7B) are often overloaded
+# and may return 'Payment Required' errors or be paused by their providers.
+#
+# Setup requirements:
+# 1. Install Ollama (https://ollama.ai/)
+# 2. Pull the llama3.2 model: `ollama pull llama3.2`
+# 3. Ensure Ollama is running before starting this application
+# 4. Install Python dependencies from requirements.txt
+# =====================================================================
+
+from smolagents import CodeAgent, DuckDuckGoSearchTool, load_tool, tool
+from smolagents.models import LiteLLMModel
+import os
 import datetime
 import requests
 import pytz
@@ -7,7 +30,14 @@ from tools.final_answer import FinalAnswerTool
 
 from Gradio_UI import GradioUI
 
-#
+# =====================================================================
+# Tool Definitions
+# =====================================================================
+# Each tool is defined with the @tool decorator and includes docstrings
+# that help the agent understand when and how to use each tool.
+# =====================================================================
+
+# Simple example tool that can be customized
 @tool
 def my_custom_tool(arg1:str, arg2:int)-> str: #it's import to specify the return type
     #Keep this format for the description / args / args description but feel free to modify the tool
@@ -34,22 +64,31 @@ def get_current_time_in_timezone(timezone: str) -> str:
         return f"Error fetching time for timezone '{timezone}': {str(e)}"
 
 
-# Import tool from Hub
+# Import image generation tool from Hugging Face Hub
+# This allows the agent to create images based on text prompts
 image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
 
 @tool
-def generate_image(prompt:str) ->
+def generate_image(prompt:str) -> str:
     """Generate image(s) from a text prompt via HF text-to-image.
     Args:
        prompt: description of the image
    """
-    return image_generation_tool(prompt)
+    return image_generation_tool(prompt)
 
 @tool
 def duckduckgo_search(query: str, max_results: int = 5) -> str:
-    """Search DuckDuckGo for a query and return the top N results.
+    """Search DuckDuckGo for a query and return the top N results.
+
+    Args:
+        query: The search query string to look up on DuckDuckGo
+        max_results: Maximum number of search results to return (default is 5)
+
+    Returns:
+        A string containing the search results
+    """
     searcher = DuckDuckGoSearchTool(max_results=max_results)
-    return searcher(query)
+    return searcher(query)
 
 
 @tool
@@ -82,36 +121,64 @@ def is_prime(number: int) -> bool:
 
 
 
-
+# =====================================================================
+# Agent Configuration
+# =====================================================================
 
-#
-
+# The FinalAnswerTool is used to provide final responses to the user
+final_answer = FinalAnswerTool()
 
-
-
-
-
-
+# IMPORTANT: Remote models are often overloaded or require payment
+# Previous attempts to use these models resulted in errors:
+# - Qwen/Qwen2.5-Coder-32B-Instruct: "Payment Required" error
+# - mistralai/Mistral-7B-Instruct-v0.2: Model was paused
+#
+# Alternative HF endpoint (if needed):
+# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
+#
+# Instead, we use a local Ollama model which is more reliable:
+
+# Configure LiteLLM to use local Ollama instance
+os.environ["LITELLM_OLLAMA_API_BASE"] = "http://localhost:11434"
+
+# Initialize the model with appropriate parameters
+model = LiteLLMModel(
+    model_name="ollama/llama3.2",  # Using the locally available Llama3.2 model
+    max_tokens=1024,  # Maximum tokens in the response
+    temperature=0.7,  # Controls randomness (higher = more creative)
+    model_id="ollama/llama3.2"  # Explicitly set model_id to avoid default to Claude
 )
 
 
 
 
-
-
-
+# =====================================================================
+# Agent Initialization
+# =====================================================================
+# Initialize the agent with the configured model and tools
 agent = CodeAgent(
     model=model,
-    tools
-
-
-
-
-
+    # List of tools available to the agent - add new tools here
+    tools=[
+        final_answer,  # Required for providing final answers
+        duckduckgo_search,  # Web search capability
+        get_current_time_in_timezone,  # Time-related functions
+        my_custom_tool,  # Example custom tool
+        generate_image,  # Image generation
+        is_prime  # Prime number checker
+    ],
+    max_steps=6,  # Maximum reasoning steps per query
+    verbosity_level=1,  # Controls logging detail
+    grammar=None,  # Optional grammar constraints
+    planning_interval=None,  # How often to plan next steps
    name=None,
-    description=None
-    prompt_templates
+    description=None
+    # Not specifying prompt_templates will use the default ones from smolagents
 )
 
-
+# =====================================================================
+# Launch the Gradio Web Interface
+# =====================================================================
+# This creates a user-friendly web interface for interacting with the agent
+# Accessible at http://127.0.0.1:7860 by default
 GradioUI(agent).launch()
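A note on the model setup in app.py above: smolagents' LiteLLMModel can also be pointed at the local Ollama server through its api_base argument rather than the LITELLM_OLLAMA_API_BASE environment variable. The sketch below is an assumed alternative, not taken from this commit; the ollama_chat/ prefix and the exact argument set should be checked against the installed smolagents version.

# Assumed alternative to the env-var configuration in app.py (not in this commit).
from smolagents.models import LiteLLMModel

local_model = LiteLLMModel(
    model_id="ollama_chat/llama3.2",    # LiteLLM's chat-style route to Ollama (assumed prefix)
    api_base="http://localhost:11434",  # default Ollama port, matching the comments above
    max_tokens=1024,
    temperature=0.7,
)

Passing the endpoint explicitly keeps the configuration next to the model definition instead of relying on process-wide environment state.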
tools/__pycache__/final_answer.cpython-310.pyc
ADDED
Binary file (925 Bytes).
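Since the header comments in app.py require Ollama to be running on port 11434 before launch, a small pre-flight check can fail fast with a clearer error than a mid-run connection failure. This sketch assumes Ollama's /api/tags listing endpoint and is not part of the commit.

# Optional pre-flight check (assumed, not in this commit): verify the local
# Ollama server is reachable before GradioUI(agent).launch() runs.
import requests

def ollama_is_running(base_url: str = "http://localhost:11434") -> bool:
    """Return True if the Ollama HTTP API answers on base_url."""
    try:
        return requests.get(f"{base_url}/api/tags", timeout=2).status_code == 200
    except requests.exceptions.RequestException:
        return False

if not ollama_is_running():
    raise RuntimeError(
        "Ollama is not reachable on http://localhost:11434. "
        "Start it (`ollama serve`) and pull the model (`ollama pull llama3.2`)."
    )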