Spaces:
Sleeping
Sleeping
Brunwo
committed on
Commit
·
7f53c1a
1
Parent(s):
49f52ec
first HF spaces deploy
Browse files- HFHub.py +111 -0
- app.py +228 -58
- requirements.txt +5 -1
HFHub.py
ADDED
@@ -0,0 +1,111 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#%%writefile HFHub.py
|
2 |
+
|
3 |
+
#utility file
|
4 |
+
|
5 |
+
from smolagents.default_tools import __all__ as dft_tools
|
6 |
+
from huggingface_hub import login, InferenceClient
|
7 |
+
import logging
|
8 |
+
from smolagents import ToolCollection
|
9 |
+
from smolagents import HfApiModel, CodeAgent, Tool, LiteLLMModel
|
10 |
+
from smolagents import ToolCollection
|
11 |
+
from smolagents import HfApiModel, CodeAgent, Tool
|
12 |
+
|
13 |
+
publishtoken = None

tool_Collections: dict[str, ToolCollection] = {}


def all_tools() -> list[Tool]:
    """Flatten every registered collection in `tool_Collections` into one list of tools.

    Values in `tool_Collections` are either ToolCollection objects (tools live
    under `.tools`) or plain lists of tools ("base tools"); both are merged.
    """
    collected = []
    for entry in tool_Collections.values():
        members = entry.tools if isinstance(entry, ToolCollection) else entry
        collected.extend(members)
    return collected
|
29 |
+
|
30 |
+
def filter_tools(tools):
    """Return *tools* minus the excluded base tools (matched by tool name)."""
    excluded_names = {'web search'}
    # NOTE(review): the exclusion name contains a space — confirm it matches the
    # actual registered tool name in smolagents.
    kept = [candidate for candidate in tools if candidate.name not in excluded_names]
    logging.warning(f"Number of tools after filtering: {len(kept)}")  # Log the result
    return kept
|
37 |
+
|
38 |
+
# Modules the CodeAgent is allowed to authorize inside generated code.
# NOTE(review): "open" is a builtin, not an importable module — confirm intent.
additional_authorized_imports = ["smolagents", "subprocess", "typing", "os", "inspect", "open", "requests"]


# Per-provider litellm API keys, keyed by provider label; empty until configured.
litellm_api_keys = dict.fromkeys(('xai', 'HF', 'grok', 'anthropic', 'openAI'), '')


def get_litellm_api_key(key_name):
    """Return the configured key for *key_name*, or '' for an unknown provider."""
    return litellm_api_keys[key_name] if key_name in litellm_api_keys else ''
|
52 |
+
|
53 |
+
def login_for_publication() :
    """Authenticate with the Hugging Face Hub using the module-level publish token.

    NOTE(review): `publishtoken` is None by default — calling this before a token
    is assigned presumably falls back to huggingface_hub's default login
    behavior; confirm before publishing.
    """
    login(publishtoken)
|
55 |
+
|
56 |
+
|
57 |
+
def load_collection_from_space(agent: CodeAgent, collection_slug: str = "Mightypeacock/agent-tools-6777c9699c231b7a1e87fa31") -> list:
    """Load a tool collection from the Hub (cached) and merge its tools into *agent*.

    Args:
        agent: the CodeAgent whose toolbox receives the collection's tools.
        collection_slug: Hub collection identifier; results are cached in
            `tool_Collections` so the same slug is only downloaded once.

    Returns:
        The flattened list of all known tools (see `all_tools`); the original
        `-> ToolCollection` annotation was wrong — a list was always returned.
    """
    if collection_slug not in tool_Collections:
        tool_Collections[collection_slug] = ToolCollection(
            collection_slug=collection_slug,
            # token=publishtoken,
            trust_remote_code=True,
        )
    # BUG FIX: the original only bound `tool_collection` on a cache miss, so a
    # second call with an already-loaded slug raised UnboundLocalError below.
    tool_collection = tool_Collections[collection_slug]

    # Register each tool on the agent, replacing any same-named existing tool.
    for tool in tool_collection.tools:
        if agent.toolbox.tools.get(tool.name) is None:
            agent.toolbox.add_tool(tool)
        else:
            agent.toolbox.update_tool(tool)

    return all_tools()
|
74 |
+
|
75 |
+
|
76 |
+
def createAgent():
    """Build the app's CodeAgent with the currently loaded tools.

    The agent is created with `add_base_tools=True`, so after construction any
    toolbox tool that did not come from a loaded collection is recorded in
    `tool_Collections["base tools"]` for later filtering/display.
    """

    agent = CodeAgent(
        tools=filter_tools(all_tools()),
        # all_tools(),#[tool for col in tool_Collections.values() for tool in col.tools], # need to flatmap those probably
        # tools=[],
        # model=HfApiModel("microsoft/phi-4"),
        # model=HfApiModel("Leonuraht/Phi-4"), #space

        # model = LiteLLMModel(model_id=
        # # "gpt-4o"
        # "anthropic/claude-3-5-sonnet-20240620"
        # # xai/<any-model-on-xai> : grok-beta
        # ),

        # Default HfApiModel: no model id given, so the library default is used.
        model=HfApiModel(
            # # "lmstudio-community/phi-4-GGUF" #working
        ),

        additional_authorized_imports=additional_authorized_imports,
        add_base_tools=True,
        planning_interval=None,
        # use_e2b_executor=True,
        verbose=True

    )

    # Record base tools (anything not contributed by a loaded collection) so the
    # UI can distinguish them from collection tools.
    for tool in agent.toolbox.tools.values():
        if tool not in all_tools():
            if "base tools" not in tool_Collections:
                tool_Collections["base tools"] = []

            tool_Collections["base tools"].append(tool)

    return agent
|
111 |
+
|
app.py
CHANGED
@@ -1,64 +1,234 @@
|
|
1 |
import gradio as gr
|
2 |
from huggingface_hub import InferenceClient
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
3 |
|
4 |
-
"""
|
5 |
-
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
|
6 |
-
"""
|
7 |
-
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
|
8 |
-
|
9 |
-
|
10 |
-
def respond(
|
11 |
-
message,
|
12 |
-
history: list[tuple[str, str]],
|
13 |
-
system_message,
|
14 |
-
max_tokens,
|
15 |
-
temperature,
|
16 |
-
top_p,
|
17 |
-
):
|
18 |
-
messages = [{"role": "system", "content": system_message}]
|
19 |
-
|
20 |
-
for val in history:
|
21 |
-
if val[0]:
|
22 |
-
messages.append({"role": "user", "content": val[0]})
|
23 |
-
if val[1]:
|
24 |
-
messages.append({"role": "assistant", "content": val[1]})
|
25 |
-
|
26 |
-
messages.append({"role": "user", "content": message})
|
27 |
-
|
28 |
-
response = ""
|
29 |
-
|
30 |
-
for message in client.chat_completion(
|
31 |
-
messages,
|
32 |
-
max_tokens=max_tokens,
|
33 |
-
stream=True,
|
34 |
-
temperature=temperature,
|
35 |
-
top_p=top_p,
|
36 |
-
):
|
37 |
-
token = message.choices[0].delta.content
|
38 |
-
|
39 |
-
response += token
|
40 |
-
yield response
|
41 |
-
|
42 |
-
|
43 |
-
"""
|
44 |
-
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
|
45 |
-
"""
|
46 |
-
demo = gr.ChatInterface(
|
47 |
-
respond,
|
48 |
-
additional_inputs=[
|
49 |
-
gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
|
50 |
-
gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
|
51 |
-
gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
|
52 |
-
gr.Slider(
|
53 |
-
minimum=0.1,
|
54 |
-
maximum=1.0,
|
55 |
-
value=0.95,
|
56 |
-
step=0.05,
|
57 |
-
label="Top-p (nucleus sampling)",
|
58 |
-
),
|
59 |
-
],
|
60 |
-
)
|
61 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
62 |
|
63 |
if __name__ == "__main__":
|
64 |
-
|
|
|
|
1 |
import gradio as gr
|
2 |
from huggingface_hub import InferenceClient
|
3 |
+
import random
|
4 |
+
import time
|
5 |
+
from io import StringIO
|
6 |
+
import sys
|
7 |
+
import pprint
|
8 |
+
import inspect
|
9 |
+
from HFHub import login_for_publication, litellm_api_keys, load_collection_from_space
|
10 |
+
from HFHub import createAgent
|
11 |
+
from typing import Dict
|
12 |
+
import os
|
13 |
+
import logging
|
14 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
15 |
|
16 |
+
def dropdown_update_choices(elt, choices):
    """Build a gr.update payload replacing a dropdown's choices and clearing its value.

    NOTE(review): `gr.update()` conventionally takes only keyword arguments;
    passing the component `elt` positionally may be ignored or raise depending
    on the installed Gradio version — confirm against the gradio API.
    """
    print(f"updating dropdown: {elt} with {choices}")
    return gr.update(elt, choices=choices, value=None)
|
19 |
+
|
20 |
+
# Configure logging
|
21 |
+
logging.basicConfig(level=logging.INFO) # not working (prints warnings tho)
|
22 |
+
|
23 |
+
# os.environ['HF_TOKEN'] = "..."
|
24 |
+
# hf_token = os.environ.get("HF_TOKEN")
|
25 |
+
|
26 |
+
logging.warning('start')
|
27 |
+
|
28 |
+
|
29 |
+
def process_logs(agent):
    """Concatenate the `llm_output` of every log entry on *agent*, one per line.

    Entries without an `llm_output` attribute are skipped; an explanatory
    message is returned when the agent exposes no `logs` attribute at all.
    """
    if not hasattr(agent, 'logs'):
        return "The agent object does not have a valid 'logs' attribute or the attribute is not a list."
    rendered = [str(entry.llm_output) + "\n"
                for entry in agent.logs
                if hasattr(entry, 'llm_output')]
    return "".join(rendered)
|
38 |
+
|
39 |
+
def get_tools():
    """Return the agent's registered tools as {"name", "description"} dicts.

    Reads the module-level `agent` (created in the __main__ guard).
    """
    return [{"name": tool.name, "description": tool.description} for tool in agent.toolbox.tools.values()]

def get_functions():
    """Return the custom functions known to the agent's python executor.

    NOTE(review): presumably a dict mapping function name -> callable — the
    callers below use `.keys()` and `.get()`; confirm against smolagents.
    """
    return agent.python_executor.custom_tools
|
44 |
+
|
45 |
+
|
46 |
+
|
47 |
+
def get_function_code(selected_function_name):
    """Return the source of the named custom function, or an explanatory message."""
    target = get_functions().get(selected_function_name)
    if not target:
        return "Function not found."
    try:
        source = inspect.getsource(target)
    except OSError:
        # Dynamically-defined functions have no retrievable source file.
        return "Source code not available for this function."
    return source
|
56 |
+
|
57 |
+
def get_tool_description(selected_tool_name, tools):
    """Return the description of the tool named *selected_tool_name* in *tools*.

    *tools* is a list of {"name", "description"} dicts; a fallback message is
    returned when no entry matches.
    """
    matches = (entry["description"] for entry in tools
               if entry["name"] == selected_tool_name)
    return next(matches, "No description available.")
|
62 |
+
|
63 |
+
|
64 |
+
def refresh_ui_elements():
    """Recompute tool/function lists and return values for the four UI outputs.

    NOTE(review): the gr.update payloads built by dropdown_update_choices() are
    discarded, and the component objects themselves (not updates) are returned —
    so the dropdown choices presumably never refresh in the browser; confirm
    against Gradio's output-update semantics.
    """

    updated_tools = get_tools()
    updated_functions = get_functions()

    tool_names = [tool["name"] for tool in updated_tools]
    function_names = list(updated_functions.keys())

    print(function_names)

    # Default the selection to the first entry of each list, if any.
    current_tool = tool_names[0] if tool_names else None
    current_function = function_names[0] if function_names else None

    tool_description = get_tool_description(current_tool, updated_tools)

    function_code = get_function_code(current_function) if current_function else ""

    # Return values ignored — see NOTE above.
    dropdown_update_choices(function_dropdown,function_names)
    dropdown_update_choices(tool_dropdown,tool_names)

    return (
        tool_dropdown,
        function_dropdown,
        tool_description,
        function_code
    )
|
90 |
+
|
91 |
+
|
92 |
+
def load_collection_andUploadUI(collection_slug: str ):
    """Load a Hub tool collection into the agent, then refresh the tools/functions UI.

    NOTE(review): the result of refresh_ui_elements() is discarded, so as a
    Gradio callback this returns None and no component updates reach the UI —
    confirm the intended wiring (the Load button currently binds
    load_collection_from_space directly instead of this helper).
    """
    load_collection_from_space(agent,collection_slug)
    refresh_ui_elements()
|
95 |
+
|
96 |
+
with gr.Blocks() as demo:
|
97 |
+
with gr.Row():
|
98 |
+
with gr.Column():
|
99 |
+
with gr.Tab("Chat"):
|
100 |
+
gr.Markdown("<center><h1>smolAgent chat</h1></center>")
|
101 |
+
chatbot = gr.Chatbot(type="messages")
|
102 |
+
msg = gr.Textbox()
|
103 |
+
|
104 |
+
send_button = gr.Button("Send") # Send button
|
105 |
+
|
106 |
+
# send_button.click(respond, [msg, chatbot], [msg, chatbot])
|
107 |
+
|
108 |
+
clear = gr.ClearButton([msg, chatbot])
|
109 |
+
|
110 |
+
with gr.Tab("Console"):
|
111 |
+
outputbox = gr.Textbox(
|
112 |
+
lines=25,
|
113 |
+
scale=1,
|
114 |
+
interactive=False
|
115 |
+
)
|
116 |
+
|
117 |
+
with gr.Tab("config"):
|
118 |
+
|
119 |
+
local_storage = gr.BrowserState(["", ""])
|
120 |
+
gr.Markdown("## Configure litellm API Keys")
|
121 |
+
api_key_inputs = {}
|
122 |
+
for provider in litellm_api_keys.keys():
|
123 |
+
with gr.Row():
|
124 |
+
api_key_inputs[provider] = gr.Textbox(
|
125 |
+
label=f'{provider} API Key',
|
126 |
+
placeholder='Enter key',
|
127 |
+
type='password',
|
128 |
+
value=litellm_api_keys[provider]
|
129 |
+
)
|
130 |
+
|
131 |
+
#TODO : save keys to localstorage
|
132 |
+
|
133 |
+
# api_key_inputs[provider]
|
134 |
+
# save_keys_button = gr.Button('Save Configuration')
|
135 |
+
|
136 |
+
|
137 |
+
# @gr.on(api_key_inputs[provider].change, inputs=[api_key_inputs[provider]], outputs=[local_storage])
|
138 |
+
# def save_to_local_storage(text):
|
139 |
+
# return [text]
|
140 |
+
|
141 |
+
# # Load from local storage when page loads
|
142 |
+
# @demo.load(inputs=[local_storage], outputs=[input_text])
|
143 |
+
# def load_from_local_storage(saved_values):
|
144 |
+
# return saved_values[0]
|
145 |
+
|
146 |
+
|
147 |
+
|
148 |
+
with gr.Column():
|
149 |
+
gr.Markdown("<center><h1>tool collection</h1></center>")
|
150 |
+
|
151 |
+
|
152 |
+
tools = get_tools()
|
153 |
+
tool_dropdown = gr.Dropdown(
|
154 |
+
show_label=False,
|
155 |
+
choices=[tool["name"] for tool in tools],
|
156 |
+
value=tools[0]["name"] if tools else None,
|
157 |
+
type="value",
|
158 |
+
allow_custom_value=False,
|
159 |
+
scale=3
|
160 |
+
)
|
161 |
+
|
162 |
+
description_textbox = gr.Textbox(
|
163 |
+
label="Tool Description",
|
164 |
+
value=get_tool_description(tool_dropdown.value, tools),
|
165 |
+
interactive=False,
|
166 |
+
)
|
167 |
+
|
168 |
+
slug = gr.Textbox(label="collection slug",value="Mightypeacock/agent-tools-6777c9699c231b7a1e87fa31")
|
169 |
+
greet_btn = gr.Button("Load")
|
170 |
+
greet_btn.click(fn=load_collection_from_space, inputs=slug,
|
171 |
+
# outputs=tool_dropdown,
|
172 |
+
outputs=None,
|
173 |
+
api_name="load_HF_Collection")
|
174 |
+
|
175 |
+
|
176 |
+
gr.Markdown("<center><h2>Functions</h2></center>")
|
177 |
+
functions = get_functions()
|
178 |
+
function_dropdown = gr.Dropdown(
|
179 |
+
label="Select Function",
|
180 |
+
choices=list(functions.keys()),
|
181 |
+
value=None if not functions.keys() else list(functions.keys())[0],
|
182 |
+
type="value",
|
183 |
+
)
|
184 |
+
|
185 |
+
code = gr.Code(label="Function Code", language="python")
|
186 |
+
|
187 |
+
tool_dropdown.change(
|
188 |
+
fn=get_tool_description,
|
189 |
+
inputs=[tool_dropdown, gr.State(tools)],
|
190 |
+
outputs=description_textbox,
|
191 |
+
)
|
192 |
+
|
193 |
+
function_dropdown.change(
|
194 |
+
fn=get_function_code,
|
195 |
+
inputs=function_dropdown,
|
196 |
+
outputs=code,
|
197 |
+
)
|
198 |
+
|
199 |
+
    def respond(message, console_output, chat_history):
        """Chat callback: run the agent on *message* and refresh console + UI state.

        Outputs (in order): cleared msg textbox, appended console text, updated
        chat history, and the four values from refresh_ui_elements().
        """
        try:
            print(f"Received message: {message}")

            # Gradio may deliver non-str payloads; the agent expects text.
            if not isinstance(message, str):
                message = str(message)

            bot_message = agent.run(message)
            new_console_output = process_logs(agent)

            print(f"Agent response: {bot_message}")
            print(f"Console output: {new_console_output}")

            # Mutates the history list in place and also returns it as an output.
            chat_history.extend([
                {"role": "user", "content": message},
                {"role": "assistant", "content": bot_message}
            ])

            updated_console = console_output + f"\nQuery: {message}\nLogs: {new_console_output}"

            tool_dropdown_update, function_dropdown_update, description_update, code_update = refresh_ui_elements()
            return "", updated_console, chat_history, tool_dropdown_update, function_dropdown_update, description_update, code_update

        # Broad catch keeps the UI alive on any agent failure.
        # NOTE(review): the error text is returned as the first output, i.e. it
        # lands in the msg input box rather than the chat — confirm intent.
        except Exception as e:
            print(f"Error in respond function: {e}")
            return f"An error occurred: {str(e)}", console_output, chat_history, None, None, None, None
|
225 |
+
|
226 |
+
msg.submit(
|
227 |
+
respond,
|
228 |
+
inputs=[msg, outputbox, chatbot],
|
229 |
+
outputs=[msg, outputbox, chatbot, tool_dropdown, function_dropdown, description_textbox, code]
|
230 |
+
)
|
231 |
|
232 |
if __name__ == "__main__":
|
233 |
+
agent = createAgent()
|
234 |
+
demo.launch(show_error=True, debug=True)
|
requirements.txt
CHANGED
@@ -1 +1,5 @@
|
|
1 |
-
huggingface_hub==0.25.2
|
|
|
|
|
|
|
|
|
|
1 |
+
huggingface_hub==0.25.2
|
2 |
+
git+https://github.com/Brunwo/smolagents
|
3 |
+
huggingface_hub
|
4 |
+
|
5 |
+
litellm
|