ThiSecur committed
Commit 057964e · verified · 1 Parent(s): 9be352e

Update Gradio_UI.py

Files changed (1): Gradio_UI.py +40 -270
Gradio_UI.py CHANGED
@@ -1,288 +1,58 @@
-import mimetypes
-import os
-import re
-import shutil
+import gradio as gr
 from typing import Optional
 from smolagents.agent_types import AgentAudio, AgentImage, AgentText, handle_agent_output_types
 from smolagents.agents import ActionStep, MultiStepAgent
 from smolagents.memory import MemoryStep
-from smolagents.utils import _is_package_available
-
-def pull_messages_from_step(step_log: MemoryStep):
-    """Extract ChatMessage objects from agent steps with proper nesting"""
-    import gradio as gr
-
-    if isinstance(step_log, ActionStep):
-        step_number = f"Step {step_log.step_number}" if step_log.step_number is not None else ""
-        yield gr.ChatMessage(role="assistant", content=f"**{step_number}**")
-
-        if hasattr(step_log, "model_output") and step_log.model_output is not None:
-            model_output = step_log.model_output.strip()
-            model_output = re.sub(r"```\s*<end_code>", "```", model_output)
-            model_output = re.sub(r"<end_code>\s*```", "```", model_output)
-            model_output = re.sub(r"```\s*\n\s*<end_code>", "```", model_output)
-            model_output = model_output.strip()
-            yield gr.ChatMessage(role="assistant", content=model_output)
-
-        if hasattr(step_log, "tool_calls") and step_log.tool_calls is not None:
-            first_tool_call = step_log.tool_calls[0]
-            used_code = first_tool_call.name == "python_interpreter"
-            parent_id = f"call_{len(step_log.tool_calls)}"
-
-            args = first_tool_call.arguments
-            if isinstance(args, dict):
-                content = str(args.get("answer", str(args)))
-            else:
-                content = str(args).strip()
-
-            if used_code:
-                content = re.sub(r"```.*?\n", "", content)
-                content = re.sub(r"\s*<end_code>\s*", "", content)
-                content = content.strip()
-                if not content.startswith("```python"):
-                    content = f"```python\n{content}\n```"
-
-            parent_message_tool = gr.ChatMessage(
-                role="assistant",
-                content=content,
-                metadata={
-                    "title": f"🛠️ Used tool {first_tool_call.name}",
-                    "id": parent_id,
-                    "status": "pending",
-                },
-            )
-            yield parent_message_tool
-
-            if hasattr(step_log, "observations") and (step_log.observations is not None and step_log.observations.strip()):
-                log_content = step_log.observations.strip()
-                if log_content:
-                    log_content = re.sub(r"^Execution logs:\s*", "", log_content)
-                    yield gr.ChatMessage(
-                        role="assistant",
-                        content=f"{log_content}",
-                        metadata={"title": "📝 Execution Logs", "parent_id": parent_id, "status": "done"},
-                    )
-
-            if hasattr(step_log, "error") and step_log.error is not None:
-                yield gr.ChatMessage(
-                    role="assistant",
-                    content=str(step_log.error),
-                    metadata={"title": "💥 Error", "parent_id": parent_id, "status": "done"},
-                )
-
-            parent_message_tool.metadata["status"] = "done"
-
-        elif hasattr(step_log, "error") and step_log.error is not None:
-            yield gr.ChatMessage(role="assistant", content=str(step_log.error), metadata={"title": "💥 Error"})
-
-        step_footnote = f"{step_number}"
-        if hasattr(step_log, "input_token_count") and hasattr(step_log, "output_token_count"):
-            token_str = f" | Input-tokens:{step_log.input_token_count:,} | Output-tokens:{step_log.output_token_count:,}"
-            step_footnote += token_str
-        if hasattr(step_log, "duration"):
-            step_duration = f" | Duration: {round(float(step_log.duration), 2)}" if step_log.duration else None
-            step_footnote += step_duration
-        step_footnote = f"""<span style="color: #bbbbc2; font-size: 12px;">{step_footnote}</span> """
-        yield gr.ChatMessage(role="assistant", content=f"{step_footnote}")
-        yield gr.ChatMessage(role="assistant", content="-----")
-
-def stream_to_gradio(agent, task: str, reset_agent_memory: bool = False, additional_args: Optional[dict] = None):
-    """Stream agent responses to Gradio interface"""
-    if not _is_package_available("gradio"):
-        raise ModuleNotFoundError("Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`")
-    import gradio as gr
-
-    total_input_tokens = 0
-    total_output_tokens = 0
-
-    for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
-        if hasattr(agent.model, "last_input_token_count"):
-            total_input_tokens += agent.model.last_input_token_count
-            total_output_tokens += agent.model.last_output_token_count
-            if isinstance(step_log, ActionStep):
-                step_log.input_token_count = agent.model.last_input_token_count
-                step_log.output_token_count = agent.model.last_output_token_count
-
-        for message in pull_messages_from_step(step_log):
-            yield message
-
-    final_answer = step_log
-    final_answer = handle_agent_output_types(final_answer)
-
-    if isinstance(final_answer, AgentText):
-        yield gr.ChatMessage(
-            role="assistant",
-            content=f"**Final answer:**\n{final_answer.to_string()}\n",
-        )
-    elif isinstance(final_answer, AgentImage):
-        yield gr.ChatMessage(
-            role="assistant",
-            content={"path": final_answer.to_string(), "mime_type": "image/png"},
-        )
-    elif isinstance(final_answer, AgentAudio):
-        yield gr.ChatMessage(
-            role="assistant",
-            content={"path": final_answer.to_string(), "mime_type": "audio/wav"},
-        )
-    else:
-        yield gr.ChatMessage(role="assistant", content=f"**Final answer:** {str(final_answer)}")

 class GradioUI:
-    """Custom Gradio interface for the agent with specialized tools"""
+    """Simplified Gradio interface for Hugging Face Spaces"""

-    def __init__(self, agent: MultiStepAgent, file_upload_folder: str | None = None):
-        if not _is_package_available("gradio"):
-            raise ModuleNotFoundError("Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`")
+    def __init__(self, agent: MultiStepAgent):
         self.agent = agent
-        self.file_upload_folder = file_upload_folder
-        if self.file_upload_folder is not None:
-            if not os.path.exists(file_upload_folder):
-                os.makedirs(file_upload_folder, exist_ok=True)

-    def launch(self, **kwargs):
-        import gradio as gr
-
-        with gr.Blocks(title="Multi-Tool AI Assistant", theme=gr.themes.Soft(), fill_height=True) as demo:
-            # Header with capabilities overview
+    def launch(self):
+        with gr.Blocks(title="AI Assistant", theme=gr.themes.Soft()) as demo:
             gr.Markdown("""
-            # 🛠️ Multi-Tool AI Assistant
-
-            This assistant specializes in:
-            - **Time zone conversions** (e.g., "What time is 3pm EST in Tokyo?")
-            - **Weather lookups** (e.g., "What's the weather in Paris?")
-            - **Unit conversions** (e.g., "Convert 50 miles to kilometers")
-            - **Web search** (e.g., "Find recent news about AI")
-            - **Image generation** (e.g., "Create an image of a futuristic city")
-            - **Code execution** (e.g., "Calculate factorial of 5")
+            # 🤖 AI Assistant
+            **Capabilities:**
+            - Time zone conversions
+            - Weather lookups
+            - Unit conversions
+            - Web search
+            - Image generation
+            - Code execution
             """)

-            # State management
-            stored_messages = gr.State([])
-            file_uploads_log = gr.State([])
-
-            # Chat interface
-            with gr.Row():
-                chatbot = gr.Chatbot(
-                    label="Conversation",
-                    avatar_images=(
-                        None,
-                        "https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/Alfred.png",
-                    ),
-                    height=500,
-                    render=True,
-                    bubble_full_width=False
+            chatbot = gr.Chatbot(
+                height=500,
+                avatar_images=(
+                    None,
+                    "https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/Alfred.png",
                 )
+            )

-            # File upload and input section
             with gr.Row():
-                if self.file_upload_folder is not None:
-                    with gr.Column(scale=1):
-                        upload_file = gr.File(
-                            label="Upload documents (PDF, DOCX, TXT)",
-                            file_types=[".pdf", ".docx", ".txt"],
-                            height=100
-                        )
-                        upload_status = gr.Textbox(
-                            label="Upload Status",
-                            interactive=False,
-                            visible=False
-                        )
+                msg = gr.Textbox(
+                    placeholder="Ask me anything...",
+                    container=False,
+                    scale=7
+                )
+                submit = gr.Button("Send", scale=1)
+
+            def respond(message, chat_history):
+                chat_history.append((message, ""))
+                full_response = ""
+                for step_log in self.agent.run(message, stream=True):
+                    if isinstance(step_log, ActionStep):
+                        if hasattr(step_log, 'model_output') and step_log.model_output:
+                            full_response += step_log.model_output + "\n"
+                        if hasattr(step_log, 'observations') and step_log.observations:
+                            full_response += step_log.observations + "\n"

-                with gr.Column(scale=4):
-                    text_input = gr.Textbox(
-                        placeholder="Type your question or request here...",
-                        label="Your message",
-                        lines=2,
-                        max_lines=5,
-                        container=False
-                    )
-
-            # Control buttons
-            with gr.Row():
-                submit_btn = gr.Button("Send", variant="primary")
-                clear_btn = gr.Button("Clear Chat")
-
-            # Event handlers
-            upload_file.change(
-                self.upload_file,
-                [upload_file, file_uploads_log],
-                [upload_status, file_uploads_log],
-            )
-
-            text_input.submit(
-                self.log_user_message,
-                [text_input, file_uploads_log],
-                [stored_messages, text_input],
-            ).then(
-                self.interact_with_agent,
-                [stored_messages, chatbot],
-                [chatbot]
-            )
-
-            submit_btn.click(
-                self.log_user_message,
-                [text_input, file_uploads_log],
-                [stored_messages, text_input],
-            ).then(
-                self.interact_with_agent,
-                [stored_messages, chatbot],
-                [chatbot]
-            )
+                chat_history[-1] = (message, full_response)
+                return "", chat_history

-            clear_btn.click(
-                lambda: (None, [], []),
-                outputs=[chatbot, stored_messages, file_uploads_log]
-            )
-
-        demo.launch(**kwargs)
-
-    def upload_file(self, file, file_uploads_log, allowed_file_types=[
-        "application/pdf",
-        "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
-        "text/plain"]):
-        import gradio as gr
-        if file is None:
-            return gr.Textbox("No file uploaded", visible=True), file_uploads_log
-
-        try:
-            mime_type, _ = mimetypes.guess_type(file.name)
-        except Exception as e:
-            return gr.Textbox(f"Error: {e}", visible=True), file_uploads_log
-
-        if mime_type not in allowed_file_types:
-            return gr.Textbox("File type disallowed", visible=True), file_uploads_log
-
-        original_name = os.path.basename(file.name)
-        sanitized_name = re.sub(r"[^\w\-.]", "_", original_name)
-
-        type_to_ext = {}
-        for ext, t in mimetypes.types_map.items():
-            if t not in type_to_ext:
-                type_to_ext[t] = ext
-
-        sanitized_name = sanitized_name.split(".")[:-1]
-        sanitized_name.append("" + type_to_ext[mime_type])
-        sanitized_name = "".join(sanitized_name)
-
-        file_path = os.path.join(self.file_upload_folder, os.path.basename(sanitized_name))
-        shutil.copy(file.name, file_path)
-
-        return gr.Textbox(f"File uploaded: {file_path}", visible=True), file_uploads_log + [file_path]
-
-    def log_user_message(self, text_input, file_uploads_log):
-        return (
-            text_input + (
-                f"\nYou have been provided with these files, which might be helpful or not: {file_uploads_log}"
-                if len(file_uploads_log) > 0 else ""
-            ),
-            "",
-        )
+            msg.submit(respond, [msg, chatbot], [msg, chatbot])
+            submit.click(respond, [msg, chatbot], [msg, chatbot])

-    def interact_with_agent(self, prompt, messages):
-        import gradio as gr
-        messages.append(gr.ChatMessage(role="user", content=prompt))
-        yield messages
-        for msg in stream_to_gradio(self.agent, task=prompt, reset_agent_memory=False):
-            messages.append(msg)
-            yield messages
-        yield messages
+        demo.launch()
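
For reference, here is a minimal sketch of how the simplified `GradioUI` above would typically be driven from an `app.py` in the same Space. The `CodeAgent`/`HfApiModel` construction and the empty tool list are illustrative assumptions, not part of this commit:

```python
# app.py — hypothetical driver for the simplified GradioUI above.
from smolagents import CodeAgent, HfApiModel  # agent/model choices are assumptions

from Gradio_UI import GradioUI

# Build an agent; the tools list is left empty here and would normally hold
# the time/weather/search/image tools this Space registers elsewhere.
agent = CodeAgent(tools=[], model=HfApiModel())

# The new GradioUI takes only the agent and launches the Blocks app.
GradioUI(agent).launch()
```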