ThiSecur committed on
Commit
6270c1e
·
verified ·
1 Parent(s): 90e9567

Update Gradio_UI.py

Browse files
Files changed (1) hide show
  1. Gradio_UI.py +17 -218
Gradio_UI.py CHANGED
@@ -3,43 +3,32 @@ import os
3
  import re
4
  import shutil
5
  from typing import Optional
6
-
7
  from smolagents.agent_types import AgentAudio, AgentImage, AgentText, handle_agent_output_types
8
  from smolagents.agents import ActionStep, MultiStepAgent
9
  from smolagents.memory import MemoryStep
10
  from smolagents.utils import _is_package_available
11
 
12
-
13
- def pull_messages_from_step(
14
- step_log: MemoryStep,
15
- ):
16
  """Extract ChatMessage objects from agent steps with proper nesting"""
17
  import gradio as gr
18
 
19
  if isinstance(step_log, ActionStep):
20
- # Output the step number
21
  step_number = f"Step {step_log.step_number}" if step_log.step_number is not None else ""
22
  yield gr.ChatMessage(role="assistant", content=f"**{step_number}**")
23
 
24
- # First yield the thought/reasoning from the LLM
25
  if hasattr(step_log, "model_output") and step_log.model_output is not None:
26
- # Clean up the LLM output
27
  model_output = step_log.model_output.strip()
28
- # Remove any trailing <end_code> and extra backticks, handling multiple possible formats
29
- model_output = re.sub(r"```\s*<end_code>", "```", model_output) # handles ```<end_code>
30
- model_output = re.sub(r"<end_code>\s*```", "```", model_output) # handles <end_code>```
31
- model_output = re.sub(r"```\s*\n\s*<end_code>", "```", model_output) # handles ```\n<end_code>
32
  model_output = model_output.strip()
33
  yield gr.ChatMessage(role="assistant", content=model_output)
34
 
35
- # For tool calls, create a parent message
36
  if hasattr(step_log, "tool_calls") and step_log.tool_calls is not None:
37
  first_tool_call = step_log.tool_calls[0]
38
  used_code = first_tool_call.name == "python_interpreter"
39
  parent_id = f"call_{len(step_log.tool_calls)}"
40
 
41
- # Tool call becomes the parent message with timing info
42
- # First we will handle arguments based on type
43
  args = first_tool_call.arguments
44
  if isinstance(args, dict):
45
  content = str(args.get("answer", str(args)))
@@ -47,9 +36,8 @@ def pull_messages_from_step(
47
  content = str(args).strip()
48
 
49
  if used_code:
50
- # Clean up the content by removing any end code tags
51
- content = re.sub(r"```.*?\n", "", content) # Remove existing code blocks
52
- content = re.sub(r"\s*<end_code>\s*", "", content) # Remove end_code tags
53
  content = content.strip()
54
  if not content.startswith("```python"):
55
  content = f"```python\n{content}\n```"
@@ -65,10 +53,7 @@ def pull_messages_from_step(
65
  )
66
  yield parent_message_tool
67
 
68
- # Nesting execution logs under the tool call if they exist
69
- if hasattr(step_log, "observations") and (
70
- step_log.observations is not None and step_log.observations.strip()
71
- ): # Only yield execution logs if there's actual content
72
  log_content = step_log.observations.strip()
73
  if log_content:
74
  log_content = re.sub(r"^Execution logs:\s*", "", log_content)
@@ -78,7 +63,6 @@ def pull_messages_from_step(
78
  metadata={"title": "📝 Execution Logs", "parent_id": parent_id, "status": "done"},
79
  )
80
 
81
- # Nesting any errors under the tool call
82
  if hasattr(step_log, "error") and step_log.error is not None:
83
  yield gr.ChatMessage(
84
  role="assistant",
@@ -86,19 +70,14 @@ def pull_messages_from_step(
86
  metadata={"title": "💥 Error", "parent_id": parent_id, "status": "done"},
87
  )
88
 
89
- # Update parent message metadata to done status without yielding a new message
90
  parent_message_tool.metadata["status"] = "done"
91
 
92
- # Handle standalone errors but not from tool calls
93
  elif hasattr(step_log, "error") and step_log.error is not None:
94
  yield gr.ChatMessage(role="assistant", content=str(step_log.error), metadata={"title": "💥 Error"})
95
 
96
- # Calculate duration and token information
97
  step_footnote = f"{step_number}"
98
  if hasattr(step_log, "input_token_count") and hasattr(step_log, "output_token_count"):
99
- token_str = (
100
- f" | Input-tokens:{step_log.input_token_count:,} | Output-tokens:{step_log.output_token_count:,}"
101
- )
102
  step_footnote += token_str
103
  if hasattr(step_log, "duration"):
104
  step_duration = f" | Duration: {round(float(step_log.duration), 2)}" if step_log.duration else None
@@ -107,25 +86,16 @@ def pull_messages_from_step(
107
  yield gr.ChatMessage(role="assistant", content=f"{step_footnote}")
108
  yield gr.ChatMessage(role="assistant", content="-----")
109
 
110
-
111
- def stream_to_gradio(
112
- agent,
113
- task: str,
114
- reset_agent_memory: bool = False,
115
- additional_args: Optional[dict] = None,
116
- ):
117
- """Runs an agent with the given task and streams the messages from the agent as gradio ChatMessages."""
118
  if not _is_package_available("gradio"):
119
- raise ModuleNotFoundError(
120
- "Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`"
121
- )
122
  import gradio as gr
123
 
124
  total_input_tokens = 0
125
  total_output_tokens = 0
126
 
127
  for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
128
- # Track tokens if model provides them
129
  if hasattr(agent.model, "last_input_token_count"):
130
  total_input_tokens += agent.model.last_input_token_count
131
  total_output_tokens += agent.model.last_output_token_count
@@ -133,12 +103,10 @@ def stream_to_gradio(
133
  step_log.input_token_count = agent.model.last_input_token_count
134
  step_log.output_token_count = agent.model.last_output_token_count
135
 
136
- for message in pull_messages_from_step(
137
- step_log,
138
- ):
139
  yield message
140
 
141
- final_answer = step_log # Last log is the run's final_answer
142
  final_answer = handle_agent_output_types(final_answer)
143
 
144
  if isinstance(final_answer, AgentText):
@@ -159,15 +127,12 @@ def stream_to_gradio(
159
  else:
160
  yield gr.ChatMessage(role="assistant", content=f"**Final answer:** {str(final_answer)}")
161
 
162
-
163
  class GradioUI:
164
- """A one-line interface to launch your agent in Gradio"""
165
-
166
  def __init__(self, agent: MultiStepAgent, file_upload_folder: str | None = None):
167
  if not _is_package_available("gradio"):
168
- raise ModuleNotFoundError(
169
- "Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`"
170
- )
171
  self.agent = agent
172
  self.file_upload_folder = file_upload_folder
173
  if self.file_upload_folder is not None:
@@ -176,172 +141,6 @@ class GradioUI:
176
 
177
  def interact_with_agent(self, prompt, messages):
178
  import gradio as gr
179
-
180
  messages.append(gr.ChatMessage(role="user", content=prompt))
181
  yield messages
182
- for msg in stream_to_gradio(self.agent, task=prompt, reset_agent_memory=False):
183
- messages.append(msg)
184
- yield messages
185
- yield messages
186
-
187
- def upload_file(
188
- self,
189
- file,
190
- file_uploads_log,
191
- allowed_file_types=[
192
- "application/pdf",
193
- "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
194
- "text/plain",
195
- ],
196
- ):
197
- """
198
- Handle file uploads, default allowed types are .pdf, .docx, and .txt
199
- """
200
- import gradio as gr
201
-
202
- if file is None:
203
- return gr.Textbox("No file uploaded", visible=True), file_uploads_log
204
-
205
- try:
206
- mime_type, _ = mimetypes.guess_type(file.name)
207
- except Exception as e:
208
- return gr.Textbox(f"Error: {e}", visible=True), file_uploads_log
209
-
210
- if mime_type not in allowed_file_types:
211
- return gr.Textbox("File type disallowed", visible=True), file_uploads_log
212
-
213
- # Sanitize file name
214
- original_name = os.path.basename(file.name)
215
- sanitized_name = re.sub(
216
- r"[^\w\-.]", "_", original_name
217
- ) # Replace any non-alphanumeric, non-dash, or non-dot characters with underscores
218
-
219
- type_to_ext = {}
220
- for ext, t in mimetypes.types_map.items():
221
- if t not in type_to_ext:
222
- type_to_ext[t] = ext
223
-
224
- # Ensure the extension correlates to the mime type
225
- sanitized_name = sanitized_name.split(".")[:-1]
226
- sanitized_name.append("" + type_to_ext[mime_type])
227
- sanitized_name = "".join(sanitized_name)
228
-
229
- # Save the uploaded file to the specified folder
230
- file_path = os.path.join(self.file_upload_folder, os.path.basename(sanitized_name))
231
- shutil.copy(file.name, file_path)
232
-
233
- return gr.Textbox(f"File uploaded: {file_path}", visible=True), file_uploads_log + [file_path]
234
-
235
- def log_user_message(self, text_input, file_uploads_log):
236
- return (
237
- text_input
238
- + (
239
- f"\nYou have been provided with these files, which might be helpful or not: {file_uploads_log}"
240
- if len(file_uploads_log) > 0
241
- else ""
242
- ),
243
- "",
244
- )
245
-
246
- def launch(self, **kwargs):
247
- import gradio as gr
248
-
249
- with gr.Blocks(title="AI Assistant", fill_height=True) as demo:
250
- # Welcome message and description
251
- gr.Markdown("""
252
- # 🤖 Welcome to Your AI Assistant
253
-
254
- This intelligent agent can help you with various tasks by:
255
- - Answering questions
256
- - Processing text and documents
257
- - Performing calculations and analysis
258
- - And much more!
259
-
260
- Simply type your request below and the AI will assist you.
261
- """)
262
-
263
- # File upload section
264
- with gr.Accordion("ℹ️ How to use", open=False):
265
- gr.Markdown("""
266
- **Getting started:**
267
- 1. Type your question or request in the chat box
268
- 2. Optionally upload files (PDF, DOCX, TXT) for the AI to reference
269
- 3. Press Enter or click Send to get your answer
270
-
271
- **Examples to try:**
272
- - "Explain quantum computing in simple terms"
273
- - "Summarize the key points from this document" (after uploading)
274
- - "Write a poem about artificial intelligence"
275
- """)
276
-
277
- stored_messages = gr.State([])
278
- file_uploads_log = gr.State([])
279
-
280
- # Chat interface
281
- chatbot = gr.Chatbot(
282
- label="Chat with AI Assistant",
283
- type="messages",
284
- avatar_images=(
285
- None,
286
- "https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/communication/Alfred.png",
287
- ),
288
- height=500,
289
- render=True,
290
- )
291
-
292
- # File upload functionality
293
- if self.file_upload_folder is not None:
294
- with gr.Row():
295
- upload_file = gr.File(
296
- label="Upload documents (PDF, DOCX, TXT)",
297
- file_types=[".pdf", ".docx", ".txt"]
298
- )
299
- upload_status = gr.Textbox(
300
- label="Upload Status",
301
- interactive=False,
302
- visible=False
303
- )
304
-
305
- upload_file.change(
306
- self.upload_file,
307
- [upload_file, file_uploads_log],
308
- [upload_status, file_uploads_log],
309
- )
310
-
311
- # Input area
312
- with gr.Row():
313
- text_input = gr.Textbox(
314
- placeholder="Type your message here...",
315
- label="Your message",
316
- lines=2,
317
- max_lines=5,
318
- container=False,
319
- scale=7
320
- )
321
- submit_btn = gr.Button("Send", variant="primary", scale=1)
322
-
323
- # Interaction handlers
324
- text_input.submit(
325
- self.log_user_message,
326
- [text_input, file_uploads_log],
327
- [stored_messages, text_input],
328
- ).then(
329
- self.interact_with_agent,
330
- [stored_messages, chatbot],
331
- [chatbot]
332
- )
333
-
334
- submit_btn.click(
335
- self.log_user_message,
336
- [text_input, file_uploads_log],
337
- [stored_messages, text_input],
338
- ).then(
339
- self.interact_with_agent,
340
- [stored_messages, chatbot],
341
- [chatbot]
342
- )
343
-
344
- demo.launch(debug=True, share=True, **kwargs)
345
-
346
-
347
- __all__ = ["stream_to_gradio", "GradioUI"]
 
3
  import re
4
  import shutil
5
  from typing import Optional
 
6
  from smolagents.agent_types import AgentAudio, AgentImage, AgentText, handle_agent_output_types
7
  from smolagents.agents import ActionStep, MultiStepAgent
8
  from smolagents.memory import MemoryStep
9
  from smolagents.utils import _is_package_available
10
 
11
+ def pull_messages_from_step(step_log: MemoryStep):
 
 
 
12
  """Extract ChatMessage objects from agent steps with proper nesting"""
13
  import gradio as gr
14
 
15
  if isinstance(step_log, ActionStep):
 
16
  step_number = f"Step {step_log.step_number}" if step_log.step_number is not None else ""
17
  yield gr.ChatMessage(role="assistant", content=f"**{step_number}**")
18
 
 
19
  if hasattr(step_log, "model_output") and step_log.model_output is not None:
 
20
  model_output = step_log.model_output.strip()
21
+ model_output = re.sub(r"```\s*<end_code>", "```", model_output)
22
+ model_output = re.sub(r"<end_code>\s*```", "```", model_output)
23
+ model_output = re.sub(r"```\s*\n\s*<end_code>", "```", model_output)
 
24
  model_output = model_output.strip()
25
  yield gr.ChatMessage(role="assistant", content=model_output)
26
 
 
27
  if hasattr(step_log, "tool_calls") and step_log.tool_calls is not None:
28
  first_tool_call = step_log.tool_calls[0]
29
  used_code = first_tool_call.name == "python_interpreter"
30
  parent_id = f"call_{len(step_log.tool_calls)}"
31
 
 
 
32
  args = first_tool_call.arguments
33
  if isinstance(args, dict):
34
  content = str(args.get("answer", str(args)))
 
36
  content = str(args).strip()
37
 
38
  if used_code:
39
+ content = re.sub(r"```.*?\n", "", content)
40
+ content = re.sub(r"\s*<end_code>\s*", "", content)
 
41
  content = content.strip()
42
  if not content.startswith("```python"):
43
  content = f"```python\n{content}\n```"
 
53
  )
54
  yield parent_message_tool
55
 
56
+ if hasattr(step_log, "observations") and (step_log.observations is not None and step_log.observations.strip()):
 
 
 
57
  log_content = step_log.observations.strip()
58
  if log_content:
59
  log_content = re.sub(r"^Execution logs:\s*", "", log_content)
 
63
  metadata={"title": "📝 Execution Logs", "parent_id": parent_id, "status": "done"},
64
  )
65
 
 
66
  if hasattr(step_log, "error") and step_log.error is not None:
67
  yield gr.ChatMessage(
68
  role="assistant",
 
70
  metadata={"title": "💥 Error", "parent_id": parent_id, "status": "done"},
71
  )
72
 
 
73
  parent_message_tool.metadata["status"] = "done"
74
 
 
75
  elif hasattr(step_log, "error") and step_log.error is not None:
76
  yield gr.ChatMessage(role="assistant", content=str(step_log.error), metadata={"title": "💥 Error"})
77
 
 
78
  step_footnote = f"{step_number}"
79
  if hasattr(step_log, "input_token_count") and hasattr(step_log, "output_token_count"):
80
+ token_str = f" | Input-tokens:{step_log.input_token_count:,} | Output-tokens:{step_log.output_token_count:,}"
 
 
81
  step_footnote += token_str
82
  if hasattr(step_log, "duration"):
83
  step_duration = f" | Duration: {round(float(step_log.duration), 2)}" if step_log.duration else None
 
86
  yield gr.ChatMessage(role="assistant", content=f"{step_footnote}")
87
  yield gr.ChatMessage(role="assistant", content="-----")
88
 
89
+ def stream_to_gradio(agent, task: str, reset_agent_memory: bool = False, additional_args: Optional[dict] = None):
90
+ """Stream agent responses to Gradio interface"""
 
 
 
 
 
 
91
  if not _is_package_available("gradio"):
92
+ raise ModuleNotFoundError("Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`")
 
 
93
  import gradio as gr
94
 
95
  total_input_tokens = 0
96
  total_output_tokens = 0
97
 
98
  for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
 
99
  if hasattr(agent.model, "last_input_token_count"):
100
  total_input_tokens += agent.model.last_input_token_count
101
  total_output_tokens += agent.model.last_output_token_count
 
103
  step_log.input_token_count = agent.model.last_input_token_count
104
  step_log.output_token_count = agent.model.last_output_token_count
105
 
106
+ for message in pull_messages_from_step(step_log):
 
 
107
  yield message
108
 
109
+ final_answer = step_log
110
  final_answer = handle_agent_output_types(final_answer)
111
 
112
  if isinstance(final_answer, AgentText):
 
127
  else:
128
  yield gr.ChatMessage(role="assistant", content=f"**Final answer:** {str(final_answer)}")
129
 
 
130
  class GradioUI:
131
+ """Custom Gradio interface for the agent with specialized tools"""
132
+
133
  def __init__(self, agent: MultiStepAgent, file_upload_folder: str | None = None):
134
  if not _is_package_available("gradio"):
135
+ raise ModuleNotFoundError("Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`")
 
 
136
  self.agent = agent
137
  self.file_upload_folder = file_upload_folder
138
  if self.file_upload_folder is not None:
 
141
 
142
  def interact_with_agent(self, prompt, messages):
143
  import gradio as gr
 
144
  messages.append(gr.ChatMessage(role="user", content=prompt))
145
  yield messages
146
+