elomid committed on
Commit
773c650
·
1 Parent(s): ea0dd2d

add agent description in UI

Browse files
Files changed (2) hide show
  1. Gradio_UI.py +77 -22
  2. app.py +13 -3
Gradio_UI.py CHANGED
@@ -19,7 +19,12 @@ import re
19
  import shutil
20
  from typing import Optional
21
 
22
- from smolagents.agent_types import AgentAudio, AgentImage, AgentText, handle_agent_output_types
 
 
 
 
 
23
  from smolagents.agents import ActionStep, MultiStepAgent
24
  from smolagents.memory import MemoryStep
25
  from smolagents.utils import _is_package_available
@@ -33,7 +38,9 @@ def pull_messages_from_step(
33
 
34
  if isinstance(step_log, ActionStep):
35
  # Output the step number
36
- step_number = f"Step {step_log.step_number}" if step_log.step_number is not None else ""
 
 
37
  yield gr.ChatMessage(role="assistant", content=f"**{step_number}**")
38
 
39
  # First yield the thought/reasoning from the LLM
@@ -41,9 +48,15 @@ def pull_messages_from_step(
41
  # Clean up the LLM output
42
  model_output = step_log.model_output.strip()
43
  # Remove any trailing <end_code> and extra backticks, handling multiple possible formats
44
- model_output = re.sub(r"```\s*<end_code>", "```", model_output) # handles ```<end_code>
45
- model_output = re.sub(r"<end_code>\s*```", "```", model_output) # handles <end_code>```
46
- model_output = re.sub(r"```\s*\n\s*<end_code>", "```", model_output) # handles ```\n<end_code>
 
 
 
 
 
 
47
  model_output = model_output.strip()
48
  yield gr.ChatMessage(role="assistant", content=model_output)
49
 
@@ -63,8 +76,12 @@ def pull_messages_from_step(
63
 
64
  if used_code:
65
  # Clean up the content by removing any end code tags
66
- content = re.sub(r"```.*?\n", "", content) # Remove existing code blocks
67
- content = re.sub(r"\s*<end_code>\s*", "", content) # Remove end_code tags
 
 
 
 
68
  content = content.strip()
69
  if not content.startswith("```python"):
70
  content = f"```python\n{content}\n```"
@@ -90,7 +107,11 @@ def pull_messages_from_step(
90
  yield gr.ChatMessage(
91
  role="assistant",
92
  content=f"{log_content}",
93
- metadata={"title": "📝 Execution Logs", "parent_id": parent_id, "status": "done"},
 
 
 
 
94
  )
95
 
96
  # Nesting any errors under the tool call
@@ -98,7 +119,11 @@ def pull_messages_from_step(
98
  yield gr.ChatMessage(
99
  role="assistant",
100
  content=str(step_log.error),
101
- metadata={"title": "💥 Error", "parent_id": parent_id, "status": "done"},
 
 
 
 
102
  )
103
 
104
  # Update parent message metadata to done status without yielding a new message
@@ -106,17 +131,25 @@ def pull_messages_from_step(
106
 
107
  # Handle standalone errors but not from tool calls
108
  elif hasattr(step_log, "error") and step_log.error is not None:
109
- yield gr.ChatMessage(role="assistant", content=str(step_log.error), metadata={"title": "💥 Error"})
 
 
 
 
110
 
111
  # Calculate duration and token information
112
  step_footnote = f"{step_number}"
113
- if hasattr(step_log, "input_token_count") and hasattr(step_log, "output_token_count"):
114
- token_str = (
115
- f" | Input-tokens:{step_log.input_token_count:,} | Output-tokens:{step_log.output_token_count:,}"
116
- )
117
  step_footnote += token_str
118
  if hasattr(step_log, "duration"):
119
- step_duration = f" | Duration: {round(float(step_log.duration), 2)}" if step_log.duration else None
 
 
 
 
120
  step_footnote += step_duration
121
  step_footnote = f"""<span style="color: #bbbbc2; font-size: 12px;">{step_footnote}</span> """
122
  yield gr.ChatMessage(role="assistant", content=f"{step_footnote}")
@@ -139,7 +172,9 @@ def stream_to_gradio(
139
  total_input_tokens = 0
140
  total_output_tokens = 0
141
 
142
- for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
 
 
143
  # Track tokens if model provides them
144
  if hasattr(agent.model, "last_input_token_count"):
145
  total_input_tokens += agent.model.last_input_token_count
@@ -172,19 +207,27 @@ def stream_to_gradio(
172
  content={"path": final_answer.to_string(), "mime_type": "audio/wav"},
173
  )
174
  else:
175
- yield gr.ChatMessage(role="assistant", content=f"**Final answer:** {str(final_answer)}")
 
 
176
 
177
 
178
  class GradioUI:
179
  """A one-line interface to launch your agent in Gradio"""
180
 
181
- def __init__(self, agent: MultiStepAgent, file_upload_folder: str | None = None):
 
 
 
 
 
182
  if not _is_package_available("gradio"):
183
  raise ModuleNotFoundError(
184
  "Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`"
185
  )
186
  self.agent = agent
187
  self.file_upload_folder = file_upload_folder
 
188
  if self.file_upload_folder is not None:
189
  if not os.path.exists(file_upload_folder):
190
  os.mkdir(file_upload_folder)
@@ -242,10 +285,14 @@ class GradioUI:
242
  sanitized_name = "".join(sanitized_name)
243
 
244
  # Save the uploaded file to the specified folder
245
- file_path = os.path.join(self.file_upload_folder, os.path.basename(sanitized_name))
 
 
246
  shutil.copy(file.name, file_path)
247
 
248
- return gr.Textbox(f"File uploaded: {file_path}", visible=True), file_uploads_log + [file_path]
 
 
249
 
250
  def log_user_message(self, text_input, file_uploads_log):
251
  return (
@@ -262,6 +309,12 @@ class GradioUI:
262
  import gradio as gr
263
 
264
  with gr.Blocks(fill_height=True) as demo:
 
 
 
 
 
 
265
  stored_messages = gr.State([])
266
  file_uploads_log = gr.State([])
267
  chatbot = gr.Chatbot(
@@ -277,7 +330,9 @@ class GradioUI:
277
  # If an upload folder is provided, enable the upload feature
278
  if self.file_upload_folder is not None:
279
  upload_file = gr.File(label="Upload a file")
280
- upload_status = gr.Textbox(label="Upload Status", interactive=False, visible=False)
 
 
281
  upload_file.change(
282
  self.upload_file,
283
  [upload_file, file_uploads_log],
@@ -293,4 +348,4 @@ class GradioUI:
293
  demo.launch(debug=True, share=True, **kwargs)
294
 
295
 
296
- __all__ = ["stream_to_gradio", "GradioUI"]
 
19
  import shutil
20
  from typing import Optional
21
 
22
+ from smolagents.agent_types import (
23
+ AgentAudio,
24
+ AgentImage,
25
+ AgentText,
26
+ handle_agent_output_types,
27
+ )
28
  from smolagents.agents import ActionStep, MultiStepAgent
29
  from smolagents.memory import MemoryStep
30
  from smolagents.utils import _is_package_available
 
38
 
39
  if isinstance(step_log, ActionStep):
40
  # Output the step number
41
+ step_number = (
42
+ f"Step {step_log.step_number}" if step_log.step_number is not None else ""
43
+ )
44
  yield gr.ChatMessage(role="assistant", content=f"**{step_number}**")
45
 
46
  # First yield the thought/reasoning from the LLM
 
48
  # Clean up the LLM output
49
  model_output = step_log.model_output.strip()
50
  # Remove any trailing <end_code> and extra backticks, handling multiple possible formats
51
+ model_output = re.sub(
52
+ r"```\s*<end_code>", "```", model_output
53
+ ) # handles ```<end_code>
54
+ model_output = re.sub(
55
+ r"<end_code>\s*```", "```", model_output
56
+ ) # handles <end_code>```
57
+ model_output = re.sub(
58
+ r"```\s*\n\s*<end_code>", "```", model_output
59
+ ) # handles ```\n<end_code>
60
  model_output = model_output.strip()
61
  yield gr.ChatMessage(role="assistant", content=model_output)
62
 
 
76
 
77
  if used_code:
78
  # Clean up the content by removing any end code tags
79
+ content = re.sub(
80
+ r"```.*?\n", "", content
81
+ ) # Remove existing code blocks
82
+ content = re.sub(
83
+ r"\s*<end_code>\s*", "", content
84
+ ) # Remove end_code tags
85
  content = content.strip()
86
  if not content.startswith("```python"):
87
  content = f"```python\n{content}\n```"
 
107
  yield gr.ChatMessage(
108
  role="assistant",
109
  content=f"{log_content}",
110
+ metadata={
111
+ "title": "📝 Execution Logs",
112
+ "parent_id": parent_id,
113
+ "status": "done",
114
+ },
115
  )
116
 
117
  # Nesting any errors under the tool call
 
119
  yield gr.ChatMessage(
120
  role="assistant",
121
  content=str(step_log.error),
122
+ metadata={
123
+ "title": "💥 Error",
124
+ "parent_id": parent_id,
125
+ "status": "done",
126
+ },
127
  )
128
 
129
  # Update parent message metadata to done status without yielding a new message
 
131
 
132
  # Handle standalone errors but not from tool calls
133
  elif hasattr(step_log, "error") and step_log.error is not None:
134
+ yield gr.ChatMessage(
135
+ role="assistant",
136
+ content=str(step_log.error),
137
+ metadata={"title": "💥 Error"},
138
+ )
139
 
140
  # Calculate duration and token information
141
  step_footnote = f"{step_number}"
142
+ if hasattr(step_log, "input_token_count") and hasattr(
143
+ step_log, "output_token_count"
144
+ ):
145
+ token_str = f" | Input-tokens:{step_log.input_token_count:,} | Output-tokens:{step_log.output_token_count:,}"
146
  step_footnote += token_str
147
  if hasattr(step_log, "duration"):
148
+ step_duration = (
149
+ f" | Duration: {round(float(step_log.duration), 2)}"
150
+ if step_log.duration
151
+ else None
152
+ )
153
  step_footnote += step_duration
154
  step_footnote = f"""<span style="color: #bbbbc2; font-size: 12px;">{step_footnote}</span> """
155
  yield gr.ChatMessage(role="assistant", content=f"{step_footnote}")
 
172
  total_input_tokens = 0
173
  total_output_tokens = 0
174
 
175
+ for step_log in agent.run(
176
+ task, stream=True, reset=reset_agent_memory, additional_args=additional_args
177
+ ):
178
  # Track tokens if model provides them
179
  if hasattr(agent.model, "last_input_token_count"):
180
  total_input_tokens += agent.model.last_input_token_count
 
207
  content={"path": final_answer.to_string(), "mime_type": "audio/wav"},
208
  )
209
  else:
210
+ yield gr.ChatMessage(
211
+ role="assistant", content=f"**Final answer:** {str(final_answer)}"
212
+ )
213
 
214
 
215
  class GradioUI:
216
  """A one-line interface to launch your agent in Gradio"""
217
 
218
+ def __init__(
219
+ self,
220
+ agent: MultiStepAgent,
221
+ file_upload_folder: str | None = None,
222
+ description: str | None = None, # Add description parameter
223
+ ):
224
  if not _is_package_available("gradio"):
225
  raise ModuleNotFoundError(
226
  "Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`"
227
  )
228
  self.agent = agent
229
  self.file_upload_folder = file_upload_folder
230
+ self.description = description # Store description
231
  if self.file_upload_folder is not None:
232
  if not os.path.exists(file_upload_folder):
233
  os.mkdir(file_upload_folder)
 
285
  sanitized_name = "".join(sanitized_name)
286
 
287
  # Save the uploaded file to the specified folder
288
+ file_path = os.path.join(
289
+ self.file_upload_folder, os.path.basename(sanitized_name)
290
+ )
291
  shutil.copy(file.name, file_path)
292
 
293
+ return gr.Textbox(
294
+ f"File uploaded: {file_path}", visible=True
295
+ ), file_uploads_log + [file_path]
296
 
297
  def log_user_message(self, text_input, file_uploads_log):
298
  return (
 
309
  import gradio as gr
310
 
311
  with gr.Blocks(fill_height=True) as demo:
312
+ # Add welcome message at the top
313
+ if self.description: # Use self.description instead of agent.description
314
+ gr.Markdown(self.description)
315
+ elif self.agent.description: # Fallback to agent.description if available
316
+ gr.Markdown(self.agent.description)
317
+
318
  stored_messages = gr.State([])
319
  file_uploads_log = gr.State([])
320
  chatbot = gr.Chatbot(
 
330
  # If an upload folder is provided, enable the upload feature
331
  if self.file_upload_folder is not None:
332
  upload_file = gr.File(label="Upload a file")
333
+ upload_status = gr.Textbox(
334
+ label="Upload Status", interactive=False, visible=False
335
+ )
336
  upload_file.change(
337
  self.upload_file,
338
  [upload_file, file_uploads_log],
 
348
  demo.launch(debug=True, share=True, **kwargs)
349
 
350
 
351
+ __all__ = ["stream_to_gradio", "GradioUI"]
app.py CHANGED
@@ -74,6 +74,17 @@ image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_co
74
  with open("prompts.yaml", "r") as stream:
75
  prompt_templates = yaml.safe_load(stream)
76
 
 
 
 
 
 
 
 
 
 
 
 
77
  agent = CodeAgent(
78
  model=model,
79
  tools=[
@@ -89,9 +100,8 @@ agent = CodeAgent(
89
  grammar=None,
90
  planning_interval=None,
91
  name=None,
92
- description=None,
93
  prompt_templates=prompt_templates,
94
  )
95
 
96
-
97
- GradioUI(agent).launch()
 
74
  with open("prompts.yaml", "r") as stream:
75
  prompt_templates = yaml.safe_load(stream)
76
 
77
+ welcome_message = """
78
+ ## Welcome!
79
+ I can help you with:
80
+ - Getting cryptocurrency prices (e.g. "What's the current price of bitcoin?")
81
+ - Searching the web for information
82
+ - Generating images from text descriptions
83
+ - General knowledge questions
84
+ - And more!
85
+
86
+ """
87
+
88
  agent = CodeAgent(
89
  model=model,
90
  tools=[
 
100
  grammar=None,
101
  planning_interval=None,
102
  name=None,
103
+ description=welcome_message,
104
  prompt_templates=prompt_templates,
105
  )
106
 
107
+ GradioUI(agent, description=welcome_message).launch()