Commit dd63537
Parent(s): b16bcc2

Changed thinking streaming

src/gradio_utils.py  CHANGED  (+11 -15)
````diff
@@ -93,19 +93,19 @@ def update_map_on_selection(row: pd.Series, df_routes: gr.State) -> Map:
 def pull_messages_from_step(step_log, test_mode: bool = True):
     """Extract ChatMessage objects from agent steps"""
     if isinstance(step_log, ActionStep):
-        yield
+        yield step_log.llm_output
         if step_log.tool_calls is not None:
             first_tool_call = step_log.tool_calls[0]
             used_code = first_tool_call.name == "code interpreter"
             content = first_tool_call.arguments
             if used_code:
                 content = f"```py\n{content}\n```"
-            yield
+            yield str(content)
 
         if step_log.observations is not None:
-            yield
+            yield step_log.observations
         if step_log.error is not None:
-            yield
+            yield f"Error 💥💥: {str(step_log.error)}"
 
 
 # Simplified interaction function
````
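As a quick illustration of what this hunk changes: `pull_messages_from_step` now yields plain strings (the LLM output, a fenced rendering of the first tool call, the observations, and an error line) instead of ready-made chat messages, leaving message construction to the caller. The sketch below is hypothetical; `pull_strings` and the `SimpleNamespace` stand-in only mirror the added lines, using the same `ActionStep` attributes the diff reads (`llm_output`, `tool_calls`, `observations`, `error`).

````py
from types import SimpleNamespace

# Hypothetical stand-in for a smolagents ActionStep; only the attributes that
# pull_messages_from_step reads are populated.
step = SimpleNamespace(
    llm_output="Thought: I should plot the selected route.",
    tool_calls=[SimpleNamespace(name="code interpreter", arguments="update_map_on_selection(row, df_routes)")],
    observations="Map updated with 1 route.",
    error=None,
)

def pull_strings(step_log):
    # Mirrors the yields added in the hunk above (the isinstance(step_log, ActionStep)
    # guard is dropped so the sketch runs on the stand-in object).
    yield step_log.llm_output
    if step_log.tool_calls is not None:
        first_tool_call = step_log.tool_calls[0]
        content = first_tool_call.arguments
        if first_tool_call.name == "code interpreter":
            content = f"```py\n{content}\n```"
        yield str(content)
    if step_log.observations is not None:
        yield step_log.observations
    if step_log.error is not None:
        yield f"Error: {step_log.error}"

for chunk in pull_strings(step):
    print(chunk)  # plain strings, ready to be accumulated into a single chat bubble
````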
```diff
@@ -114,7 +114,8 @@ def interact_with_agent(agent, prompt, messages, df_routes, additional_args):
     messages.append(gr.ChatMessage(role="user", content=prompt))
     yield (messages, df_routes, gr.Textbox(value=FINAL_MESSAGE_HEADER, container=True))
 
-
+    messages.append(gr.ChatMessage(role="assistant", content="", metadata={"title": "π€ππ"}))
+    yield (messages, df_routes, gr.Textbox(value=FINAL_MESSAGE_HEADER, container=True))
 
     for msg, _df_routes, final_message in stream_to_gradio(
         agent,
```
```diff
@@ -123,7 +124,7 @@ def interact_with_agent(agent, prompt, messages, df_routes, additional_args):
         reset_agent_memory=True,
         additional_args=additional_args,
     ):
-        if msg.metadata
+        if msg.metadata["title"] == "π€ππ":
             messages[-1] = msg
         else:
             messages.append(msg)
```
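The two hunks above work as a pair: `interact_with_agent` first appends an empty assistant message carrying the thinking title, then overwrites that last message whenever a streamed update carries the same title, so the thinking bubble grows in place while any other message (for example the final answer) is appended as a new bubble. Below is a small, self-contained sketch of that pattern; the plain "Thinking" title string is a placeholder for the emoji title used in the diff, and the sample messages are made up.

```py
import gradio as gr

THINKING_TITLE = "Thinking"  # placeholder for the emoji title string used in the diff

messages = [gr.ChatMessage(role="user", content="Find me a quiet cycling route")]
# Empty placeholder bubble that streamed updates will overwrite in place.
messages.append(gr.ChatMessage(role="assistant", content="", metadata={"title": THINKING_TITLE}))

streamed = [
    gr.ChatMessage(role="assistant", content="Thought: load the routes...", metadata={"title": THINKING_TITLE}),
    gr.ChatMessage(role="assistant", content="Thought: load the routes...\n\nObservation: 3 candidates", metadata={"title": THINKING_TITLE}),
    gr.ChatMessage(role="assistant", content="Here is the quietest route."),  # final answer, no title
]
for msg in streamed:
    if (msg.metadata or {}).get("title") == THINKING_TITLE:
        messages[-1] = msg      # same thinking bubble, progressively longer content
    else:
        messages.append(msg)    # everything else becomes a new bubble
```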
```diff
@@ -144,16 +145,11 @@ def stream_to_gradio(
     accumulated_thoughts = ""
     accumulated_errors = ""
     for step_log in agent.run(task, stream=True, reset=reset_agent_memory, **kwargs):
-
-        for (obs, error) in pull_messages_from_step(step_log, test_mode=test_mode):
-
-            if len(obs) > 0:
-                accumulated_thoughts += f"{obs}\n\n"
-                message = gr.ChatMessage(role="assistant", metadata={"title": "π€ππ"}, content=str(obs))
-
-
-
+        for agent_thought in pull_messages_from_step(step_log, test_mode=test_mode):
+
+            accumulated_thoughts += f"{agent_thought}\n\n"
+            message = gr.ChatMessage(role="assistant", metadata={"title": "π€ππ"}, content=str(accumulated_thoughts))
             yield (message, df_routes, gr.Markdown(value=FINAL_MESSAGE_HEADER, container=True))
 
     final_answer = step_log  # Last log is the run's final_answer
```
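The effect of this last hunk: every string pulled from a step is appended to a single growing `accumulated_thoughts` blob, and the same titled assistant message is re-yielded with the longer content on each pass, so the UI shows one thinking bubble that keeps extending instead of a stream of separate ones. A rough sketch of that accumulation loop in isolation (the sample thoughts and the plain title string are made up):

```py
import gradio as gr

accumulated_thoughts = ""
sample_thoughts = [  # stand-ins for what pull_messages_from_step would yield
    "Thought: load the GPX tracks",
    "Observation: 214 points parsed",
    "Thought: compute the elevation profile",
]
for agent_thought in sample_thoughts:
    accumulated_thoughts += f"{agent_thought}\n\n"
    # One bubble, re-emitted with progressively longer content; combined with the
    # replace-in-place logic in interact_with_agent it renders as a growing message.
    message = gr.ChatMessage(role="assistant", metadata={"title": "Thinking"}, content=accumulated_thoughts)
    print(message.content)
```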
After this commit, the changed sections of src/gradio_utils.py read as follows (unchanged surrounding code is omitted):

````py
def pull_messages_from_step(step_log, test_mode: bool = True):
    """Extract ChatMessage objects from agent steps"""
    if isinstance(step_log, ActionStep):
        yield step_log.llm_output
        if step_log.tool_calls is not None:
            first_tool_call = step_log.tool_calls[0]
            used_code = first_tool_call.name == "code interpreter"
            content = first_tool_call.arguments
            if used_code:
                content = f"```py\n{content}\n```"
            yield str(content)

        if step_log.observations is not None:
            yield step_log.observations
        if step_log.error is not None:
            yield f"Error 💥💥: {str(step_log.error)}"


# Simplified interaction function
# (changed section of interact_with_agent)
    messages.append(gr.ChatMessage(role="user", content=prompt))
    yield (messages, df_routes, gr.Textbox(value=FINAL_MESSAGE_HEADER, container=True))

    messages.append(gr.ChatMessage(role="assistant", content="", metadata={"title": "π€ππ"}))
    yield (messages, df_routes, gr.Textbox(value=FINAL_MESSAGE_HEADER, container=True))

    for msg, _df_routes, final_message in stream_to_gradio(
        agent,
        # ... unchanged arguments ...
        reset_agent_memory=True,
        additional_args=additional_args,
    ):
        if msg.metadata["title"] == "π€ππ":
            messages[-1] = msg
        else:
            messages.append(msg)


# (changed section of stream_to_gradio)
    accumulated_thoughts = ""
    accumulated_errors = ""
    for step_log in agent.run(task, stream=True, reset=reset_agent_memory, **kwargs):
        for agent_thought in pull_messages_from_step(step_log, test_mode=test_mode):

            accumulated_thoughts += f"{agent_thought}\n\n"
            message = gr.ChatMessage(role="assistant", metadata={"title": "π€ππ"}, content=str(accumulated_thoughts))
            yield (message, df_routes, gr.Markdown(value=FINAL_MESSAGE_HEADER, container=True))

    final_answer = step_log  # Last log is the run's final_answer
````
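Not shown on this page is how `interact_with_agent` gets wired into the Gradio app. Since it is a generator, Gradio streams every yielded `(messages, df_routes, final_message)` tuple back to the outputs, which is how the growing thinking bubble reaches the browser. The wiring below is a hypothetical sketch: the component and state names are assumptions, not taken from this Space, and it assumes a Gradio version whose `gr.Chatbot` supports `type="messages"` (required for `gr.ChatMessage`).

```py
import gradio as gr

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages")      # expects gr.ChatMessage objects
    final_box = gr.Textbox(label="Final answer")
    prompt = gr.Textbox(label="Ask the agent")

    agent_state = gr.State(None)               # hypothetical: holds the agent instance
    df_routes_state = gr.State(None)           # hypothetical: holds the routes dataframe
    additional_args_state = gr.State({})       # hypothetical: extra kwargs for the run

    # Each yield from interact_with_agent updates the chat history, the routes
    # state, and the final-answer box as the agent run streams.
    prompt.submit(
        interact_with_agent,
        inputs=[agent_state, prompt, chatbot, df_routes_state, additional_args_state],
        outputs=[chatbot, df_routes_state, final_box],
    )

# demo.launch()
```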