Commit · 6853d31
1 Parent(s): 96157a7
Now the AI message is no longer replaced by "[Assistant reply streamed above]" in the chat UI.
app.py CHANGED
@@ -113,11 +113,13 @@ class StreamHandler:
     def __init__(self, client: Any):
         self.client = client
         self.text_processor = TextProcessor()
+        self.complete_response = []
 
     def stream_data(self) -> Generator[Any, None, None]:
         """Stream data from the assistant run."""
         st.toast("Thinking...", icon="🤔")
         content_produced = False
+        self.complete_response = []  # Reset for new stream
 
         try:
             for event in st.session_state.run_stream:
@@ -131,6 +133,8 @@ class StreamHandler:
                 raise StreamingError(f"Assistant run failed: {event}")
 
             st.toast("Completed", icon="✅")
+            # Return the complete response for storage
+            return "".join(self.complete_response)
         except Exception as e:
             logger.error(f"Streaming error: {e}")
             st.error(f"An error occurred while streaming: {str(e)}")
@@ -141,7 +145,9 @@ class StreamHandler:
                 content = event.data.delta.content[0]
                 match content.type:
                     case "text":
-                        yield self.text_processor.remove_citations(content.text.value)
+                        processed_text = self.text_processor.remove_citations(content.text.value)
+                        self.complete_response.append(processed_text)  # Store the chunk
+                        yield processed_text
                     case "image_file":
                         image_content = io.BytesIO(self.client.files.content(content.image_file.file_id).read())
                         yield Image.open(image_content)
@@ -199,7 +205,7 @@ class AssistantManager:
         self.tool_handler = ToolRequestHandler()
 
     @traceable
-    def generate_reply(self, user_input: str) -> None:
+    def generate_reply(self, user_input: str) -> str:
         """Generate and stream assistant's reply."""
         # Ensure thread exists
         if not st.session_state.thread:
@@ -212,24 +218,28 @@ class AssistantManager:
             content=user_input
         )
 
+        complete_response = ""
+
         # Stream initial response
         with self.client.beta.threads.runs.stream(
             thread_id=st.session_state.thread.id,
             assistant_id=self.assistant_id,
         ) as run_stream:
-            self._display_stream(run_stream)
+            complete_response = self._display_stream(run_stream)
 
         # Handle any tool requests
         self._process_tool_requests()
+
+        return complete_response
 
-    def _display_stream(self, run_stream: Any, create_context: bool = True) -> None:
+    def _display_stream(self, run_stream: Any, create_context: bool = True) -> str:
         """Display streaming content."""
         st.session_state.run_stream = run_stream
         if create_context:
             with st.chat_message("assistant"):
-                st.write_stream(self.stream_handler.stream_data)
+                return st.write_stream(self.stream_handler.stream_data)
         else:
-            st.write_stream(self.stream_handler.stream_data)
+            return st.write_stream(self.stream_handler.stream_data)
 
     def _process_tool_requests(self) -> None:
         """Process any pending tool requests."""
@@ -292,10 +302,10 @@ class ChatApplication:
 
         # Generate and display assistant reply
         try:
-            self.assistant_manager.generate_reply(user_input)
+            complete_response = self.assistant_manager.generate_reply(user_input)
             self.state_manager.add_message(
                 "assistant",
-                "[Assistant reply streamed above]"
+                complete_response
             )
         except Exception as e:
             st.error(f"Error generating response: {str(e)}")
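The fix hinges on a documented Streamlit behavior: st.write_stream renders each yielded chunk as it arrives and returns the full response, which is the concatenated string when every chunk is text. Persisting that return value in the chat history is what lets reruns re-render the real reply instead of a placeholder. Below is a minimal, self-contained sketch of the pattern; the echo generator and the `messages` list are illustrative stand-ins, not code from app.py.

import time
from typing import Generator

import streamlit as st

if "messages" not in st.session_state:
    st.session_state.messages = []  # chat history that survives reruns

# Replay stored history on every rerun, so earlier assistant replies
# reappear verbatim instead of as a placeholder string.
for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        st.write(msg["content"])

def reply_stream() -> Generator[str, None, None]:
    """Stand-in for StreamHandler.stream_data: yields text chunks."""
    for chunk in ["Hello", ", ", "world", "!"]:
        time.sleep(0.05)
        yield chunk

if user_input := st.chat_input("Say something"):
    st.session_state.messages.append({"role": "user", "content": user_input})
    with st.chat_message("user"):
        st.write(user_input)
    with st.chat_message("assistant"):
        # Renders chunks live and returns the concatenated string,
        # because every yielded item is a string.
        full_reply = st.write_stream(reply_stream())
    # Store the real text, not a "[Assistant reply streamed above]" marker.
    st.session_state.messages.append({"role": "assistant", "content": full_reply})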
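One subtlety in the new stream_data is the added `return "".join(self.complete_response)`: in Python, a return inside a generator becomes the value of the StopIteration raised at exhaustion, and ordinary iteration (a for loop, which is presumably how st.write_stream drains the stream) never sees it. The text that reaches the history therefore comes from the yielded chunks that st.write_stream concatenates itself; the joined return value is a redundant record kept by the handler (and, to be precise, would call for a Generator[Any, None, str] annotation). A short sketch of the semantics:

def gen():
    parts = []
    for chunk in ("a", "b", "c"):
        parts.append(chunk)
        yield chunk
    return "".join(parts)  # becomes StopIteration.value, invisible to a for loop

collected = []
for chunk in gen():  # a for loop absorbs the StopIteration silently
    collected.append(chunk)
print("".join(collected))  # -> "abc"

# The generator's return value is only observable by driving it manually:
g = gen()
try:
    while True:
        next(g)
except StopIteration as stop:
    print(stop.value)  # -> "abc"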