AbenzaFran commited on
Commit
f6d4f89
·
1 Parent(s): 0248b3b
Files changed (1) hide show
  1. app.py +192 -134
app.py CHANGED
@@ -9,6 +9,7 @@ import json
9
  import queue
10
  import logging
11
  from PIL import Image
 
12
 
13
  # ------------------------
14
  # LangSmith imports
@@ -18,12 +19,15 @@ from langsmith.wrappers import wrap_openai
18
  from langsmith import traceable
19
 
20
  # ------------------------
21
- # Configure logging (optional but recommended)
22
  # ------------------------
23
  def init_logging():
24
  logging.basicConfig(
25
- format="[%(asctime)s] %(levelname)+8s: %(message)s",
26
  level=logging.INFO,
 
 
 
27
  )
28
  return logging.getLogger()
29
 
@@ -37,7 +41,9 @@ api_key = os.getenv("OPENAI_API_KEY")
37
  assistant_id = os.getenv("ASSISTANT_ID_SOLUTION_SPECIFIER_A") # The assistant we want to call
38
 
39
  if not api_key or not assistant_id:
40
- raise RuntimeError("Please set OPENAI_API_KEY and ASSISTANT_ID_SOLUTION_SPECIFIER_A in your environment")
 
 
41
 
42
  # ------------------------
43
  # Wrap the OpenAI client for LangSmith traceability
@@ -46,168 +52,235 @@ openai_client = openai.Client(api_key=api_key)
46
  client = wrap_openai(openai_client)
47
 
48
  # ------------------------
49
- # Streamlit session state
50
  # ------------------------
51
  if "messages" not in st.session_state:
52
  st.session_state["messages"] = []
53
 
54
- if "thread" not in st.session_state:
55
- st.session_state["thread"] = None
56
 
57
  if "tool_requests" not in st.session_state:
58
  st.session_state["tool_requests"] = queue.Queue()
59
 
 
 
 
60
  tool_requests = st.session_state["tool_requests"]
61
 
62
  # ------------------------
63
- # Utility to remove citations like: 【12†somefile】
64
- # You can adapt to your own "annotations" handling if needed
65
  # ------------------------
66
  def remove_citation(text: str) -> str:
67
  pattern = r"【\d+†\w+】"
68
  return re.sub(pattern, "📚", text)
69
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
70
  # ------------------------
71
  # Helper: data streamer for text & images
72
- # Adapted from the Medium article approach
73
- # to handle text deltas, images, or function calls
74
  # ------------------------
75
  def data_streamer():
76
  """
77
  Streams data from the assistant run. Yields text or images
78
  and enqueues tool requests (function calls) to tool_requests.
79
  """
80
- st.toast("Thinking...", icon=":material/emoji_objects:")
 
81
  content_produced = False
82
-
83
- for event in st.session_state["run_stream"]:
84
- match event.event:
85
- case "thread.message.delta":
86
- # A chunk of text or an image
87
- content = event.data.delta.content[0]
88
- match content.type:
89
- case "text":
90
- text_value = content.text.value
91
- content_produced = True
92
- # Optionally remove citations, etc.
93
- yield remove_citation(text_value)
94
-
95
- case "image_file":
96
- # If the assistant returns an image
97
- file_id = content.image_file.file_id
98
- content_produced = True
99
- image_content = io.BytesIO(client.files.content(file_id).read())
100
- yield Image.open(image_content)
101
-
102
- case "thread.run.requires_action":
103
- # The assistant is requesting a function call
104
- logger.info(f"[Tool Request] {event}")
105
- tool_requests.put(event)
106
- if not content_produced:
107
- # We can yield a placeholder if the model hasn't said anything yet
108
- yield "[LLM is requesting a function call]"
109
- return
110
-
111
- case "thread.run.failed":
112
- # The run failed for some reason
113
- logger.error(f"Run failed: {event}")
114
- return
115
-
116
- # If we successfully streamed everything
117
- st.toast("Completed", icon=":material/emoji_objects:")
 
 
 
118
 
119
  # ------------------------
120
  # Helper: display the streaming content
121
- # This wraps data_streamer in st.write_stream
122
- # so you can see partial tokens in real-time
123
  # ------------------------
124
  def display_stream(run_stream, create_context=True):
125
  """
126
  Grabs tokens from data_streamer() and displays them in real-time.
127
  If `create_context=True`, messages are displayed as an assistant block.
128
  """
129
- st.session_state["run_stream"] = run_stream
 
 
130
  if create_context:
131
  with st.chat_message("assistant"):
132
- streamed_result = st.write_stream(data_streamer)
 
133
  else:
134
- streamed_result = st.write_stream(data_streamer)
135
-
136
- # Return whatever the final token stream is
137
- return streamed_result
138
 
139
- # ------------------------
140
- # Example of handling a function call (requires_action)
141
- # If your Assistant uses function calling (e.g. code interpreter),
142
- # you'd parse arguments, run the function, and return output here.
143
- # ------------------------
144
- def handle_tool_request(event):
145
- """
146
- Demonstrates how you might handle a function call.
147
- In practice, you'd parse the arguments from the event
148
- and run your custom logic. Then return outputs as JSON.
149
- """
150
- st.toast("Running a function (this is user-defined code)", icon=":material/function:")
151
- tool_outputs = []
152
- data = event.data
153
- for tool_call in data.required_action.submit_tool_outputs.tool_calls:
154
- if tool_call.function.arguments:
155
- function_args = json.loads(tool_call.function.arguments)
156
- else:
157
- function_args = {}
158
-
159
- match tool_call.function.name:
160
- case "hello_world":
161
- # Example: implement a user-defined function
162
- name = function_args.get("name", "anonymous")
163
- time.sleep(2) # Simulate a long-running function
164
- output_val = f"Hello, {name}! This was from a local function."
165
- tool_outputs.append({"tool_call_id": tool_call.id, "output": output_val})
166
- case _:
167
- # If unknown function name
168
- msg = {"status": "error", "message": "Unknown function request."}
169
- tool_outputs.append({"tool_call_id": tool_call.id, "output": json.dumps(msg)})
170
- return tool_outputs, data.thread_id, data.id
171
 
172
  # ------------------------
173
- # Main chat logic
174
  # ------------------------
175
- @traceable # Make this function traceable via LangSmith
176
  def generate_assistant_reply(user_input: str):
177
  """
178
- 1. If no thread exists, create a new one.
179
- 2. Insert user message into the thread.
180
- 3. Use the Assistants API to create a run + stream the response.
181
- 4. If the assistant requests a function call, handle it and stream again.
182
  """
 
 
183
  # Create or retrieve thread
184
- if not st.session_state["thread"]:
185
- st.session_state["thread"] = client.beta.threads.create()
186
- thread = st.session_state["thread"]
 
 
 
 
187
 
188
  # Add user message to the thread
189
- client.beta.threads.messages.create(
190
- thread_id=thread.id,
191
- role="user",
192
- content=user_input
193
- )
194
-
195
- # Start streaming assistant response
196
- with client.beta.threads.runs.stream(
197
- thread_id=thread.id,
198
- assistant_id=assistant_id,
199
- ) as run_stream:
200
- display_stream(run_stream)
201
-
202
- # If the assistant requested any tool calls, handle them now
 
 
 
 
 
 
 
 
 
 
 
203
  while not tool_requests.empty():
204
  event = tool_requests.get()
205
  tool_outputs, t_id, run_id = handle_tool_request(event)
206
- # Submit tool outputs
207
- with client.beta.threads.runs.submit_tool_outputs_stream(
208
- thread_id=t_id, run_id=run_id, tool_outputs=tool_outputs
209
- ) as next_stream:
210
- display_stream(next_stream, create_context=False)
 
 
 
 
 
 
211
 
212
  # ------------------------
213
  # Streamlit UI
@@ -218,33 +291,18 @@ def main():
218
 
219
  # Display existing conversation
220
  for msg in st.session_state["messages"]:
221
- with st.chat_message(msg["role"]):
222
- st.write(msg["content"])
223
 
224
  user_input = st.chat_input("Type your message here...")
225
  if user_input:
226
- # Show user's message
227
- with st.chat_message("user"):
228
- st.write(user_input)
229
 
230
- # Keep in session state
231
  st.session_state["messages"].append({"role": "user", "content": user_input})
232
 
233
  # Generate assistant reply
234
  generate_assistant_reply(user_input)
235
 
236
- # Track the final text from streamed tokens
237
- final_text = ""
238
- for msg in st.session_state.get("streamed_messages", []):
239
- final_text += msg
240
-
241
- # Store the complete assistant response in session state
242
- st.session_state["messages"].append(
243
- {"role": "assistant", "content": final_text}
244
- )
245
-
246
- # Clear streamed messages for next interaction
247
- st.session_state["streamed_messages"] = []
248
-
249
  if __name__ == "__main__":
250
  main()
 
9
  import queue
10
  import logging
11
  from PIL import Image
12
+ from typing import Optional
13
 
14
  # ------------------------
15
  # LangSmith imports
 
19
  from langsmith import traceable
20
 
21
  # ------------------------
22
+ # Configure logging
23
  # ------------------------
24
def init_logging():
    """Configure root logging (INFO, timestamped format, stderr handler)
    and return the root logger."""
    stream_handler = logging.StreamHandler()
    logging.basicConfig(
        level=logging.INFO,
        format="[%(asctime)s] %(levelname)s: %(message)s",
        handlers=[stream_handler],
    )
    return logging.getLogger()
33
 
 
41
  assistant_id = os.getenv("ASSISTANT_ID_SOLUTION_SPECIFIER_A") # The assistant we want to call
42
 
43
# Fail fast: without both variables the app cannot reach the assistant,
# so log for the operator, surface an error in the UI, and halt the script.
if not api_key or not assistant_id:
    logger.error("Environment variables OPENAI_API_KEY and ASSISTANT_ID_SOLUTION_SPECIFIER_A must be set.")
    st.error("Missing environment configuration. Please set the required environment variables.")
    st.stop()
47
 
48
  # ------------------------
49
  # Wrap the OpenAI client for LangSmith traceability
 
52
  client = wrap_openai(openai_client)
53
 
54
  # ------------------------
55
+ # Streamlit session state initialization
56
  # ------------------------
57
  if "messages" not in st.session_state:
58
  st.session_state["messages"] = []
59
 
60
+ if "thread_id" not in st.session_state:
61
+ st.session_state["thread_id"] = None
62
 
63
  if "tool_requests" not in st.session_state:
64
  st.session_state["tool_requests"] = queue.Queue()
65
 
66
+ if "current_run" not in st.session_state:
67
+ st.session_state["current_run"] = None
68
+
69
  tool_requests = st.session_state["tool_requests"]
70
 
71
  # ------------------------
72
+ # Utility to remove citations like: 【12†somefile】
 
73
  # ------------------------
74
def remove_citation(text: str) -> str:
    """Replace assistant citation markers (e.g. 【12†somefile】) with 📚."""
    return re.sub(r"【\d+†\w+】", "📚", text)
77
 
78
+ # ------------------------
79
+ # Function to handle tool requests (function calls)
80
+ # ------------------------
81
def handle_tool_request(event):
    """
    Processes function call requests from the assistant.

    For every tool call in the event, parses its JSON arguments, dispatches
    to the matching local implementation, and collects the result (or a
    JSON-encoded error payload) keyed by the tool call id.

    Returns:
        (tool_outputs, thread_id, run_id) — ready for
        submit_tool_outputs_stream.
    """
    logger.info(f"Handling tool request: {event}")
    st.toast("Processing a function call...", icon=":hammer_and_wrench:")
    data = event.data
    tool_outputs = []

    # Map function names to local implementations; unknown names raise below.
    dispatch = {
        "hello_world": hello_world,
        "another_function": another_function,
    }

    for tool_call in data.required_action.submit_tool_outputs.tool_calls:
        function_name = tool_call.function.name
        raw_args = tool_call.function.arguments
        arguments = json.loads(raw_args) if raw_args else {}

        logger.info(f"Executing function '{function_name}' with arguments {arguments}")

        try:
            impl = dispatch.get(function_name)
            if impl is None:
                raise ValueError(f"Unrecognized function name: {function_name}")
            output = impl(**arguments)
            tool_outputs.append({"tool_call_id": tool_call.id, "output": output})
            logger.info(f"Function '{function_name}' executed successfully.")
        except Exception as e:
            # Report failures back to the assistant as a JSON error payload
            # instead of crashing the run.
            logger.error(f"Error executing function '{function_name}': {e}")
            error_response = {"status": "error", "message": str(e)}
            tool_outputs.append({"tool_call_id": tool_call.id, "output": json.dumps(error_response)})

    st.toast("Function call completed.", icon=":white_check_mark:")
    return tool_outputs, data.thread_id, data.id
115
+
116
+ # ------------------------
117
+ # Example function implementations
118
+ # ------------------------
119
def hello_world(name: str = "World", *, delay: float = 2.0) -> str:
    """
    Example tool implementation: return a greeting for *name*.

    Args:
        name: Who to greet; defaults to "World".
        delay: Seconds to sleep to simulate a long-running task.
            Parameterized (keyword-only, default preserves the original
            2-second behavior) so tests and callers can disable it.

    Returns:
        The greeting string.
    """
    if delay > 0:
        time.sleep(delay)
    return f"Hello, {name}! This message is from a function call."
125
+
126
def another_function(param1: str, param2: int, *, delay: float = 1.0) -> str:
    """
    Another example tool implementation: echo its two parameters.

    Args:
        param1: Arbitrary string argument.
        param2: Arbitrary integer argument.
        delay: Seconds to sleep to simulate work. Parameterized
            (keyword-only, default preserves the original 1-second
            behavior) so tests and callers can disable it.

    Returns:
        A string describing the received parameters.
    """
    if delay > 0:
        time.sleep(delay)
    return f"Received param1: {param1} and param2: {param2}."
132
+
133
+ # ------------------------
134
+ # Streamlit UI Components
135
+ # ------------------------
136
def display_message(role: str, content: str):
    """
    Render one message in the Streamlit chat interface.

    Assistant-produced PIL images go through st.image; every other
    payload (normally text) is rendered with st.write.
    """
    with st.chat_message(role):
        render_as_image = role == "assistant" and isinstance(content, Image.Image)
        if render_as_image:
            st.image(content)
        else:
            st.write(content)
145
+
146
  # ------------------------
147
  # Helper: data streamer for text & images
 
 
148
  # ------------------------
149
  def data_streamer():
150
  """
151
  Streams data from the assistant run. Yields text or images
152
  and enqueues tool requests (function calls) to tool_requests.
153
  """
154
+ logger.info("Starting data streamer.")
155
+ st.toast("Thinking...", icon=":hourglass_flowing_sand:")
156
  content_produced = False
157
+ accumulated_content = ""
158
+
159
+ try:
160
+ for event in st.session_state["current_run"]:
161
+ match event.event:
162
+ case "thread.message.delta":
163
+ content = event.data.delta.content[0]
164
+ match content.type:
165
+ case "text":
166
+ text_value = content.text.value
167
+ accumulated_content += text_value
168
+ content_produced = True
169
+ yield remove_citation(text_value)
170
+
171
+ case "image_file":
172
+ file_id = content.image_file.file_id
173
+ logger.info(f"Received image file ID: {file_id}")
174
+ image_content = io.BytesIO(client.files.content(file_id).read())
175
+ image = Image.open(image_content)
176
+ yield image
177
+
178
+ case "thread.run.requires_action":
179
+ logger.info(f"Run requires action: {event}")
180
+ tool_requests.put(event)
181
+ if not content_produced:
182
+ yield "[LLM is requesting a function call...]"
183
+ return
184
+
185
+ case "thread.run.failed":
186
+ logger.error(f"Run failed: {event}")
187
+ st.error("The assistant encountered an error and couldn't complete the request.")
188
+ return
189
+
190
+ except Exception as e:
191
+ logger.exception(f"Exception in data_streamer: {e}")
192
+ st.error(f"An unexpected error occurred: {e}")
193
+
194
+ finally:
195
+ st.toast("Completed", icon=":checkered_flag:")
196
 
197
  # ------------------------
198
  # Helper: display the streaming content
 
 
199
  # ------------------------
200
def display_stream(run_stream, create_context=True):
    """
    Consume data_streamer() and display each chunk in real time.

    If `create_context=True`, the output is wrapped in an assistant chat
    block. Text chunks are collected while streaming and, once the stream
    finishes, appended to st.session_state["messages"] so the conversation
    history survives Streamlit reruns.

    Fix: the previous version read `accumulated_content`, a variable local
    to data_streamer and undefined in this scope, raising NameError after
    every stream; the accumulation now happens here on the yielded chunks
    (which data_streamer has already citation-stripped).
    """
    logger.info("Displaying stream.")
    st.session_state["current_run"] = run_stream

    text_parts = []

    def _consume():
        # Render every streamed chunk; keep the text ones so the final
        # assistant message can be persisted below.
        for chunk in data_streamer():
            if isinstance(chunk, str):
                text_parts.append(chunk)
            display_message("assistant", chunk)

    if create_context:
        with st.chat_message("assistant"):
            _consume()
    else:
        _consume()

    # Persist the complete text reply (images were already displayed).
    if final_text := "".join(text_parts).strip():
        st.session_state["messages"].append({"role": "assistant", "content": final_text})
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
223
 
224
  # ------------------------
225
+ # Main chat logic with traceability
226
  # ------------------------
227
@traceable  # Enable LangSmith traceability
def generate_assistant_reply(user_input: str):
    """
    Handle one user turn: create or continue the thread, send the message
    to the assistant, stream the response, then execute any requested
    function calls and stream their follow-up output.

    Args:
        user_input: The raw text the user typed into the chat box.
    """
    logger.info(f"User input received: {user_input}")

    # Create the thread lazily and reuse its id on later turns. Only the id
    # is needed by the API calls below, so the threads.retrieve round-trip
    # the previous version performed on every turn is unnecessary.
    thread_id = st.session_state["thread_id"]
    if not thread_id:
        logger.info("Creating a new thread.")
        thread = client.beta.threads.create()
        thread_id = thread.id
        st.session_state["thread_id"] = thread_id
    else:
        logger.info(f"Using existing thread ID: {thread_id}")

    # Add user message to the thread
    try:
        client.beta.threads.messages.create(
            thread_id=thread_id,
            role="user",
            content=user_input
        )
        logger.info("User message added to thread.")
    except Exception as e:
        logger.exception(f"Failed to add user message to thread: {e}")
        st.error("Failed to send your message. Please try again.")
        return

    # Create and stream assistant response
    try:
        with client.beta.threads.runs.stream(
            thread_id=thread_id,
            assistant_id=assistant_id,
        ) as run_stream:
            st.session_state["current_run"] = run_stream
            display_stream(run_stream)
    except Exception as e:
        logger.exception(f"Failed to stream assistant response: {e}")
        st.error("Failed to receive a response from the assistant. Please try again.")

    # Handle any function calls requested by the assistant during the run.
    while not tool_requests.empty():
        event = tool_requests.get()
        tool_outputs, t_id, run_id = handle_tool_request(event)

        try:
            with client.beta.threads.runs.submit_tool_outputs_stream(
                thread_id=t_id,
                run_id=run_id,
                tool_outputs=tool_outputs
            ) as tool_stream:
                # No new chat context: continue inside the current one.
                display_stream(tool_stream, create_context=False)
        except Exception as e:
            logger.exception(f"Failed to submit tool outputs: {e}")
            st.error("Failed to process a function call from the assistant.")
284
 
285
  # ------------------------
286
  # Streamlit UI
 
291
 
292
  # Display existing conversation
293
  for msg in st.session_state["messages"]:
294
+ display_message(msg["role"], msg["content"])
 
295
 
296
  user_input = st.chat_input("Type your message here...")
297
  if user_input:
298
+ # Display user's message
299
+ display_message("user", user_input)
 
300
 
301
+ # Add user message to session state
302
  st.session_state["messages"].append({"role": "user", "content": user_input})
303
 
304
  # Generate assistant reply
305
  generate_assistant_reply(user_input)
306
 
 
 
 
 
 
 
 
 
 
 
 
 
 
307
  if __name__ == "__main__":
308
  main()