AbenzaFran committed on
Commit 24b6129 · 1 Parent(s): f6d4f89

went back. too many issues

Files changed (2)
  1. app.py +55 -287
  2. requirements.txt +2 -3
app.py CHANGED
@@ -2,307 +2,75 @@ import os
 import re
 import streamlit as st
 from dotenv import load_dotenv
+from langchain.agents.openai_assistant import OpenAIAssistantRunnable
 
-import io
-import time
-import json
-import queue
-import logging
-from PIL import Image
-from typing import Optional
-
-# ------------------------
-# LangSmith imports
-# ------------------------
-import openai
-from langsmith.wrappers import wrap_openai
-from langsmith import traceable
-
-# ------------------------
-# Configure logging
-# ------------------------
-def init_logging():
-    logging.basicConfig(
-        format="[%(asctime)s] %(levelname)s: %(message)s",
-        level=logging.INFO,
-        handlers=[
-            logging.StreamHandler()
-        ]
-    )
-    return logging.getLogger()
-
-logger = init_logging()
-
-# ------------------------
 # Load environment variables
-# ------------------------
 load_dotenv()
 api_key = os.getenv("OPENAI_API_KEY")
-assistant_id = os.getenv("ASSISTANT_ID_SOLUTION_SPECIFIER_A") # The assistant we want to call
+extractor_agent = os.getenv("ASSISTANT_ID_SOLUTION_SPECIFIER_A")
 
-if not api_key or not assistant_id:
-    logger.error("Environment variables OPENAI_API_KEY and ASSISTANT_ID_SOLUTION_SPECIFIER_A must be set.")
-    st.error("Missing environment configuration. Please set the required environment variables.")
-    st.stop()
+# Create the assistant
+extractor_llm = OpenAIAssistantRunnable(
+    assistant_id=extractor_agent,
+    api_key=api_key,
+    as_agent=True
+)
 
-# ------------------------
-# Wrap the OpenAI client for LangSmith traceability
-# ------------------------
-openai_client = openai.Client(api_key=api_key)
-client = wrap_openai(openai_client)
+def remove_citation(text: str) -> str:
+    pattern = r"【\d+†\w+】"
+    return re.sub(pattern, "📚", text)
 
-# ------------------------
-# Streamlit session state initialization
-# ------------------------
+# Initialize session state for messages and thread_id
 if "messages" not in st.session_state:
     st.session_state["messages"] = []
-
 if "thread_id" not in st.session_state:
     st.session_state["thread_id"] = None
 
-if "tool_requests" not in st.session_state:
-    st.session_state["tool_requests"] = queue.Queue()
+st.title("Solution Specifier A")
 
-if "current_run" not in st.session_state:
-    st.session_state["current_run"] = None
-
-tool_requests = st.session_state["tool_requests"]
-
-# ------------------------
-# Utility to remove citations like:
-# ------------------------
-def remove_citation(text: str) -> str:
-    pattern = r"【\d+†\w+】"
-    return re.sub(pattern, "📚", text)
-
-# ------------------------
-# Function to handle tool requests (function calls)
-# ------------------------
-def handle_tool_request(event):
+def predict(user_input: str) -> str:
     """
-    Processes function call requests from the assistant.
-    """
-    logger.info(f"Handling tool request: {event}")
-    st.toast("Processing a function call...", icon=":hammer_and_wrench:")
-    tool_outputs = []
-    data = event.data
-
-    for tool_call in data.required_action.submit_tool_outputs.tool_calls:
-        function_name = tool_call.function.name
-        arguments = json.loads(tool_call.function.arguments) if tool_call.function.arguments else {}
-
-        logger.info(f"Executing function '{function_name}' with arguments {arguments}")
-
-        try:
-            # Map function names to actual implementations
-            if function_name == "hello_world":
-                output = hello_world(**arguments)
-            elif function_name == "another_function":
-                output = another_function(**arguments)
-            else:
-                raise ValueError(f"Unrecognized function name: {function_name}")
-
-            tool_outputs.append({"tool_call_id": tool_call.id, "output": output})
-            logger.info(f"Function '{function_name}' executed successfully.")
-
-        except Exception as e:
-            logger.error(f"Error executing function '{function_name}': {e}")
-            error_response = {"status": "error", "message": str(e)}
-            tool_outputs.append({"tool_call_id": tool_call.id, "output": json.dumps(error_response)})
-
-    st.toast("Function call completed.", icon=":white_check_mark:")
-    return tool_outputs, data.thread_id, data.id
-
-# ------------------------
-# Example function implementations
-# ------------------------
-def hello_world(name: str = "World") -> str:
-    """
-    Example function that returns a greeting.
-    """
-    time.sleep(2) # Simulate a delay for a long-running task
-    return f"Hello, {name}! This message is from a function call."
-
-def another_function(param1: str, param2: int) -> str:
-    """
-    Another example function.
-    """
-    time.sleep(1)
-    return f"Received param1: {param1} and param2: {param2}."
-
-# ------------------------
-# Streamlit UI Components
-# ------------------------
-def display_message(role: str, content: str):
-    """
-    Displays a message in the Streamlit chat interface.
-    """
-    with st.chat_message(role):
-        if role == "assistant" and isinstance(content, Image.Image):
-            st.image(content)
-        else:
-            st.write(content)
-
-# ------------------------
-# Helper: data streamer for text & images
-# ------------------------
-def data_streamer():
+    This function calls our OpenAIAssistantRunnable to get a response.
+    If we don't have a thread_id yet, we create a new thread on the first call.
+    Otherwise, we continue the existing thread.
     """
-    Streams data from the assistant run. Yields text or images
-    and enqueues tool requests (function calls) to tool_requests.
-    """
-    logger.info("Starting data streamer.")
-    st.toast("Thinking...", icon=":hourglass_flowing_sand:")
-    content_produced = False
-    accumulated_content = ""
-
-    try:
-        for event in st.session_state["current_run"]:
-            match event.event:
-                case "thread.message.delta":
-                    content = event.data.delta.content[0]
-                    match content.type:
-                        case "text":
-                            text_value = content.text.value
-                            accumulated_content += text_value
-                            content_produced = True
-                            yield remove_citation(text_value)
-
-                        case "image_file":
-                            file_id = content.image_file.file_id
-                            logger.info(f"Received image file ID: {file_id}")
-                            image_content = io.BytesIO(client.files.content(file_id).read())
-                            image = Image.open(image_content)
-                            yield image
-
-                case "thread.run.requires_action":
-                    logger.info(f"Run requires action: {event}")
-                    tool_requests.put(event)
-                    if not content_produced:
-                        yield "[LLM is requesting a function call...]"
-                    return
-
-                case "thread.run.failed":
-                    logger.error(f"Run failed: {event}")
-                    st.error("The assistant encountered an error and couldn't complete the request.")
-                    return
-
-    except Exception as e:
-        logger.exception(f"Exception in data_streamer: {e}")
-        st.error(f"An unexpected error occurred: {e}")
-
-    finally:
-        st.toast("Completed", icon=":checkered_flag:")
-
-# ------------------------
-# Helper: display the streaming content
-# ------------------------
-def display_stream(run_stream, create_context=True):
-    """
-    Grabs tokens from data_streamer() and displays them in real-time.
-    If `create_context=True`, messages are displayed as an assistant block.
-    """
-    logger.info("Displaying stream.")
-    st.session_state["current_run"] = run_stream
-
-    if create_context:
-        with st.chat_message("assistant"):
-            for content in data_streamer():
-                display_message("assistant", content)
-    else:
-        for content in data_streamer():
-            display_message("assistant", content)
-
-    # After streaming, accumulate the final content
-    # This assumes that the entire content has been yielded
-    # You might want to enhance this to handle partial content or interruptions
-    # Here, we simply capture accumulated content if it's text
-    # For images, it's already displayed
-    if accumulated_text := remove_citation(accumulated_content.strip()):
-        st.session_state["messages"].append({"role": "assistant", "content": accumulated_text})
-
-# ------------------------
-# Main chat logic with traceability
-# ------------------------
-@traceable # Enable LangSmith traceability
-def generate_assistant_reply(user_input: str):
-    """
-    Handles user input by creating or continuing a thread,
-    sending the message to the assistant, and streaming the response.
-    """
-    logger.info(f"User input received: {user_input}")
-
-    # Create or retrieve thread
-    if not st.session_state["thread_id"]:
-        logger.info("Creating a new thread.")
-        thread = client.beta.threads.create()
-        st.session_state["thread_id"] = thread.id
+    if st.session_state["thread_id"] is None:
+        response = extractor_llm.invoke({"content": user_input})
+        st.session_state["thread_id"] = response.thread_id
     else:
-        thread = client.beta.threads.retrieve(thread_id=st.session_state["thread_id"])
-        logger.info(f"Using existing thread ID: {thread.id}")
-
-    # Add user message to the thread
-    try:
-        client.beta.threads.messages.create(
-            thread_id=thread.id,
-            role="user",
-            content=user_input
+        response = extractor_llm.invoke(
+            {"content": user_input, "thread_id": st.session_state["thread_id"]}
         )
-        logger.info("User message added to thread.")
-    except Exception as e:
-        logger.exception(f"Failed to add user message to thread: {e}")
-        st.error("Failed to send your message. Please try again.")
-        return
-
-    # Create and stream assistant response
-    try:
-        with client.beta.threads.runs.stream(
-            thread_id=thread.id,
-            assistant_id=assistant_id,
-        ) as run_stream:
-            st.session_state["current_run"] = run_stream
-            display_stream(run_stream)
-    except Exception as e:
-        logger.exception(f"Failed to stream assistant response: {e}")
-        st.error("Failed to receive a response from the assistant. Please try again.")
-
-    # Handle any function calls requested by the assistant
-    while not tool_requests.empty():
-        event = tool_requests.get()
-        tool_outputs, t_id, run_id = handle_tool_request(event)
-
-        try:
-            with client.beta.threads.runs.submit_tool_outputs_stream(
-                thread_id=t_id,
-                run_id=run_id,
-                tool_outputs=tool_outputs
-            ) as tool_stream:
-                display_stream(tool_stream, create_context=False)
-        except Exception as e:
-            logger.exception(f"Failed to submit tool outputs: {e}")
-            st.error("Failed to process a function call from the assistant.")
-
-# ------------------------
-# Streamlit UI
-# ------------------------
-def main():
-    st.set_page_config(page_title="Solution Specifier A", layout="centered")
-    st.title("Solution Specifier A")
-
-    # Display existing conversation
-    for msg in st.session_state["messages"]:
-        display_message(msg["role"], msg["content"])
-
-    user_input = st.chat_input("Type your message here...")
-    if user_input:
-        # Display user's message
-        display_message("user", user_input)
-
-        # Add user message to session state
-        st.session_state["messages"].append({"role": "user", "content": user_input})
-
-        # Generate assistant reply
-        generate_assistant_reply(user_input)
-
-if __name__ == "__main__":
-    main()
+    output = response.return_values["output"]
+    return remove_citation(output)
+
+# Display any existing messages (from a previous run or refresh)
+for msg in st.session_state["messages"]:
+    if msg["role"] == "user":
+        with st.chat_message("user"):
+            st.write(msg["content"])
+    else:
+        with st.chat_message("assistant"):
+            st.write(msg["content"])
+
+# Create the chat input widget at the bottom of the page
+user_input = st.chat_input("Type your message here...")
+
+# When the user hits ENTER on st.chat_input
+if user_input:
+    # Add the user message to session state
+    st.session_state["messages"].append({"role": "user", "content": user_input})
+
+    # Display the user's message
+    with st.chat_message("user"):
+        st.write(user_input)
+
+    # Get the assistant's response
+    response_text = predict(user_input)
+
+    # Add the assistant response to session state
+    st.session_state["messages"].append({"role": "assistant", "content": response_text})
+
+    # Display the assistant's reply
+    with st.chat_message("assistant"):
+        st.write(response_text)
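
Since the rewritten app.py loads its configuration through load_dotenv(), a quick local sanity check is to confirm that the two environment variables the script reads are actually present. A minimal, illustrative sketch (the variable names come from the code above; the check script itself is not part of this repo):

    import os
    from dotenv import load_dotenv

    # Pick up a local .env file, if one exists, then report on the two expected variables.
    load_dotenv()
    for name in ("OPENAI_API_KEY", "ASSISTANT_ID_SOLUTION_SPECIFIER_A"):
        print(name, "is set" if os.getenv(name) else "is MISSING")

The app itself starts the usual Streamlit way, e.g. streamlit run app.py.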
requirements.txt CHANGED
@@ -1,4 +1,3 @@
 python-dotenv
-langsmith
-openai
-Pillow
+langchain
+langchain-openai
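
After this dependency swap, an illustrative way to confirm the packages resolve under their import names (python-dotenv imports as dotenv, langchain-openai as langchain_openai; streamlit is assumed to be provided by the runtime, since it does not appear in the hunk shown):

    import importlib.util

    # Report which of the modules used by app.py can currently be imported.
    for module in ("dotenv", "langchain", "langchain_openai", "streamlit"):
        print(module, "available" if importlib.util.find_spec(module) else "not installed")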