AbenzaFran committed
Commit b8736af · verified · 1 Parent(s): 6fffc87

Update app.py

Files changed (1):
  1. app.py +58 -240
app.py CHANGED
@@ -1,258 +1,76 @@
  import os
- import io
- import json
- import time
- import queue
- import logging
+ import re
  import streamlit as st
  from dotenv import load_dotenv
- from PIL import Image
- from streamlit import session_state as ss
-
- # Optional: for direct Assistants API usage:
- # from openai import OpenAI
- # But we'll also show a LangChain approach:
  from langchain.agents.openai_assistant import OpenAIAssistantRunnable
- from langchain_core.agents import AgentFinish  # If you want to handle final states, etc.

- #############################################
- # 1) ENV & BASIC LOGGING
- #############################################
+ # Load environment variables
  load_dotenv()
+ api_key = os.getenv("OPENAI_API_KEY")
+ extractor_agent = os.getenv("ASSISTANT_ID_SOLUTION_SPECIFIER_A")

- logging.basicConfig(format="[%(asctime)s] %(levelname)+8s: %(message)s")
- logger = logging.getLogger(__name__)
- logger.setLevel(logging.INFO)
-
- OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
- ASSISTANT_ID = os.getenv("ASSISTANT_ID_SOLUTION_SPECIFIER_A")
-
-
- #############################################
- # 2) CREATE YOUR ASSISTANT RUNNABLE
- #############################################
- if not OPENAI_API_KEY or not ASSISTANT_ID:
-     raise ValueError("Missing OPENAI_API_KEY or ASSISTANT_ID in environment.")
-
- assistant_runnable = OpenAIAssistantRunnable(
-     assistant_id=ASSISTANT_ID,
-     api_key=OPENAI_API_KEY,
+ # Create the assistant
+ extractor_llm = OpenAIAssistantRunnable(
+     assistant_id=extractor_agent,
+     api_key=api_key,
      as_agent=True
  )

- # We'll store a queue for function calls (tools) we want to handle:
- if "tool_requests" not in ss:
-     ss["tool_requests"] = queue.Queue()
-
- #############################################
- # 3) OPTIONAL: EXAMPLE CUSTOM FUNCTION (TOOL)
- #############################################
- def hello_world(name: str) -> str:
-     """Example function to show how to handle 'requires_action' or function calls."""
-     time.sleep(3)
-     return f"Hello, {name}! This greeting took 3s."
-
- #############################################
- # 4) STREAMING HANDLER
- #############################################
- def data_streamer(stream_events):
-     """
-     Generator that processes streaming events from the Assistants API.
-     Yields either text, images, or triggers a function call queue item.
-     """
-     st.toast("Thinking...", icon="🤔")
-     content_produced = False
-
-     # We'll mimic the logic in that Medium article:
-     for response in stream_events:
-         event_type = response.event
-
-         if event_type == "thread.message.delta":
-             # The model is streaming partial text or possibly an image
-             content = response.data.delta.content[0]  # Typically a list of 1 item
-             content_type = content.type
-             if content_type == "text":
-                 text_value = content.text.value
-                 content_produced = True
-                 yield text_value  # yield text tokens
-             elif content_type == "image_file":
-                 # The Assistant can output images
-                 file_id = content.image_file.file_id
-                 # You can retrieve the file from the OpenAI Assistants API, e.g.
-                 # image_bytes = client.files.content(file_id).read()
-                 # but with LangChain's current approach, we don't have that convenience method exposed.
-                 # We'll skip a real API call for brevity:
-                 st.warning("Image streaming not fully implemented in this snippet.")
-                 # yield an "Image" object if you have it
-                 # yield Image.open(...)
-
-         elif event_type == "thread.run.requires_action":
-             # The Assistant wants to call a function
-             logger.info("Run requires action (function call) – queueing it.")
-             ss["tool_requests"].put(response)
-             # If no text was produced yet, yield a placeholder
-             if not content_produced:
-                 yield "[Assistant is requesting a function call]"
-             # Return so we can handle the function call
-             return
+ def remove_citation(text: str) -> str:
+     pattern = r"【\d+†\w+】"
+     return re.sub(pattern, "📚", text)

-         elif event_type == "thread.run.failed":
-             st.error("Run has failed.")
-             return
+ # Initialize session state for messages and thread_id
+ if "messages" not in st.session_state:
+     st.session_state["messages"] = []
+ if "thread_id" not in st.session_state:
+     st.session_state["thread_id"] = None

-     st.toast("Done.", icon="✅")
+ st.title("Solution Specifier A")

-
- def display_stream(stream_iterator, new_chat_context=True):
+ def predict(user_input: str) -> str:
      """
-     Wraps the `data_streamer` generator and writes to Streamlit in real time.
-     If `new_chat_context=True`, we put the response in a dedicated assistant chat bubble.
+     This function calls our OpenAIAssistantRunnable to get a response.
+     If we don't have a thread_id yet, we create a new thread on the first call.
+     Otherwise, we continue the existing thread.
      """
-     if new_chat_context:
-         with st.chat_message("assistant"):
-             response = st.write_stream(data_streamer(stream_iterator))
+     if st.session_state["thread_id"] is None:
+         response = extractor_llm.invoke({"content": user_input})
+         st.session_state["thread_id"] = response.thread_id
      else:
-         # If we are continuing inside the same bubble (like after a function call),
-         # we skip creating a new chat bubble.
-         response = st.write_stream(data_streamer(stream_iterator))
-     return response
-
-
- #############################################
- # 5) ACTUAL APP
- #############################################
- def main():
-     st.set_page_config(page_title="Streamlit + Assistants Demo", layout="centered")
-     st.title("Enhanced Assistant Demo")
-
-     # Initialize messages
-     if "messages" not in ss:
-         ss.messages = []
-
-     # Display previous messages
-     for msg in ss.messages:
-         with st.chat_message(msg["role"]):
-             st.write(msg["content"])
-
-     # -- (A) FILE UPLOAD DEMO --
-     # If you want the user to upload a CSV and pass it to the assistant, do so here.
-     uploaded_file = st.file_uploader("Upload a CSV for the assistant to analyze (optional)", type=["csv"])
-     if uploaded_file:
-         st.write("We won't fully implement code interpreter logic here, but you could pass it in as a tool resource.")
-         # For example, you might store it in the code interpreter or do a vector search, etc.
-
-     # -- (B) Chat Input --
-     user_input = st.chat_input("Ask me anything or request a function call...")
-
-     if user_input:
-         # Show user's message
+         response = extractor_llm.invoke(
+             {"content": user_input, "thread_id": st.session_state["thread_id"]}
+         )
+     output = response.return_values["output"]
+     return remove_citation(output)
+
+ # Display any existing messages (from a previous run or refresh)
+ for msg in st.session_state["messages"]:
+     if msg["role"] == "user":
          with st.chat_message("user"):
-             st.write(user_input)
-         ss.messages.append({"role": "user", "content": user_input})
-
-         # (C) Actually run the assistant in "streaming mode"
-         # For a brand-new conversation, omit thread_id. Otherwise, pass an existing one.
-         # We'll store one globally in session_state for continuity.
-         if "thread_id" not in ss:
-             ss["thread_id"] = None
-
-         # If we have no thread_id yet, this is a fresh conversation
-         if ss["thread_id"] is None:
-             resp = assistant_runnable.invoke({"content": user_input})
-             ss["thread_id"] = resp.thread_id
-
-             # For a single-turn request (non-streaming):
-             # resp_text = resp.return_values["output"]
-             # st.write(resp_text)
-
-             # But let's do streaming. The tricky part: langchain's `invoke` returns
-             # the final message rather than a streaming generator. So, to do streaming,
-             # we can call the underlying Assistants API directly. Or we can do a special
-             # approach that merges the new article's logic.
-
-             # For demonstration, let's store the final message in a new chat bubble:
-             final_text = resp.return_values["output"]
-             with st.chat_message("assistant"):
-                 st.write(final_text)
-             ss.messages.append({"role": "assistant", "content": final_text})
-
-         else:
-             # We have an existing thread. Let's continue the conversation with streaming.
-             # We'll do that using the new openai client approach or via the
-             # same approach as the Medium article. But that means we need direct access
-             # to the thread, which we can do by "cheating" with the raw Python SDK or by
-             # implementing a custom loop with the AgentExecutor.
-             #
-             # For demonstration, let's do something *conceptual*:
-             from openai import OpenAI
-             openai_client = OpenAI(api_key=OPENAI_API_KEY)
-
-             # We'll do a 'threads.runs.stream' call:
-             with openai_client.beta.threads.runs.stream(
-                 thread_id=ss["thread_id"],
-                 assistant_id=ASSISTANT_ID,
-             ) as stream:
-                 # We have to add the user's message to the thread first:
-                 openai_client.beta.threads.messages.create(
-                     thread_id=ss["thread_id"],
-                     role="user",
-                     content=user_input
-                 )
-                 # Now the assistant responds in the stream:
-                 display_stream(stream, new_chat_context=True)
-
-             # If there's a function call required:
-             while not ss["tool_requests"].empty():
-                 with st.chat_message("assistant"):
-                     tool_request = ss["tool_requests"].get()
-                     tool_outputs, thread_id, run_id = handle_requires_action(tool_request)
-                     with openai_client.beta.threads.runs.submit_tool_outputs_stream(
-                         thread_id=thread_id,
-                         run_id=run_id,
-                         tool_outputs=tool_outputs
-                     ) as tool_stream:
-                         display_stream(tool_stream, new_chat_context=False)
-
-     st.write("---")
-     st.info("This is a demo of combining streaming, function calls, and file upload.")
-
-
- def handle_requires_action(tool_request):
-     """
-     This function is triggered when the assistant tries to call a function mid-run.
-     We parse the arguments, call the function, and return the outputs so the run can continue.
-     """
-     st.toast("Assistant is requesting a function call...", icon="🔧")
-     data = tool_request.data
-     tool_outputs = []
-
-     # The list of tools the assistant wants to call
-     if not hasattr(data.required_action.submit_tool_outputs, "tool_calls"):
-         st.error("No tool calls found in the request.")
-         return [], data.thread_id, data.id
-
-     for tc in data.required_action.submit_tool_outputs.tool_calls:
-         func_name = tc.function.name
-         func_args = json.loads(tc.function.arguments or "{}")
-
-         if func_name == "hello_world":
-             name_str = func_args.get("name", "Anonymous")
-             result = hello_world(name_str)
-             # Return the output to the assistant
-             tool_outputs.append({
-                 "tool_call_id": tc.id,
-                 "output": result
-             })
-         else:
-             # Unrecognized function
-             error_msg = f"Function '{func_name}' not recognized."
-             tool_outputs.append({
-                 "tool_call_id": tc.id,
-                 "output": json.dumps({"error": error_msg})
-             })
-
-     return tool_outputs, data.thread_id, data.id
-
+             st.write(msg["content"])
+     else:
+         with st.chat_message("assistant"):
+             st.write(msg["content"])

- if __name__ == "__main__":
-     main()
+ # Create the chat input widget at the bottom of the page
+ user_input = st.chat_input("Type your message here...")
+
+ # When the user hits ENTER on st.chat_input
+ if user_input:
+     # Add the user message to session state
+     st.session_state["messages"].append({"role": "user", "content": user_input})
+
+     # Display the user's message
+     with st.chat_message("user"):
+         st.write(user_input)
+
+     # Get the assistant's response
+     response_text = predict(user_input)
+
+     # Add the assistant response to session state
+     st.session_state["messages"].append({"role": "assistant", "content": response_text})
+
+     # Display the assistant's reply
+     with st.chat_message("assistant"):
+         st.write(response_text)
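
Reviewer note: the new remove_citation helper is what keeps raw Assistants API citation markers (e.g. 【3†source】) out of the chat UI, swapping each one for a 📚 emoji before the reply is rendered. A minimal sketch of the regex in isolation; the sample reply string is invented for illustration:

import re

def remove_citation(text: str) -> str:
    # Same pattern as in app.py: a number, a dagger, and a word
    # wrapped in CJK brackets, e.g. 【3†source】
    pattern = r"【\d+†\w+】"
    return re.sub(pattern, "📚", text)

# Hypothetical assistant reply, for illustration only:
print(remove_citation("Polycarbonate works here【3†source】 for impact resistance."))
# -> Polycarbonate works here📚 for impact resistance.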
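The thread handling in predict is the other core of this rewrite: the first invoke creates a new Assistants API thread, and its thread_id is passed back on every later call so the assistant keeps conversational context. A minimal sketch of the same two-step pattern outside Streamlit, reusing extractor_llm from app.py and assuming both environment variables are set; the prompts are made up:

# Hypothetical two-turn exchange on a single thread.
first = extractor_llm.invoke({"content": "What is Solution A?"})
thread_id = first.thread_id  # remember the thread created on the first call

followup = extractor_llm.invoke(
    {"content": "Summarize that in one sentence.", "thread_id": thread_id}
)
print(followup.return_values["output"])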