Commit 713dd57
Parent(s): 7c11bd6

refactor. no langchain

Files changed:
- app.py (+185 -56)
- requirements.txt (+3 -3)
app.py
CHANGED
@@ -2,85 +2,214 @@ import os
 import re
 import streamlit as st
 from dotenv import load_dotenv
-import openai
 from langsmith import traceable
+from langsmith.wrappers import wrap_openai
+import openai
+from PIL import Image
+import io
+import json
+import queue
+import logging
+import time
 
 # Load environment variables
 load_dotenv()
+OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
+LANGSMITH_API_KEY = os.getenv("LANGSMITH_API_KEY")
+ASSISTANT_ID = os.getenv("ASSISTANT_ID")
+
+if not all([OPENAI_API_KEY, LANGSMITH_API_KEY, ASSISTANT_ID]):
+    raise ValueError("Please set OPENAI_API_KEY, LANGSMITH_API_KEY, and ASSISTANT_ID in your .env file.")
+
+# Initialize logging
+logging.basicConfig(format="[%(asctime)s] %(levelname)s: %(message)s", level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+# Initialize LangSmith's traceable OpenAI client
+# (LangSmith itself is configured through environment variables)
+wrapped_openai = wrap_openai(openai.Client(api_key=OPENAI_API_KEY))
+
+# Define a traceable function to handle Assistant interactions
+@traceable
+def create_run(thread_id: str, assistant_id: str):
+    """
+    Creates a streaming run with the Assistant. The returned stream
+    manager is meant to be consumed as a context manager.
+    """
+    return wrapped_openai.beta.threads.runs.stream(
+        thread_id=thread_id,
+        assistant_id=assistant_id,
+        model="gpt-4o",  # Replace with your desired model
+    )
 
-#
+# Function to remove citations as per your original code
 def remove_citation(text: str) -> str:
     pattern = r"【\d+†\w+】"
     return re.sub(pattern, "📚", text)
 
-# Initialize session state for messages and thread_id
+# Initialize session state for messages, thread_id, and tool_requests
 if "messages" not in st.session_state:
     st.session_state["messages"] = []
 if "thread_id" not in st.session_state:
     st.session_state["thread_id"] = None
+if "tool_requests" not in st.session_state:
+    st.session_state["tool_requests"] = queue.Queue()
+
+tool_requests = st.session_state["tool_requests"]
 
-st.
+# Initialize Streamlit page
+st.set_page_config(page_title="Solution Specifier A", layout="centered")
+st.title("Solution Specifier A")
 
-#
-@traceable
-def get_response(user_input: str, thread_id: str = None):
-    """
-    This function calls the OpenAI API to get a response.
-    If thread_id is provided, it continues the conversation.
-    Otherwise, it starts a new conversation.
-    """
-    messages = [{"role": "user", "content": user_input}]
-    if thread_id:
-        response = openai.ChatCompletion.create(
-            model="gpt-3.5-turbo",
-            messages=messages,
-            user=thread_id
-        )
-    else:
-        response = openai.ChatCompletion.create(
-            model="gpt-3.5-turbo",
-            messages=messages
-        )
-    return response["choices"][0]["message"]["content"], response["id"]
-
-# Streamlit app logic
-def predict(user_input: str) -> str:
-    if st.session_state["thread_id"] is None:
-        response_text, thread_id = get_response(user_input)
-        st.session_state["thread_id"] = thread_id
-    else:
-        response_text, _ = get_response(user_input, thread_id=st.session_state["thread_id"])
-    return remove_citation(response_text)
 
-# Display
+# Display existing messages
 for msg in st.session_state["messages"]:
     if msg["role"] == "user":
         with st.chat_message("user"):
             st.write(msg["content"])
     else:
         with st.chat_message("assistant"):
-            st.write(msg["content"])
+            if isinstance(msg["content"], Image.Image):
+                st.image(msg["content"])
+            else:
+                st.write(msg["content"])
 
-#
+# Chat input widget
 user_input = st.chat_input("Type your message here...")
 
-#
-...
+# Function to handle tool requests (function calls)
+def handle_requires_action(tool_request):
+    st.toast("Running a function", icon=":material/function:")
+    tool_outputs = []
+    data = tool_request.data
+    for tool in data.required_action.submit_tool_outputs.tool_calls:
+        if tool.function.arguments:
+            function_arguments = json.loads(tool.function.arguments)
+        else:
+            function_arguments = {}
+        match tool.function.name:
+            case "hello_world":
+                logger.info("Calling hello_world function")
+                answer = hello_world(**function_arguments)
+                tool_outputs.append({"tool_call_id": tool.id, "output": answer})
+            case _:
+                logger.error(f"Unrecognized function name: {tool.function.name}. Tool: {tool}")
+                ret_val = {
+                    "status": "error",
+                    "message": f"Function {tool.function.name} is not recognized. Ensure the correct function name and try again.",
+                }
+                tool_outputs.append({"tool_call_id": tool.id, "output": json.dumps(ret_val)})
+    st.toast("Function completed", icon=":material/function:")
+    return tool_outputs, data.thread_id, data.id
+
+# Example function that could be called by the Assistant
+def hello_world(name: str) -> str:
+    time.sleep(2)  # Simulate a long-running task
+    return f"Hello {name}!"
+
+# Function to add assistant messages to session state
+def add_message_to_state_session(message):
+    # Keep images as-is; skip empty text chunks
+    if isinstance(message, Image.Image) or len(message) > 0:
+        st.session_state["messages"].append({"role": "assistant", "content": message})
+
+# Function to process streamed data
+def data_streamer(stream):
+    """
+    Stream data from the assistant. Text deltas are yielded as strings,
+    images as PIL images; tool requests are put in the queue.
+    """
+    logger.info("Starting data streamer")
+    st.toast("Thinking...", icon=":material/emoji_objects:")
+    content_produced = False
+    try:
+        for response in stream:
+            event = response.event
+            if event == "thread.message.delta":
+                content = response.data.delta.content[0]
+                if content.type == "text":
+                    value = content.text.value
+                    content_produced = True
+                    yield value
+                elif content.type == "image_file":
+                    logger.info("Image file received")
+                    image_content = io.BytesIO(wrapped_openai.files.content(content.image_file.file_id).read())
+                    img = Image.open(image_content)
+                    content_produced = True
+                    yield img
+            elif event == "thread.run.requires_action":
+                logger.info("Run requires action")
+                tool_requests.put(response)
+                if not content_produced:
+                    yield "[LLM requires a function call]"
+                break
+            elif event == "thread.run.failed":
+                logger.error("Run failed")
+                yield "[Run failed]"
+                break
+    finally:
+        st.toast("Completed", icon=":material/emoji_objects:")
+        logger.info("Finished data streamer")
+
+# Function to display the streamed response
+def display_stream(stream):
     with st.chat_message("assistant"):
-        ...
+        for content in data_streamer(stream):
+            if isinstance(content, Image.Image):
+                st.image(content)
+            else:
+                st.write(content)
+            add_message_to_state_session(content)
+
+# Main function to handle user input and assistant response
+def main():
+    if user_input:
+        # Add user message to session state
+        st.session_state["messages"].append({"role": "user", "content": user_input})
+
+        # Display the user's message
+        with st.chat_message("user"):
+            st.write(user_input)
+
+        # Create a new thread if it doesn't exist
+        if st.session_state["thread_id"] is None:
+            logger.info("Creating new thread")
+            thread = wrapped_openai.beta.threads.create()
+            st.session_state["thread_id"] = thread.id
+        else:
+            thread = wrapped_openai.beta.threads.retrieve(st.session_state["thread_id"])
+
+        # Add the user message to the thread
+        wrapped_openai.beta.threads.messages.create(
+            thread_id=thread.id,
+            role="user",
+            content=user_input
+        )
+
+        # Create a new run and stream it into the chat
+        logger.info("Creating a new run with streaming")
+        with create_run(thread.id, ASSISTANT_ID) as stream:
+            display_stream(stream)
+
+        # Handle tool requests, if any, and stream the continued run
+        while not tool_requests.empty():
+            logger.info("Handling tool requests")
+            tool_request = tool_requests.get()
+            tool_outputs, thread_id, run_id = handle_requires_action(tool_request)
+            with wrapped_openai.beta.threads.runs.submit_tool_outputs_stream(
+                thread_id=thread_id,
+                run_id=run_id,
+                tool_outputs=tool_outputs
+            ) as new_stream:
+                display_stream(new_stream)
+
+# Run the main function
+main()
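The hello_world branch in handle_requires_action only fires if the Assistant itself declares a matching function tool. That registration is not part of this commit; a minimal one-time setup sketch, where the tool name, description, and schema are assumptions chosen to match the handler above:

    import os
    import openai

    client = openai.Client(api_key=os.getenv("OPENAI_API_KEY"))
    # Hypothetical registration of the hello_world tool on the Assistant
    client.beta.assistants.update(
        os.getenv("ASSISTANT_ID"),
        tools=[{
            "type": "function",
            "function": {
                "name": "hello_world",
                "description": "Greet a person by name.",
                "parameters": {
                    "type": "object",
                    "properties": {"name": {"type": "string"}},
                    "required": ["name"],
                },
            },
        }],
    )

With a tool like this in place, a run that calls it emits a thread.run.requires_action event, which data_streamer parks in the queue for handle_requires_action to resolve.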
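The startup check in the new app.py expects three variables, typically supplied through a .env file read by load_dotenv(). A minimal sketch with placeholder values:

    OPENAI_API_KEY=sk-...
    LANGSMITH_API_KEY=lsv2_...
    ASSISTANT_ID=asst_...

Depending on the langsmith version, tracing may also need LANGSMITH_TRACING=true (or the older LANGCHAIN_TRACING_V2=true) before the wrapped client sends traces.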
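For reference, the citation filter kept from the old version turns the Assistants API's file-citation markers into a book emoji. A quick sketch of its behavior:

    >>> remove_citation("See the spec 【7†source】 for details.")
    'See the spec 📚 for details.'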
requirements.txt
CHANGED
@@ -1,4 +1,4 @@
-langchain
-langchain-openai
 python-dotenv
-langsmith
+langsmith
+openai
+Pillow
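To try the refactored app locally, the usual Streamlit workflow applies: install the dependencies with `pip install -r requirements.txt`, place the .env file next to app.py, and start the UI with `streamlit run app.py`.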