# Streamlit chat UI for the "Solution Specifier A" OpenAI assistant.
import os
import re

import streamlit as st
from dotenv import load_dotenv
from langchain.agents.openai_assistant import OpenAIAssistantRunnable

# Load the API key and assistant id from a local .env file.
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")
extractor_agent = os.getenv("ASSISTANT_ID_SOLUTION_SPECIFIER_A")

# Wrap the pre-configured OpenAI Assistant as a LangChain runnable.
# With as_agent=True, invoke() returns an agent-style result whose
# .return_values["output"] holds the assistant's reply text (see predict()).
extractor_llm = OpenAIAssistantRunnable(
    assistant_id=extractor_agent,
    api_key=api_key,
    as_agent=True,
)
def remove_citation(text: str) -> str:
    """Strip OpenAI Assistants citation markers (e.g. ``【12†source】``) from *text*.

    File-search answers are annotated with ``【<number>†<label>】`` markers
    that are meaningless to end users, so they are removed before display.
    """
    # NOTE(review): the original pattern/replacement were mojibake
    # (r"γ\d+β \w+γ" -> "π"); restored to the standard citation-marker
    # pattern with an empty replacement — confirm against live assistant output.
    pattern = r"【\d+†\s*\w+】"
    return re.sub(pattern, "", text)
# Initialize per-session chat history and the Assistants API thread id.
# Streamlit reruns the whole script on every interaction, so these guards
# preserve state across reruns.
if "messages" not in st.session_state:
    st.session_state["messages"] = []
if "thread_id" not in st.session_state:
    st.session_state["thread_id"] = None

st.title("Solution Specifier A")
def predict(user_input: str) -> str:
    """Send *user_input* to the assistant and return its citation-free reply.

    On the first call no thread exists yet, so the API creates one and its
    id is cached in session state; subsequent calls pass that thread_id so
    the assistant keeps the conversation context.
    """
    if st.session_state["thread_id"] is None:
        # First message: the API creates a new thread; remember its id.
        response = extractor_llm.invoke({"content": user_input})
        st.session_state["thread_id"] = response.thread_id
    else:
        # Continue the existing conversation thread.
        response = extractor_llm.invoke(
            {"content": user_input, "thread_id": st.session_state["thread_id"]}
        )
    output = response.return_values["output"]
    return remove_citation(output)
# Replay the chat history (Streamlit reruns the script on every interaction,
# so previously exchanged messages must be re-rendered each time).
for msg in st.session_state["messages"]:
    # Roles are only ever "user" or "assistant", and st.chat_message accepts
    # the role name directly — no need for a per-role branch.
    with st.chat_message(msg["role"]):
        st.write(msg["content"])
# Chat input pinned to the bottom of the page; returns the submitted text
# (or None when nothing was entered) on each rerun.
user_input = st.chat_input("Type your message here...")

if user_input:
    # Record and echo the user's message.
    st.session_state["messages"].append({"role": "user", "content": user_input})
    with st.chat_message("user"):
        st.write(user_input)

    # Query the assistant, then record and display its reply.
    response_text = predict(user_input)
    st.session_state["messages"].append(
        {"role": "assistant", "content": response_text}
    )
    with st.chat_message("assistant"):
        st.write(response_text)