AbenzaFran committed on
Commit
73d49e1
·
1 Parent(s): ad0f88c

Add application file

Browse files
Files changed (1) hide show
  1. app.py +76 -0
app.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os
import re
import streamlit as st
from dotenv import load_dotenv
from langchain.agents.openai_assistant import OpenAIAssistantRunnable

# Load environment variables from a local .env file (no-op if the file is absent).
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")
extractor_agent = os.getenv("ASSISTANT_ID_SOLUTION_SPECIFIER_A")

# Fail fast with an actionable message instead of letting a None key/id
# surface later as an opaque error from the OpenAI client.
if not api_key or not extractor_agent:
    raise RuntimeError(
        "Missing required environment variables: OPENAI_API_KEY and/or "
        "ASSISTANT_ID_SOLUTION_SPECIFIER_A"
    )

# Wrap the pre-built OpenAI Assistant so it can be invoked like a
# LangChain agent (as_agent=True yields AgentFinish-style responses).
extractor_llm = OpenAIAssistantRunnable(
    assistant_id=extractor_agent,
    api_key=api_key,
    as_agent=True,
)
+
def remove_citation(text: str) -> str:
    """Replace OpenAI Assistant citation markers in *text* with a book emoji.

    The original pattern ``【\\d+†\\w+】`` only matched markers like
    ``【12†source】``. Real annotation markers can also carry a chunk index
    after a colon (``【8:2†source】``) and labels with non-word characters,
    so the pattern is broadened to cover those while staying anchored on
    the ``【 … † … 】`` delimiters.
    """
    pattern = r"【\d+(?::\d+)?†[^】]+】"
    return re.sub(pattern, "📚", text)
22
+
# Seed session state on first run: the chat transcript and the Assistant
# thread id (None until the first call creates a thread).
for _key, _default in (("messages", []), ("thread_id", None)):
    if _key not in st.session_state:
        st.session_state[_key] = _default

st.title("Solution Specifier A")
30
+
def predict(user_input: str) -> str:
    """Send *user_input* to the OpenAI Assistant and return its cleaned reply.

    On the first call (no thread id in session state) the assistant creates
    a new thread, whose id is remembered; subsequent calls continue that
    thread. Citation markers are stripped from the reply before returning.
    """
    thread_id = st.session_state["thread_id"]
    payload = {"content": user_input}
    if thread_id is not None:
        payload["thread_id"] = thread_id

    response = extractor_llm.invoke(payload)

    if thread_id is None:
        # First exchange: remember the freshly created thread.
        st.session_state["thread_id"] = response.thread_id

    return remove_citation(response.return_values["output"])
46
+
# Replay the stored transcript (survives Streamlit reruns/refreshes).
for msg in st.session_state["messages"]:
    role = "user" if msg["role"] == "user" else "assistant"
    with st.chat_message(role):
        st.write(msg["content"])
55
+
56
+ # Create the chat input widget at the bottom of the page
57
+ user_input = st.chat_input("Type your message here...")
58
+
59
+ # When the user hits ENTER on st.chat_input
60
+ if user_input:
61
+ # Add the user message to session state
62
+ st.session_state["messages"].append({"role": "user", "content": user_input})
63
+
64
+ # Display the user's message
65
+ with st.chat_message("user"):
66
+ st.write(user_input)
67
+
68
+ # Get the assistant's response
69
+ response_text = predict(user_input)
70
+
71
+ # Add the assistant response to session state
72
+ st.session_state["messages"].append({"role": "assistant", "content": response_text})
73
+
74
+ # Display the assistant's reply
75
+ with st.chat_message("assistant"):
76
+ st.write(response_text)