AbenzaFran committed on
Commit
7c11bd6
·
1 Parent(s): b8736af
Files changed (2) hide show
  1. app.py +30 -20
  2. requirements.txt +2 -1
app.py CHANGED
@@ -2,20 +2,15 @@ import os
2
  import re
3
  import streamlit as st
4
  from dotenv import load_dotenv
5
- from langchain.agents.openai_assistant import OpenAIAssistantRunnable
 
6
 
7
  # Load environment variables
8
  load_dotenv()
9
  api_key = os.getenv("OPENAI_API_KEY")
10
- extractor_agent = os.getenv("ASSISTANT_ID_SOLUTION_SPECIFIER_A")
11
-
12
- # Create the assistant
13
- extractor_llm = OpenAIAssistantRunnable(
14
- assistant_id=extractor_agent,
15
- api_key=api_key,
16
- as_agent=True
17
- )
18
 
 
19
  def remove_citation(text: str) -> str:
20
  pattern = r"【\d+†\w+】"
21
  return re.sub(pattern, "πŸ“š", text)
@@ -28,21 +23,36 @@ if "thread_id" not in st.session_state:
28
 
29
  st.title("Solution Specifier A")
30
 
31
- def predict(user_input: str) -> str:
 
 
32
  """
33
- This function calls our OpenAIAssistantRunnable to get a response.
34
- If we don't have a thread_id yet, we create a new thread on the first call.
35
- Otherwise, we continue the existing thread.
36
  """
37
- if st.session_state["thread_id"] is None:
38
- response = extractor_llm.invoke({"content": user_input})
39
- st.session_state["thread_id"] = response.thread_id
 
 
 
 
40
  else:
41
- response = extractor_llm.invoke(
42
- {"content": user_input, "thread_id": st.session_state["thread_id"]}
 
43
  )
44
- output = response.return_values["output"]
45
- return remove_citation(output)
 
 
 
 
 
 
 
 
46
 
47
  # Display any existing messages (from a previous run or refresh)
48
  for msg in st.session_state["messages"]:
 
2
  import re
3
  import streamlit as st
4
  from dotenv import load_dotenv
5
+ import openai
6
+ from langsmith import traceable
7
 
8
  # Load environment variables
9
  load_dotenv()
10
  api_key = os.getenv("OPENAI_API_KEY")
11
+ openai.api_key = api_key
 
 
 
 
 
 
 
12
 
13
+ # Helper function to remove citations
14
def remove_citation(text: str) -> str:
    """Replace OpenAI-style citation markers (e.g. 【12†source】) with a book glyph."""
    return re.sub(r"【\d+†\w+】", "πŸ“š", text)
 
23
 
24
  st.title("Solution Specifier A")
25
 
26
+ # Traceable function for predict logic
27
@traceable
def get_response(user_input: str, thread_id: str = None):
    """
    Call the OpenAI chat-completion API and return the reply for ``user_input``.

    Parameters:
        user_input: The user's message, sent as a single "user" role message.
        thread_id: Optional identifier from a previous call; forwarded as the
            API's ``user`` field when present.

    Returns:
        A ``(reply_text, response_id)`` tuple, where ``response_id`` is the
        completion id reported by the API.
    """
    # Build the request once instead of duplicating the create() call per branch.
    request_kwargs = {
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": user_input}],
    }
    if thread_id:
        # NOTE(review): OpenAI's `user` field is an end-user identifier for
        # abuse monitoring only — it does NOT make the API continue a prior
        # conversation, and `response["id"]` is a completion id, not a thread
        # id. True continuation requires resending the prior message history.
        # Preserved as-is pending a design decision — TODO confirm intent.
        request_kwargs["user"] = thread_id
    response = openai.ChatCompletion.create(**request_kwargs)
    return response["choices"][0]["message"]["content"], response["id"]
47
+
48
+ # Streamlit app logic
49
def predict(user_input: str) -> str:
    """Return the model's cleaned reply, tracking the conversation id in session state.

    On the first call (no stored thread id) the returned response id is saved
    into ``st.session_state["thread_id"]``; later calls pass it along.
    """
    current_thread = st.session_state["thread_id"]
    if current_thread is None:
        reply, new_thread = get_response(user_input)
        st.session_state["thread_id"] = new_thread
    else:
        reply, _ = get_response(user_input, thread_id=current_thread)
    return remove_citation(reply)
56
 
57
  # Display any existing messages (from a previous run or refresh)
58
  for msg in st.session_state["messages"]:
requirements.txt CHANGED
@@ -1,3 +1,4 @@
1
  langchain
2
  langchain-openai
3
- python-dotenv
 
 
1
  langchain
2
  langchain-openai
3
+ python-dotenv
4
+ langsmith