mavinsao committed on
Commit
14a6afe
·
verified ·
1 Parent(s): 43e9ad6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -4
app.py CHANGED
@@ -52,7 +52,7 @@ Recommendation:
52
 
53
  PROMPT = PromptTemplate(
54
  template=prompt_template,
55
- input_variables=["chat_history", "question", "context"]
56
  )
57
 
58
  # Initialize the language model
@@ -119,11 +119,12 @@ if prompt := st.chat_input("What are you looking to learn?"):
119
  st.markdown(prompt)
120
 
121
  # Retrieve relevant context from the vector store based on user input
122
- context = retriever.retrieve(prompt)
 
123
 
124
  # Assistant response generator with streaming effect
125
  with st.chat_message("assistant"):
126
- response = qa_chain({"question": prompt, "context": context})
127
  response_text = response["answer"]
128
 
129
  # Simulate streaming response
@@ -139,4 +140,4 @@ if prompt := st.chat_input("What are you looking to learn?"):
139
  # Optional: Add a button to clear the chat history
140
  if st.button("Clear Chat History"):
141
  st.session_state.messages.clear()
142
- st.experimental_rerun()
 
52
 
53
  PROMPT = PromptTemplate(
54
  template=prompt_template,
55
+ input_variables=["chat_history", "question"]
56
  )
57
 
58
  # Initialize the language model
 
119
  st.markdown(prompt)
120
 
121
  # Retrieve relevant context from the vector store based on user input
122
+ context_documents = retriever.retrieve(prompt)
123
+ context = " ".join([doc.page_content for doc in context_documents]) # Combine the content of the retrieved documents
124
 
125
  # Assistant response generator with streaming effect
126
  with st.chat_message("assistant"):
127
+ response = qa_chain({"question": prompt, "chat_history": st.session_state.messages, "context": context})
128
  response_text = response["answer"]
129
 
130
  # Simulate streaming response
 
140
  # Optional: Add a button to clear the chat history
141
  if st.button("Clear Chat History"):
142
  st.session_state.messages.clear()
143
+ st.experimental_rerun()