Nioooor committed on
Commit
59321ff
·
verified ·
1 Parent(s): 4c4dd9e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -103
app.py CHANGED
@@ -68,97 +68,8 @@ with st.sidebar:
68
  if "messages" not in st.session_state:
69
  st.session_state.chain = init_chain()
70
  st.session_state.messages = [{"role": "assistant", "content": "How may I help you today?"}]
71
-
72
- # Function for generating response using the last three conversations
73
- # def generate_response(prompt_input):
74
- # # Initialize result
75
- # result = ''
76
-
77
- # # Prepare conversation history: get the last 3 user and assistant messages
78
- # conversation_history = ""
79
- # recent_messages = st.session_state.messages[-3:] # Last 3 user and assistant exchanges (each exchange is 2 messages)
80
-
81
- # for message in recent_messages:
82
- # conversation_history += f"{message['role']}: {message['content']}\n"
83
-
84
- # # Append the current user prompt to the conversation history
85
- # conversation_history += f"user: {prompt_input}\n"
86
-
87
- # # Invoke chain with the truncated conversation history
88
- # res = st.session_state.chain.invoke(conversation_history)
89
-
90
- # # Process response (as in the original code)
91
- # if res['result'].startswith('According to the provided context, '):
92
- # res['result'] = res['result'][35:]
93
- # res['result'] = res['result'][0].upper() + res['result'][1:]
94
- # elif res['result'].startswith('Based on the provided context, '):
95
- # res['result'] = res['result'][31:]
96
- # res['result'] = res['result'][0].upper() + res['result'][1:]
97
- # elif res['result'].startswith('According to the provided text, '):
98
- # res['result'] = res['result'][34:]
99
- # res['result'] = res['result'][0].upper() + res['result'][1:]
100
- # elif res['result'].startswith('According to the context, '):
101
- # res['result'] = res['result'][26:]
102
- # res['result'] = res['result'][0].upper() + res['result'][1:]
103
-
104
-
105
- # result += res['result']
106
-
107
- # # Process sources
108
- # result += '\n\nSources: '
109
- # sources = []
110
- # for source in res["source_documents"]:
111
- # sources.append(source.metadata['source'][122:-4]) # Adjust as per your source format
112
-
113
- # sources = list(set(sources)) # Remove duplicates
114
- # source_list = ", ".join(sources)
115
-
116
- # result += source_list
117
-
118
- # return result, res['result'], source_list
119
- # return result, res['result']
120
- # def generate_response(prompt_input):
121
- # # Prepare conversation history: get the last 3 user and assistant messages
122
- # conversation_history = ""
123
- # recent_messages = st.session_state.messages[-3:] # Last 3 user and assistant exchanges
124
-
125
- # for message in recent_messages:
126
- # conversation_history += f"{message['role']}: {message['content']}\n"
127
-
128
- # # Append the current user prompt to the conversation history
129
- # conversation_history += f"user: {prompt_input}\n"
130
-
131
- # # Invoke chain with the truncated conversation history
132
- # res = st.session_state.chain.invoke(conversation_history)
133
-
134
- # # Process response
135
- # result_text = res['result']
136
- # if result_text.startswith('According to the provided context, '):
137
- # result_text = result_text[35:].capitalize()
138
- # elif result_text.startswith('Based on the provided context, '):
139
- # result_text = result_text[31:].capitalize()
140
- # elif result_text.startswith('According to the provided text, '):
141
- # result_text = result_text[34:].capitalize()
142
- # elif result_text.startswith('According to the context, '):
143
- # result_text = result_text[26:].capitalize()
144
-
145
- # # Extract and format sources
146
- # sources = []
147
- # for source in res.get("source_documents", []): # Safeguard with .get() in case sources are missing
148
- # source_path = source.metadata.get('source', '')
149
- # formatted_source = source_path[122:-4] if source_path else "Unknown source"
150
- # sources.append(formatted_source)
151
-
152
- # # Remove duplicates and combine into a single string
153
- # unique_sources = list(set(sources))
154
- # source_list = ", ".join(unique_sources)
155
-
156
- # # Combine response text with sources
157
- # result_text += f"\n\n**Sources:** {source_list}" if source_list else "\n\n**Sources:** None"
158
-
159
- # return result_text
160
-
161
- # return res['result']
162
 
163
  def generate_response(prompt_input):
164
  try:
@@ -166,19 +77,11 @@ def generate_response(prompt_input):
166
  retriever = st.session_state.chain.retriever
167
  relevant_context = retriever.get_relevant_documents(prompt_input) # Retrieve context only for the current prompt
168
 
169
- # Prepare full conversation history for the LLM
170
- conversation_history = ""
171
- for message in st.session_state.messages:
172
- conversation_history += f"{message['role']}: {message['content']}\n"
173
-
174
- # Append the current user prompt to the conversation history
175
- conversation_history += f"user: {prompt_input}\n"
176
-
177
  # Format the input for the chain with the retrieved context
178
  formatted_input = (
179
  f"Context:\n"
180
  f"{' '.join([doc.page_content for doc in relevant_context])}\n\n"
181
- f"Conversation:\n{conversation_history}"
182
  )
183
 
184
  # Invoke the RetrievalQA chain directly with the formatted input
@@ -214,6 +117,9 @@ def generate_response(prompt_input):
214
  # # Combine response text with sources
215
  # result_text += f"\n\n**Sources:** {source_list}" if source_list else "\n\n**Sources:** None"
216
 
 
 
 
217
  return result_text
218
 
219
  except Exception as e:
@@ -223,6 +129,7 @@ def generate_response(prompt_input):
223
  else:
224
  return f"❌ An error occurred: {str(e)}"
225
 
 
226
  # Display chat messages
227
  for message in st.session_state.messages:
228
  with st.chat_message(message["role"]):
@@ -230,6 +137,8 @@ for message in st.session_state.messages:
230
 
231
  # User-provided prompt for input box
232
  if prompt := st.chat_input(placeholder="Ask a question..."):
 
 
233
  # Append user query to session state
234
  st.session_state.messages.append({"role": "user", "content": prompt})
235
  with st.chat_message("user"):
@@ -244,6 +153,12 @@ if prompt := st.chat_input(placeholder="Ask a question..."):
244
  message_placeholder.markdown(response) # Replace placeholder with actual response
245
  st.session_state.messages.append({"role": "assistant", "content": response})
246
 
 
 
 
 
 
 
247
  # Clear chat history function
248
  def clear_chat_history():
249
  # Clear chat messages (reset the assistant greeting)
@@ -252,8 +167,8 @@ def clear_chat_history():
252
  # Reinitialize the chain to clear any stored history (ensures it forgets previous user inputs)
253
  st.session_state.chain = init_chain()
254
 
255
- # Clear any additional session state that might be remembering user inquiries
256
- if "recent_user_messages" in st.session_state:
257
- del st.session_state["recent_user_messages"] # Clear remembered user inputs
258
 
259
  st.sidebar.button('Clear Chat History', on_click=clear_chat_history)
 
68
  if "messages" not in st.session_state:
69
  st.session_state.chain = init_chain()
70
  st.session_state.messages = [{"role": "assistant", "content": "How may I help you today?"}]
71
+ st.session_state.query_counter = 0 # Track the number of user queries
72
+ st.session_state.conversation_history = "" # Keep track of history for the LLM
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
73
 
74
  def generate_response(prompt_input):
75
  try:
 
77
  retriever = st.session_state.chain.retriever
78
  relevant_context = retriever.get_relevant_documents(prompt_input) # Retrieve context only for the current prompt
79
 
 
 
 
 
 
 
 
 
80
  # Format the input for the chain with the retrieved context
81
  formatted_input = (
82
  f"Context:\n"
83
  f"{' '.join([doc.page_content for doc in relevant_context])}\n\n"
84
+ f"Conversation:\n{st.session_state.conversation_history}user: {prompt_input}\n"
85
  )
86
 
87
  # Invoke the RetrievalQA chain directly with the formatted input
 
117
  # # Combine response text with sources
118
  # result_text += f"\n\n**Sources:** {source_list}" if source_list else "\n\n**Sources:** None"
119
 
120
+ # Update conversation history
121
+ st.session_state.conversation_history += f"user: {prompt_input}\nassistant: {result_text}\n"
122
+
123
  return result_text
124
 
125
  except Exception as e:
 
129
  else:
130
  return f"❌ An error occurred: {str(e)}"
131
 
132
+
133
  # Display chat messages
134
  for message in st.session_state.messages:
135
  with st.chat_message(message["role"]):
 
137
 
138
  # User-provided prompt for input box
139
  if prompt := st.chat_input(placeholder="Ask a question..."):
140
+ # Increment query counter
141
+ st.session_state.query_counter += 1
142
  # Append user query to session state
143
  st.session_state.messages.append({"role": "user", "content": prompt})
144
  with st.chat_message("user"):
 
153
  message_placeholder.markdown(response) # Replace placeholder with actual response
154
  st.session_state.messages.append({"role": "assistant", "content": response})
155
 
156
+ # Check if query counter has reached the limit
157
+ if st.session_state.query_counter >= 10:
158
+ st.sidebar.warning("Conversation context has been reset after 10 queries.")
159
+ st.session_state.query_counter = 0 # Reset the counter
160
+ st.session_state.conversation_history = "" # Clear conversation history for the LLM
161
+
162
  # Clear chat history function
163
  def clear_chat_history():
164
  # Clear chat messages (reset the assistant greeting)
 
167
  # Reinitialize the chain to clear any stored history (ensures it forgets previous user inputs)
168
  st.session_state.chain = init_chain()
169
 
170
+ # Clear the query counter and conversation history
171
+ st.session_state.query_counter = 0
172
+ st.session_state.conversation_history = ""
173
 
174
  st.sidebar.button('Clear Chat History', on_click=clear_chat_history)