Update app.py
app.py
CHANGED
@@ -200,14 +200,11 @@ def ask_question(question, temperature, top_p, repetition_penalty, web_search):
     if not question:
         return "Please enter a question."

-    if question in memory_database:
+    if question in memory_database and not web_search:
         answer = memory_database[question]
     else:
-        embed = get_embeddings()
         model = get_model(temperature, top_p, repetition_penalty)

-        history_str = "\n".join([f"Q: {item['question']}\nA: {item['answer']}" for item in conversation_history])
-
         if web_search:
             search_results = google_search(question)
             context_str = "\n".join([result["text"] for result in search_results if result["text"]])
@@ -219,27 +216,34 @@ def ask_question(question, temperature, top_p, repetition_penalty, web_search):

             Current Question: {question}

+            If the web search results don't contain relevant information, state that the information is not available in the search results.
             Provide a concise and direct answer to the question:
             """
+            prompt_val = ChatPromptTemplate.from_template(prompt_template)
+            formatted_prompt = prompt_val.format(context=context_str, question=question)
         else:
+            embed = get_embeddings()
             database = FAISS.load_local("faiss_database", embed, allow_dangerous_deserialization=True)
+            history_str = "\n".join([f"Q: {item['question']}\nA: {item['answer']}" for item in conversation_history])
+
             if is_related_to_history(question, conversation_history):
                 context_str = "No additional context needed. Please refer to the conversation history."
             else:
                 retriever = database.as_retriever()
                 relevant_docs = retriever.get_relevant_documents(question)
                 context_str = "\n".join([doc.page_content for doc in relevant_docs])
-
-
-
-        formatted_prompt = prompt_val.format(history=history_str, context=context_str, question=question)
+
+            prompt_val = ChatPromptTemplate.from_template(prompt)
+            formatted_prompt = prompt_val.format(history=history_str, context=context_str, question=question)

         answer = generate_chunked_response(model, formatted_prompt)
         answer = re.split(r'Question:|Current Question:', answer)[-1].strip()

-
+        if not web_search:
+            memory_database[question] = answer

-
+    if not web_search:
+        conversation_history = manage_conversation_history(question, answer, conversation_history)

     return answer

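Taken together, the commit moves prompt construction into each branch and guards the answer cache and conversation history with `not web_search`, so web-search answers are never cached or written into the history. A minimal, self-contained sketch of that guard logic follows; `generate_answer` and the dictionary/list initialisations are stand-ins for the real ChatPromptTemplate/generate_chunked_response pipeline and the globals in app.py, not code from this commit.

# Minimal sketch of the control flow introduced by this commit (assumed
# simplifications: generate_answer replaces the prompt-building and model
# call, and history is appended directly instead of going through
# manage_conversation_history).

memory_database = {}       # question -> cached answer
conversation_history = []  # list of {"question": ..., "answer": ...} dicts


def generate_answer(question: str, web_search: bool) -> str:
    # Placeholder for building the branch-specific prompt and calling the model.
    return f"answer to {question!r} (web_search={web_search})"


def ask_question(question: str, web_search: bool = False) -> str:
    if not question:
        return "Please enter a question."

    # Cache hits are only used when web search is off.
    if question in memory_database and not web_search:
        answer = memory_database[question]
    else:
        answer = generate_answer(question, web_search)
        # Only non-web-search answers are written back to the cache.
        if not web_search:
            memory_database[question] = answer

    # Conversation history is likewise only updated for non-web-search turns.
    if not web_search:
        conversation_history.append({"question": question, "answer": answer})

    return answer


print(ask_question("What is FAISS?"))                   # generated, then cached
print(ask_question("What is FAISS?"))                   # served from memory_database
print(ask_question("What is FAISS?", web_search=True))  # bypasses the cache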