Update app.py
app.py CHANGED
```diff
@@ -211,6 +211,16 @@ def ask_question(question, temperature, top_p, repetition_penalty, web_search):
     if web_search:
         search_results = google_search(question)
         context_str = "\n".join([result["text"] for result in search_results if result["text"]])
+        prompt_template = """
+        Answer the question based on the following web search results:
+
+        Web Search Results:
+        {context}
+
+        Current Question: {question}
+
+        Provide a concise and direct answer to the question:
+        """
     else:
         database = FAISS.load_local("faiss_database", embed, allow_dangerous_deserialization=True)
         if is_related_to_history(question, conversation_history):
@@ -219,9 +229,10 @@
         retriever = database.as_retriever()
         relevant_docs = retriever.get_relevant_documents(question)
         context_str = "\n".join([doc.page_content for doc in relevant_docs])
+        prompt_template = prompt  # Use the original prompt for document-based answers
 
-    prompt_val = ChatPromptTemplate.from_template(prompt)
-    formatted_prompt = prompt_val.format(history=history_str, context=context_str)
+    prompt_val = ChatPromptTemplate.from_template(prompt_template)
+    formatted_prompt = prompt_val.format(history=history_str, context=context_str, question=question)
 
     answer = generate_chunked_response(model, formatted_prompt)
     answer = re.split(r'Question:|Current Question:', answer)[-1].strip()
```
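In short, the commit makes the prompt template conditional: a web-search-specific template when `web_search` is set, the original `prompt` otherwise, with `ChatPromptTemplate` formatting the result in one place afterwards. Below is a minimal, self-contained sketch of that selection logic; `WEB_SEARCH_TEMPLATE` mirrors the template added in the diff, while `DOC_TEMPLATE` and `build_prompt` are illustrative stand-ins (the original `prompt` lives elsewhere in app.py and is not shown on this page):

```python
# Sketch of the prompt selection this commit introduces. Only langchain
# is assumed; DOC_TEMPLATE is a hypothetical stand-in for app.py's `prompt`.
from langchain.prompts import ChatPromptTemplate

WEB_SEARCH_TEMPLATE = """
Answer the question based on the following web search results:

Web Search Results:
{context}

Current Question: {question}

Provide a concise and direct answer to the question:
"""

DOC_TEMPLATE = """
Conversation so far:
{history}

Context:
{context}

Question: {question}
"""

def build_prompt(web_search: bool, context_str: str, question: str, history_str: str) -> str:
    # Pick the template the same way the diff does.
    prompt_template = WEB_SEARCH_TEMPLATE if web_search else DOC_TEMPLATE
    prompt_val = ChatPromptTemplate.from_template(prompt_template)
    # Pass only the variables the chosen template declares; the web-search
    # template has no {history} placeholder.
    provided = {"history": history_str, "context": context_str, "question": question}
    needed = {k: v for k, v in provided.items() if k in prompt_val.input_variables}
    return prompt_val.format(**needed)

print(build_prompt(True, "FAISS is a similarity-search library.", "What is FAISS?", ""))
```

Note that the diff itself passes `history=history_str` unconditionally; depending on the LangChain version, formatting a template that does not declare `{history}` with an extra keyword can raise a `KeyError`, which is why this sketch filters against `input_variables` first.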
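The `else` branch depends on a FAISS index saved earlier under `faiss_database`. Here is a runnable sketch of that retrieval path, using `FakeEmbeddings` in place of app.py's real `embed` object so it works without a model or API key (requires `faiss-cpu`; import paths assume a recent `langchain_community`, and older releases expose the same classes under `langchain`):

```python
# Sketch of the document-retrieval branch, with stand-ins for app.py's
# embeddings object and pre-built index.
from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores import FAISS

embed = FakeEmbeddings(size=256)  # stand-in for the real embeddings model
FAISS.from_texts(["doc one text", "doc two text"], embed).save_local("faiss_database")

# What ask_question() does at question time:
database = FAISS.load_local("faiss_database", embed, allow_dangerous_deserialization=True)
retriever = database.as_retriever()
relevant_docs = retriever.get_relevant_documents("what does doc one say?")
context_str = "\n".join(doc.page_content for doc in relevant_docs)
```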
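Finally, the answer clean-up on the last line is a plain regex heuristic: it keeps only what follows the last `Question:` marker in the model output, trimming any echoed prompt prefix. A small demonstration:

```python
import re

raw = "Echoed prompt...\nCurrent Question: What is FAISS?\nFAISS is a similarity-search library."
answer = re.split(r'Question:|Current Question:', raw)[-1].strip()
print(answer)  # What is FAISS?\nFAISS is a similarity-search library.
```

Because the first alternative `Question:` also matches inside `Current Question:`, the second alternative never actually fires; the split point is the same either way.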