Spaces: Build error
Update app1.py
app1.py
CHANGED
@@ -240,6 +240,18 @@ if query:
             """
         )

+        rag_prompt = """You are a helpful assistant, proficient in formulating clear and meaningful answers from the context provided. Based on the CONTEXT provided, please formulate
+        a clear, concise and meaningful answer for the QUERY asked. Please refrain from making up your own answer in case the CONTEXT provided is not sufficient to answer the QUERY. In such a situation, please respond with 'I do not know'.
+
+        QUERY:
+        {query}
+
+        CONTEXT:
+        {context}
+
+        ANSWER:
+        """
+
         context_relevancy_evaluation_chain = LLMChain(llm=llm_judge, prompt=context_relevancy_checker_prompt, output_key="relevancy_response")

         response_crisis = context_relevancy_evaluation_chain.invoke({"context":context,"retriever_query":query})
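Note: the `rag_prompt` added here is a plain Python string whose `{query}` and `{context}` placeholders only get filled once the string is wrapped in a `PromptTemplate` (which the second hunk below does). A minimal, self-contained sketch of that mechanism, separate from the commit itself; the demo values are invented for illustration:

# Illustration only — not part of this commit; demo query/context are made up.
from langchain.prompts import PromptTemplate

demo_prompt = PromptTemplate(
    input_variables=["query", "context"],
    template="QUERY:\n{query}\n\nCONTEXT:\n{context}\n\nANSWER:",
)
# format() substitutes the named placeholders and returns the final prompt string
print(demo_prompt.format(query="What does app1.py do?", context="It runs a RAG pipeline."))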
@@ -252,6 +264,8 @@ if query:

         contexts = relevant_contexts_chain.invoke({"context_number":relevant_response['context_number'],"context":context})

+        final_prompt = PromptTemplate(input_variables=["query","context"],template=rag_prompt)
+
         response_chain = LLMChain(llm=rag_llm,prompt=final_prompt,output_key="final_response")

         response = chain.invoke({"query":query,"context":contexts['relevant_contexts']})
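Taken together, the two hunks define the `rag_prompt`/`final_prompt` pair that the pre-existing `response_chain = LLMChain(llm=rag_llm, prompt=final_prompt, ...)` line already referenced; before this commit that reference would raise `NameError: name 'final_prompt' is not defined`, a plausible source of the build error. Note also that the unchanged last line invokes `chain` rather than the `response_chain` built just above it — unless `chain` is defined earlier in app1.py (not visible in this diff), the corrected wiring would look roughly like the sketch below, assuming `rag_llm`, `query`, and `contexts` exist as in the surrounding code:

# Sketch only — assumes rag_llm, rag_prompt, query, and contexts from app1.py.
final_prompt = PromptTemplate(input_variables=["query", "context"], template=rag_prompt)
response_chain = LLMChain(llm=rag_llm, prompt=final_prompt, output_key="final_response")
response = response_chain.invoke({"query": query, "context": contexts["relevant_contexts"]})
# LLMChain.invoke returns a dict containing the inputs plus the output_key entry
print(response["final_response"])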