DrishtiSharma committed on
Commit
4d5a1a3
·
verified ·
1 Parent(s): c0aa263

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -0
app.py CHANGED
@@ -259,6 +259,28 @@ if query:
259
  context_relevancy_evaluation_chain = LLMChain(llm=llm_judge, prompt=context_relevancy_checker_prompt, output_key="relevancy_response")
260
 
261
  response_crisis = context_relevancy_evaluation_chain.invoke({"context":context,"retriever_query":query})
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
262
 
263
  pick_relevant_context_chain = LLMChain(llm=llm_judge, prompt=relevant_prompt, output_key="context_number")
264
 
 
259
  context_relevancy_evaluation_chain = LLMChain(llm=llm_judge, prompt=context_relevancy_checker_prompt, output_key="relevancy_response")
260
 
261
  response_crisis = context_relevancy_evaluation_chain.invoke({"context":context,"retriever_query":query})
262
+
263
+ # Debug Raw LLM Output
264
+ st.write("πŸ› οΈ Debugging: Raw LLM Response for Relevancy:", response_crisis['relevancy_response'])
265
+
266
+ # Extract raw JSON response
267
+ raw_response = response_crisis['relevancy_response']
268
+
269
+ # 🔧 **Sanitize the response by removing `<think>` or unwanted text**
270
+ if "<think>" in raw_response:
271
+ raw_response = raw_response.split("<think>")[-1] # Keep only JSON part
272
+ if "</think>" in raw_response:
273
+ raw_response = raw_response.split("</think>")[0] # Remove trailing text
274
+
275
+ # πŸ” **Try parsing the JSON safely**
276
+ try:
277
+ relevancy_response = json.loads(raw_response)
278
+ st.write("✅ Successfully parsed JSON:", relevancy_response)  # Debugging output
279
+ except json.JSONDecodeError as e:
280
+ st.error(f"❌ Failed to parse JSON: {e}")
281
+ st.write("πŸ” Raw LLM Response Before Parsing:", raw_response) # Debugging output
282
+ relevancy_response = None # Prevent breaking the pipeline
283
+
284
 
285
  pick_relevant_context_chain = LLMChain(llm=llm_judge, prompt=relevant_prompt, output_key="context_number")
286