DrishtiSharma committed
Commit 1de83ba · verified · 1 Parent(s): 0bdd491

Update app.py

Files changed (1):
  1. app.py +81 -19
app.py CHANGED
@@ -150,19 +150,90 @@ if query:
     with st.spinner("🔄 Retrieving relevant context..."):
         retriever = st.session_state.vector_store.as_retriever(search_type="similarity", search_kwargs={"k": 5})
         retrieved_docs = retriever.invoke(query)
-        context = [d.page_content for d in retrieved_docs]
-        st.success("✅ Context retrieved successfully!")
+
+        # Debugging: Show retrieved documents
+        st.write("🔹 Retrieved Documents:", retrieved_docs)
+
+        if not retrieved_docs:
+            st.error("❌ No relevant documents retrieved! Try a different query.")
+        else:
+            # Ensure extracted content is formatted correctly
+            context = [d.page_content for d in retrieved_docs]
+
+            if isinstance(context, list):  # Convert list to string for LLMChain
+                context_str = "\n".join(context)
+            else:
+                context_str = str(context)
+
+            st.success("✅ Context retrieved successfully!")
+            st.write("🔹 Extracted Context:", context_str)
 
     # ----------------- Run Individual Chains Explicitly -----------------
-    context_relevancy_chain = LLMChain(llm=llm_judge, prompt=PromptTemplate(input_variables=["retriever_query", "context"], template=relevancy_prompt), output_key="relevancy_response")
-    relevant_context_chain = LLMChain(llm=llm_judge, prompt=PromptTemplate(input_variables=["relevancy_response"], template=relevant_context_picker_prompt), output_key="context_number")
-    relevant_contexts_chain = LLMChain(llm=llm_judge, prompt=PromptTemplate(input_variables=["context_number", "context"], template=response_synth), output_key="relevant_contexts")
-    response_chain = LLMChain(llm=rag_llm, prompt=PromptTemplate(input_variables=["query", "context"], template=rag_prompt), output_key="final_response")
 
-    response_crisis = context_relevancy_chain.invoke({"context": context, "retriever_query": query})
-    relevant_response = relevant_context_chain.invoke({"relevancy_response": response_crisis["relevancy_response"]})
-    contexts = relevant_contexts_chain.invoke({"context_number": relevant_response["context_number"], "context": context})
-    final_response = response_chain.invoke({"query": query, "context": contexts["relevant_contexts"]})
+    # Fix: Ensuring all required variables are passed
+    context_relevancy_chain = LLMChain(
+        llm=llm_judge,
+        prompt=PromptTemplate(
+            input_variables=["retriever_query", "context"],
+            template=relevancy_prompt
+        ),
+        output_key="relevancy_response"
+    )
+
+    relevant_context_chain = LLMChain(
+        llm=llm_judge,
+        prompt=PromptTemplate(
+            input_variables=["relevancy_response"],
+            template=relevant_context_picker_prompt
+        ),
+        output_key="context_number"
+    )
+
+    relevant_contexts_chain = LLMChain(
+        llm=llm_judge,
+        prompt=PromptTemplate(
+            input_variables=["context_number", "context"],
+            template=response_synth
+        ),
+        output_key="relevant_contexts"
+    )
+
+    response_chain = LLMChain(
+        llm=rag_llm,
+        prompt=PromptTemplate(
+            input_variables=["query", "context"],
+            template=rag_prompt
+        ),
+        output_key="final_response"
+    )
+
+    # ----------------- Fix: Ensuring All Keys Exist -----------------
+
+    response_crisis = context_relevancy_chain.invoke({
+        "context": context_str,
+        "retriever_query": query
+    })
+
+    # Debugging: Show intermediate response
+    st.write("🔍 Context Relevancy Response:", response_crisis["relevancy_response"])
+
+    relevant_response = relevant_context_chain.invoke({
+        "relevancy_response": response_crisis["relevancy_response"]
+    })
+
+    st.write("🔍 Picked Relevant Contexts:", relevant_response["context_number"])
+
+    contexts = relevant_contexts_chain.invoke({
+        "context_number": relevant_response["context_number"],
+        "context": context_str  # Ensure correct format
+    })
+
+    st.write("🔍 Extracted Relevant Contexts:", contexts["relevant_contexts"])
+
+    final_response = response_chain.invoke({
+        "query": query,
+        "context": contexts["relevant_contexts"]
+    })
 
     # ----------------- Display All Outputs -----------------
     st.markdown("### Context Relevancy Evaluation")
@@ -175,13 +246,4 @@ if query:
     st.json(contexts["relevant_contexts"])
 
     st.subheader("context_relevancy_evaluation_chain Statement")
-    st.json(final_response["relevancy_response"])
-
-    st.subheader("pick_relevant_context_chain Statement")
-    st.json(final_response["context_number"])
-
-    st.subheader("relevant_contexts_chain Statement")
-    st.json(final_response["relevant_contexts"])
-
-    st.subheader("RAG Response Statement")
     st.json(final_response["final_response"])
 
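A note on the list-to-string step in the first hunk: PromptTemplate interpolates whatever value it receives, so passing the raw context list would embed a Python list repr, brackets and quotes included, in the judge prompt, while "\n".join(context) yields clean text. Since context is built by a list comprehension it is always a list, so the isinstance(context, list) check never takes its else branch. A minimal alternative sketch, with a hypothetical format_context helper that is not part of this commit:

# Hypothetical helper (not in the commit): label each retrieved chunk so the
# judge LLM can cite "Context 1", "Context 2", ... in its relevancy response.
def format_context(docs) -> str:
    return "\n\n".join(
        f"Context {i + 1}:\n{doc.page_content}" for i, doc in enumerate(docs)
    )

context_str = format_context(retrieved_docs)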
 
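One behavior worth flagging: st.error reports the problem but does not halt a Streamlit run. If the chain section executes outside the else: branch, an empty retrieved_docs leaves context_str undefined and the first .invoke() raises a NameError. A sketch of a stricter guard using Streamlit's st.stop(), assuming the surrounding retrieval code is otherwise unchanged:

if not retrieved_docs:
    st.error("❌ No relevant documents retrieved! Try a different query.")
    st.stop()  # ends this script run; none of the chain code below executes

context_str = "\n".join(d.page_content for d in retrieved_docs)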
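Separately, LLMChain is deprecated in recent LangChain releases in favor of LCEL composition (prompt | llm | parser). A minimal sketch of context_relevancy_chain rewritten that way, assuming the llm_judge, relevancy_prompt, query, and context_str objects the app already defines:

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate

# LCEL equivalent of context_relevancy_chain: returns a plain string
# instead of a dict keyed by "relevancy_response".
context_relevancy_chain = (
    PromptTemplate.from_template(relevancy_prompt)  # expects {retriever_query} and {context}
    | llm_judge
    | StrOutputParser()
)

relevancy_response = context_relevancy_chain.invoke(
    {"retriever_query": query, "context": context_str}
)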