Debug Added
utils.py
CHANGED
@@ -176,10 +176,10 @@ class SafetyGuard:
     """Validates input and filters output"""
 
     def __init__(self):
-        self.financial_terms = {
-            'revenue', 'profit', 'ebitda', 'balance', 'cash',
-            'income', 'fiscal', 'growth', 'margin', 'expense'
-        }
+        # self.financial_terms = {
+        #     'revenue', 'profit', 'ebitda', 'balance', 'cash',
+        #     'income', 'fiscal', 'growth', 'margin', 'expense'
+        # }
         self.blocked_topics = {
             'politics', 'sports', 'entertainment', 'religion',
             'medical', 'hypothetical', 'opinion', 'personal'
@@ -187,10 +187,10 @@ class SafetyGuard:
 
     def validate_input(self, query: str) -> Tuple[bool, str]:
         query_lower = query.lower()
+        # if not any(term in query_lower for term in self.financial_terms):
+        #     return False, "Please ask financial questions."
         if any(topic in query_lower for topic in self.blocked_topics):
             return False, "I only discuss financial topics."
-        if not any(term in query_lower for term in self.financial_terms):
-            return False, "Please ask financial questions."
         return True, ""
 
     def filter_output(self, response: str) -> str:
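As a quick illustration of what this change does to the guard, here is a minimal, self-contained sketch trimmed to just the lines visible in the hunks (the real class also defines filter_output and may list more blocked topics; the example queries are made up). With the financial_terms whitelist commented out, validate_input only rejects queries that mention a blocked topic, so anything else is accepted:

from typing import Tuple

class SafetyGuard:
    """Validates input and filters output"""

    def __init__(self):
        # financial_terms whitelist disabled, as in this commit
        self.blocked_topics = {
            'politics', 'sports', 'entertainment', 'religion',
            'medical', 'hypothetical', 'opinion', 'personal'
        }

    def validate_input(self, query: str) -> Tuple[bool, str]:
        query_lower = query.lower()
        if any(topic in query_lower for topic in self.blocked_topics):
            return False, "I only discuss financial topics."
        return True, ""

guard = SafetyGuard()
print(guard.validate_input("Who won the big sports final?"))  # (False, 'I only discuss financial topics.')
print(guard.validate_input("What was fiscal 2023 revenue?"))  # (True, '')
print(guard.validate_input("Tell me a joke"))                 # (True, '') -- passes now that the whitelist is off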
@@ -266,18 +266,26 @@ Context: {context}<|im_end|>
 Answer:"""
 
         print(f"\n\n[For Debug Only] Prompt: {prompt}\n\n")
-
         response = generator(prompt)[0]['generated_text']
+        print(f"\n\n[For Debug Only] response: {response}\n\n")
+
         clean_response = extract_final_response(response)
         clean_response = guard.filter_output(clean_response)
-
+        print(f"\n\n[For Debug Only] clean_response: {clean_response}\n\n")
+
         query_embed = embeddings.embed_query(query)
+        print(f"\n\n[For Debug Only] query_embed: {query_embed}\n\n")
+
         response_embed = embeddings.embed_query(clean_response)
+        print(f"\n\n[For Debug Only] response_embed: {response_embed}\n\n")
+
         confidence = cosine_similarity([query_embed], [response_embed])[0][0]
-
-
-
+        print(f"\n\n[For Debug Only] confidence: {confidence}\n\n")
+
+        memory.add_interaction(query, clean_response)
+
+        print(f"\n\n[For Debug Only] I'm Done \n\n")
         return clean_response, round(confidence, 2)
 
     except Exception as e:
-        return f"Error processing request: {e}", 0.0
+        return f"Error processing request: {e}", 0.0