Commit 80a312e · verified · Sasidhar committed · 1 Parent(s): 4a9f074

Delete gaurdrails_manager.py

Files changed (1)
  1. gaurdrails_manager.py +0 -50
gaurdrails_manager.py DELETED
@@ -1,50 +0,0 @@
import re  # needed for the regex check in ToxicityRule

from models import GuardrailsConfig

# A simple result class to hold individual check outcomes.
class Result:
    def __init__(self):
        self.details = {}

    def add(self, rule_name: str, passed: bool):
        self.details[rule_name] = passed

    def grounded(self) -> bool:
        # The response is considered "grounded" if all enabled rules pass.
        return all(self.details.values())

# Define guardrail rule classes.
class FactualConsistencyRule:
    name = "FactualConsistency"

    def check(self, response_text: str) -> bool:
        # For demonstration: pass if the response contains the word "fact".
        return "fact" in response_text.lower()

class ToxicityRule:
    name = "Toxicity"

    def check(self, response_text: str) -> bool:
        # For demonstration: fail if negative words like "hate" or "kill" are found.
        return not re.search(r"(hate|kill)", response_text, re.IGNORECASE)

# Manager class to load and execute the enabled guardrail rules.
class GuardrailsManager:
    def __init__(self, config: GuardrailsConfig):
        self.config = config
        self.rules = self.load_rules()

    def load_rules(self):
        rules = []
        if self.config.factual_consistency:
            rules.append(FactualConsistencyRule())
        if self.config.toxicity:
            rules.append(ToxicityRule())
        # Add additional rules based on configuration here.
        return rules

    def check(self, response_text: str) -> Result:
        result = Result()
        for rule in self.rules:
            rule_result = rule.check(response_text)
            result.add(rule.name, rule_result)
        return result
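
For context, a minimal sketch of how the deleted manager would have been driven, assuming the module above is still importable and that models.GuardrailsConfig exposes boolean factual_consistency and toxicity fields (its definition is not part of this commit):

# Hypothetical usage of the removed module; the GuardrailsConfig constructor
# arguments below are assumed, since models.py is not shown in this commit.
from models import GuardrailsConfig
from gaurdrails_manager import GuardrailsManager

config = GuardrailsConfig(factual_consistency=True, toxicity=True)
manager = GuardrailsManager(config)

# Run every enabled rule against a model response and inspect the outcome.
result = manager.check("This answer is based on a verified fact.")
print(result.details)     # per-rule pass/fail, e.g. {"FactualConsistency": True, "Toxicity": True}
print(result.grounded())  # True only when all enabled rules passed

Each rule follows the same small protocol (a name attribute plus a check method), so adding a new guardrail only requires defining another rule class and registering it in load_rules.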