Hugging Face Space — status: Sleeping
Commit: "corrected the" (commit message appears truncated in this capture)
Browse files — pipeline.py: +10 −12
pipeline.py
CHANGED
@@ -35,11 +35,11 @@ from langchain_core.callbacks import Callbacks
|
|
35 |
|
36 |
# Custom chain imports
|
37 |
# from groq_client import GroqClient
|
38 |
-
from
|
39 |
-
from
|
40 |
-
from
|
41 |
-
from
|
42 |
-
from
|
43 |
|
44 |
# Mistral moderation
|
45 |
from mistralai import Mistral
|
@@ -66,10 +66,8 @@ from smolagents import (
|
|
66 |
)
|
67 |
|
68 |
# Import new prompts
|
69 |
-
from
|
70 |
-
|
71 |
-
classification_prompt, refusal_prompt, tailor_prompt, cleaner_prompt
|
72 |
-
)
|
73 |
|
74 |
logging.basicConfig(level=logging.INFO)
|
75 |
logger = logging.getLogger(__name__)
|
@@ -488,20 +486,20 @@ class PipelineState:
|
|
488 |
self.cleaner_chain = get_cleaner_chain()
|
489 |
|
490 |
# Specialized chain for self-harm
|
491 |
-
from
|
492 |
# self.self_harm_chain = LLMChain(llm=gemini_llm, prompt=selfharm_prompt, verbose=False)
|
493 |
|
494 |
self.self_harm_chain = LLMChain(llm=groq_fallback_llm, prompt=selfharm_prompt, verbose=False)
|
495 |
|
496 |
|
497 |
# NEW: chain for frustration/harsh queries
|
498 |
-
from
|
499 |
# self.frustration_chain = LLMChain(llm=gemini_llm, prompt=frustration_prompt, verbose=False)
|
500 |
self.frustration_chain = LLMChain(llm=groq_fallback_llm, prompt=frustration_prompt, verbose=False)
|
501 |
|
502 |
|
503 |
# NEW: chain for ethical conflict queries
|
504 |
-
from
|
505 |
# self.ethical_conflict_chain = LLMChain(llm=gemini_llm, prompt=ethical_conflict_prompt, verbose=False)
|
506 |
self.ethical_conflict_chain = LLMChain(llm=groq_fallback_llm, prompt=ethical_conflict_prompt, verbose=False)
|
507 |
|
|
|
35 |
|
36 |
# Custom chain imports
|
37 |
# from groq_client import GroqClient
|
38 |
+
from chain.classification_chain import get_classification_chain
|
39 |
+
from chain.refusal_chain import get_refusal_chain
|
40 |
+
from chain.tailor_chain import get_tailor_chain
|
41 |
+
from chain.cleaner_chain import get_cleaner_chain
|
42 |
+
from chain.tailor_chain_wellnessBrand import get_tailor_chain_wellnessBrand
|
43 |
|
44 |
# Mistral moderation
|
45 |
from mistralai import Mistral
|
|
|
66 |
)
|
67 |
|
68 |
# Import new prompts
|
69 |
+
from chain.prompts import selfharm_prompt, frustration_prompt, ethical_conflict_prompt, classification_prompt, refusal_prompt, tailor_prompt, cleaner_prompt
|
70 |
+
|
|
|
|
|
71 |
|
72 |
logging.basicConfig(level=logging.INFO)
|
73 |
logger = logging.getLogger(__name__)
|
|
|
486 |
self.cleaner_chain = get_cleaner_chain()
|
487 |
|
488 |
# Specialized chain for self-harm
|
489 |
+
from chain.prompts import selfharm_prompt
|
490 |
# self.self_harm_chain = LLMChain(llm=gemini_llm, prompt=selfharm_prompt, verbose=False)
|
491 |
|
492 |
self.self_harm_chain = LLMChain(llm=groq_fallback_llm, prompt=selfharm_prompt, verbose=False)
|
493 |
|
494 |
|
495 |
# NEW: chain for frustration/harsh queries
|
496 |
+
from chain.prompts import frustration_prompt
|
497 |
# self.frustration_chain = LLMChain(llm=gemini_llm, prompt=frustration_prompt, verbose=False)
|
498 |
self.frustration_chain = LLMChain(llm=groq_fallback_llm, prompt=frustration_prompt, verbose=False)
|
499 |
|
500 |
|
501 |
# NEW: chain for ethical conflict queries
|
502 |
+
from chain.prompts import ethical_conflict_prompt
|
503 |
# self.ethical_conflict_chain = LLMChain(llm=gemini_llm, prompt=ethical_conflict_prompt, verbose=False)
|
504 |
self.ethical_conflict_chain = LLMChain(llm=groq_fallback_llm, prompt=ethical_conflict_prompt, verbose=False)
|
505 |
|