Phoenix21 commited on
Commit
797ab8a
·
1 Parent(s): 9639e4c

corrected the chain import paths (use `chain.*` instead of `HealthyAiExpert.chain.*`) and consolidated the prompt imports

Browse files
Files changed (1) hide show
  1. pipeline.py +10 -12
pipeline.py CHANGED
@@ -35,11 +35,11 @@ from langchain_core.callbacks import Callbacks
35
 
36
  # Custom chain imports
37
  # from groq_client import GroqClient
38
- from HealthyAiExpert.chain.classification_chain import get_classification_chain
39
- from HealthyAiExpert.chain.refusal_chain import get_refusal_chain
40
- from HealthyAiExpert.chain.tailor_chain import get_tailor_chain
41
- from HealthyAiExpert.chain.cleaner_chain import get_cleaner_chain
42
- from HealthyAiExpert.chain.tailor_chain_wellnessBrand import get_tailor_chain_wellnessBrand
43
 
44
  # Mistral moderation
45
  from mistralai import Mistral
@@ -66,10 +66,8 @@ from smolagents import (
66
  )
67
 
68
  # Import new prompts
69
- from HealthyAiExpert.chain.prompts import (
70
- selfharm_prompt, frustration_prompt, ethical_conflict_prompt,
71
- classification_prompt, refusal_prompt, tailor_prompt, cleaner_prompt
72
- )
73
 
74
  logging.basicConfig(level=logging.INFO)
75
  logger = logging.getLogger(__name__)
@@ -488,20 +486,20 @@ class PipelineState:
488
  self.cleaner_chain = get_cleaner_chain()
489
 
490
  # Specialized chain for self-harm
491
- from HealthyAiExpert.chain.prompts import selfharm_prompt
492
  # self.self_harm_chain = LLMChain(llm=gemini_llm, prompt=selfharm_prompt, verbose=False)
493
 
494
  self.self_harm_chain = LLMChain(llm=groq_fallback_llm, prompt=selfharm_prompt, verbose=False)
495
 
496
 
497
  # NEW: chain for frustration/harsh queries
498
- from HealthyAiExpert.chain.prompts import frustration_prompt
499
  # self.frustration_chain = LLMChain(llm=gemini_llm, prompt=frustration_prompt, verbose=False)
500
  self.frustration_chain = LLMChain(llm=groq_fallback_llm, prompt=frustration_prompt, verbose=False)
501
 
502
 
503
  # NEW: chain for ethical conflict queries
504
- from HealthyAiExpert.chain.prompts import ethical_conflict_prompt
505
  # self.ethical_conflict_chain = LLMChain(llm=gemini_llm, prompt=ethical_conflict_prompt, verbose=False)
506
  self.ethical_conflict_chain = LLMChain(llm=groq_fallback_llm, prompt=ethical_conflict_prompt, verbose=False)
507
 
 
35
 
36
  # Custom chain imports
37
  # from groq_client import GroqClient
38
+ from chain.classification_chain import get_classification_chain
39
+ from chain.refusal_chain import get_refusal_chain
40
+ from chain.tailor_chain import get_tailor_chain
41
+ from chain.cleaner_chain import get_cleaner_chain
42
+ from chain.tailor_chain_wellnessBrand import get_tailor_chain_wellnessBrand
43
 
44
  # Mistral moderation
45
  from mistralai import Mistral
 
66
  )
67
 
68
  # Import new prompts
69
+ from chain.prompts import selfharm_prompt, frustration_prompt, ethical_conflict_prompt,classification_prompt, refusal_prompt, tailor_prompt, cleaner_prompt
70
+
 
 
71
 
72
  logging.basicConfig(level=logging.INFO)
73
  logger = logging.getLogger(__name__)
 
486
  self.cleaner_chain = get_cleaner_chain()
487
 
488
  # Specialized chain for self-harm
489
+ from chain.prompts import selfharm_prompt
490
  # self.self_harm_chain = LLMChain(llm=gemini_llm, prompt=selfharm_prompt, verbose=False)
491
 
492
  self.self_harm_chain = LLMChain(llm=groq_fallback_llm, prompt=selfharm_prompt, verbose=False)
493
 
494
 
495
  # NEW: chain for frustration/harsh queries
496
+ from chain.prompts import frustration_prompt
497
  # self.frustration_chain = LLMChain(llm=gemini_llm, prompt=frustration_prompt, verbose=False)
498
  self.frustration_chain = LLMChain(llm=groq_fallback_llm, prompt=frustration_prompt, verbose=False)
499
 
500
 
501
  # NEW: chain for ethical conflict queries
502
+ from chain.prompts import ethical_conflict_prompt
503
  # self.ethical_conflict_chain = LLMChain(llm=gemini_llm, prompt=ethical_conflict_prompt, verbose=False)
504
  self.ethical_conflict_chain = LLMChain(llm=groq_fallback_llm, prompt=ethical_conflict_prompt, verbose=False)
505