Ibraaheem committed on
Commit
62bc489
·
1 Parent(s): b22594e

Update private_gpt/components/llm/llm_component.py

Browse files
private_gpt/components/llm/llm_component.py CHANGED
@@ -70,39 +70,39 @@ class LLMComponent:
70
  self.llm = OpenAI(model="gpt-3.5-turbo", api_key=os.environ.get("OPENAI_API_KEY"))
71
 
72
 
73
- def switch_model(new_model: str) -> None:
74
- nonlocal self
75
- from llama_index.llms import LlamaCPP
76
- openai_settings = settings.openai.api_key
77
-
78
- if new_model == "gpt-3.5-turbo":
79
- self.llm = OpenAI(model="gpt-3.5-turbo", api_key=os.environ.get("OPENAI_API_KEY"))
80
- logger.info("Initializing the LLM Model in=%s", "gpt-3.5-turbo")
81
-
82
- elif new_model == "gpt-4":
83
- # Initialize with the new model
84
- self.llm = OpenAI(model="gpt-4", api_key=os.environ.get("OPENAI_API_KEY"))
85
- logger.info("Initializing the LLM Model in=%s", "gpt-4")
86
 
87
-
88
- elif new_model == "mistral-7B":
89
- prompt_style_cls = get_prompt_style(settings.local.prompt_style)
90
- prompt_style = prompt_style_cls(
91
- default_system_prompt=settings.local.default_system_prompt
92
- )
93
- self.llm = LlamaCPP(
94
- #model_path=str(models_path / settings.local.llm_hf_model_file),
95
- model_url= "https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF/resolve/main/mistral-7b-instruct-v0.1.Q4_K_M.gguf?download=true",
96
- temperature=0.1,
97
- max_new_tokens=settings.llm.max_new_tokens,
98
- context_window=3900,
99
- generate_kwargs={},
100
- model_kwargs={"n_gpu_layers": -1},
101
- messages_to_prompt=prompt_style.messages_to_prompt,
102
- completion_to_prompt=prompt_style.completion_to_prompt,
103
- verbose=True,
104
- )
105
- logger.info("Initializing the LLM Model in=%s", "Mistral-7B")
 
 
 
 
 
106
 
107
  def switch_to_model(self, new_model: str):
108
  if self.llm_mode == "dynamic":
 
70
  self.llm = OpenAI(model="gpt-3.5-turbo", api_key=os.environ.get("OPENAI_API_KEY"))
71
 
72
 
73
+ def switch_model(new_model: str) -> None:
74
+ nonlocal self
75
+ from llama_index.llms import LlamaCPP
76
+ openai_settings = settings.openai.api_key
77
+
78
+ if new_model == "gpt-3.5-turbo":
79
+ self.llm = OpenAI(model="gpt-3.5-turbo", api_key=os.environ.get("OPENAI_API_KEY"))
80
+ logger.info("Initializing the LLM Model in=%s", "gpt-3.5-turbo")
 
 
 
 
 
81
 
82
+ elif new_model == "gpt-4":
83
+ # Initialize with the new model
84
+ self.llm = OpenAI(model="gpt-4", api_key=os.environ.get("OPENAI_API_KEY"))
85
+ logger.info("Initializing the LLM Model in=%s", "gpt-4")
86
+
87
+
88
+ elif new_model == "mistral-7B":
89
+ prompt_style_cls = get_prompt_style(settings.local.prompt_style)
90
+ prompt_style = prompt_style_cls(
91
+ default_system_prompt=settings.local.default_system_prompt
92
+ )
93
+ self.llm = LlamaCPP(
94
+ #model_path=str(models_path / settings.local.llm_hf_model_file),
95
+ model_url= "https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF/resolve/main/mistral-7b-instruct-v0.1.Q4_K_M.gguf?download=true",
96
+ temperature=0.1,
97
+ max_new_tokens=settings.llm.max_new_tokens,
98
+ context_window=3900,
99
+ generate_kwargs={},
100
+ model_kwargs={"n_gpu_layers": -1},
101
+ messages_to_prompt=prompt_style.messages_to_prompt,
102
+ completion_to_prompt=prompt_style.completion_to_prompt,
103
+ verbose=True,
104
+ )
105
+ logger.info("Initializing the LLM Model in=%s", "Mistral-7B")
106
 
107
  def switch_to_model(self, new_model: str):
108
  if self.llm_mode == "dynamic":