Spaces:
Sleeping
Sleeping
Update private_gpt/components/llm/llm_component.py
Browse files
private_gpt/components/llm/llm_component.py
CHANGED
@@ -25,13 +25,15 @@ class LLMComponent:
|
|
25 |
|
26 |
match settings.llm.mode:
|
27 |
case "local":
|
|
|
28 |
from llama_index.llms import LlamaCPP
|
29 |
prompt_style_cls = get_prompt_style(settings.local.prompt_style)
|
30 |
prompt_style = prompt_style_cls(
|
31 |
default_system_prompt=settings.local.default_system_prompt
|
32 |
)
|
33 |
self.llm = LlamaCPP(
|
34 |
-
model_path=str(models_path / settings.local.llm_hf_model_file),
|
|
|
35 |
temperature=0.1,
|
36 |
max_new_tokens=settings.llm.max_new_tokens,
|
37 |
context_window=3900,
|
|
|
25 |
|
26 |
match settings.llm.mode:
|
27 |
case "local":
|
28 |
+
model_url = "https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF/resolve/main/mistral-7b-instruct-v0.1.Q4_K_M.gguf"
|
29 |
from llama_index.llms import LlamaCPP
|
30 |
prompt_style_cls = get_prompt_style(settings.local.prompt_style)
|
31 |
prompt_style = prompt_style_cls(
|
32 |
default_system_prompt=settings.local.default_system_prompt
|
33 |
)
|
34 |
self.llm = LlamaCPP(
|
35 |
+
# model_path=str(models_path / settings.local.llm_hf_model_file),
|
36 |
+
model_url=model_url,
|
37 |
temperature=0.1,
|
38 |
max_new_tokens=settings.llm.max_new_tokens,
|
39 |
context_window=3900,
|