Update app/llm.py
app/llm.py (+2 -2)
@@ -31,7 +31,7 @@ class ChatModel(BaseModel):
 llm_chat = llama_cpp.Llama.from_pretrained(
     repo_id="Qwen/Qwen1.5-0.5B-Chat-GGUF",
     filename="*q4_0.gguf",
-    tokenizer=llama_cpp.llama_tokenizer.LlamaHFTokenizer.from_pretrained("
+    tokenizer=llama_cpp.llama_tokenizer.LlamaHFTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B-Chat"),
     verbose=False,
     n_ctx=512,
     n_gpu_layers=0,
@@ -40,7 +40,7 @@ llm_chat = llama_cpp.Llama.from_pretrained(
 llm_generate = llama_cpp.Llama.from_pretrained(
     repo_id="Qwen/Qwen1.5-0.5B-Chat-GGUF",
     filename="*q4_0.gguf",
-    tokenizer=llama_cpp.llama_tokenizer.LlamaHFTokenizer.from_pretrained("
+    tokenizer=llama_cpp.llama_tokenizer.LlamaHFTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B-Chat-GGUF"),
     verbose=False,
     n_ctx=4096,
     n_gpu_layers=0,
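For context, here is a minimal standalone sketch of the pattern the first hunk lands on: llama-cpp-python's `Llama.from_pretrained` loads the GGUF weights from the quantized repo, while `LlamaHFTokenizer.from_pretrained` pulls the Hugging Face tokenizer from the base (non-GGUF) chat repo via `transformers`. The smoke-test prompt and `max_tokens` value at the end are illustrative assumptions, not part of the commit.

```python
import llama_cpp
import llama_cpp.llama_tokenizer

# GGUF weights come from the quantized repo; the tokenizer is overridden
# with the Hugging Face one from the original chat model's repo
# (requires the `transformers` package to be installed).
llm_chat = llama_cpp.Llama.from_pretrained(
    repo_id="Qwen/Qwen1.5-0.5B-Chat-GGUF",
    filename="*q4_0.gguf",  # glob picks the q4_0 quantization file
    tokenizer=llama_cpp.llama_tokenizer.LlamaHFTokenizer.from_pretrained(
        "Qwen/Qwen1.5-0.5B-Chat"
    ),
    verbose=False,
    n_ctx=512,       # small context window for the chat model
    n_gpu_layers=0,  # CPU-only inference
)

# Quick smoke test of the chat interface.
reply = llm_chat.create_chat_completion(
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    max_tokens=32,
)
print(reply["choices"][0]["message"]["content"])
```

Overriding the tokenizer this way keeps tokenization identical to the original Hugging Face model, which is useful when the tokenizer embedded in a GGUF file does not reproduce the model's chat formatting exactly.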