fix model id
app.py CHANGED
@@ -3,8 +3,8 @@ from llama_cpp import Llama
 from huggingface_hub import hf_hub_download
 
 hf_hub_download(
-    repo_id="
-    filename="
+    repo_id="Qwen/Qwen2.5-3B-Instruct-GGUF",
+    filename="qwen2.5-3b-instruct-q4_k_m.gguf",
     local_dir="./models",
 )
 
@@ -12,7 +12,7 @@ hf_hub_download(
 @st.cache_resource
 def load_model():
     return Llama(
-        model_path="models/
+        model_path="models/qwen2.5-3b-instruct-q4_k_m.gguf",
         n_ctx=2048,
         n_threads=6,
         n_batch=8,
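
For context, a minimal sketch of how the cached loader could be used further down in app.py, assuming a simple Streamlit text-in/text-out flow; the widget, prompt handling, and generation parameters below are illustrative assumptions, not part of this commit.

import streamlit as st

# load_model() is the @st.cache_resource function patched above;
# Streamlit caches the Llama instance across reruns.
llm = load_model()

prompt = st.text_input("Prompt")  # hypothetical UI element, not shown in this diff
if prompt:
    # create_chat_completion is llama-cpp-python's OpenAI-style chat API;
    # max_tokens here is an illustrative default.
    result = llm.create_chat_completion(
        messages=[{"role": "user", "content": prompt}],
        max_tokens=256,
    )
    st.write(result["choices"][0]["message"]["content"])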