Commit 54a85fe
Parent(s): d74c18f
main.py updated
main.py CHANGED
@@ -37,7 +37,7 @@ n_batch = 512 # Should be between 1 and n_ctx, consider the amount of VRAM in y

 # Make sure the model path is correct for your system!
 llm = LlamaCpp(
-    model_path="
+    model_path="Phi-3-mini-4k-instruct-q4.gguf",
     n_gpu_layers=n_gpu_layers,
     n_batch=n_batch,
     callback_manager=callback_manager,
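For reference, here is a minimal sketch of how the setup around this hunk in main.py plausibly fits together after the commit. Only the lines shown in the diff come from the commit itself; the import paths, the n_gpu_layers value, the callback_manager construction, and the verbose flag are assumptions added for illustration.

# Minimal sketch; only the lines shown in the diff are from the commit,
# everything else here is assumed context.
from langchain_community.llms import LlamaCpp
from langchain_core.callbacks import CallbackManager, StreamingStdOutCallbackHandler

n_gpu_layers = -1  # assumption: offload all layers to the GPU when one is available
n_batch = 512  # Should be between 1 and n_ctx, consider the amount of VRAM in your GPU

# Stream generated tokens to stdout (assumed handler choice)
callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])

# Make sure the model path is correct for your system!
llm = LlamaCpp(
    model_path="Phi-3-mini-4k-instruct-q4.gguf",
    n_gpu_layers=n_gpu_layers,
    n_batch=n_batch,
    callback_manager=callback_manager,
    verbose=True,  # assumption: verbose output so the streaming callback is invoked
)

With this change the Space loads the quantized Phi-3-mini-4k-instruct-q4.gguf file from the repository root, so the GGUF file must be present alongside main.py for the path to resolve.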