Commit ce33049 · Parent: bd4544e
Update src/backend/chatbot.py
Files changed: src/backend/chatbot.py (+2 −2)
src/backend/chatbot.py CHANGED

@@ -50,7 +50,7 @@ def init_llm(model, demo_lite):
     generate_kwargs={},
     # kwargs to pass to __init__()
     # set to at least 1 to use GPU
-    model_kwargs={"n_gpu_layers":
+    model_kwargs={"n_gpu_layers": 16},
     # transform inputs into Llama2 format
     messages_to_prompt=messages_to_prompt,
     completion_to_prompt=completion_to_prompt,
@@ -73,7 +73,7 @@ def init_llm(model, demo_lite):
     generate_kwargs={},
     # kwargs to pass to __init__()
     # set to at least 1 to use GPU
-    model_kwargs={"n_gpu_layers":
+    model_kwargs={"n_gpu_layers": 16},
    # transform inputs into Llama2 format
    # messages_to_prompt=messages_to_prompt,
    # completion_to_prompt=completion_to_prompt,
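For context, the changed lines most likely sit inside a llama-index LlamaCPP(...) call, judging by the surrounding generate_kwargs, messages_to_prompt, and completion_to_prompt arguments. The sketch below is an assumption-laden reconstruction rather than the Space's actual code: the import paths follow recent llama-index packaging, and the model URL, temperature, and context settings are placeholders.

# Minimal sketch (assumption): how the edited model_kwargs line typically fits
# into a llama-index LlamaCPP constructor. Not the Space's real configuration.
from llama_index.llms.llama_cpp import LlamaCPP
from llama_index.llms.llama_cpp.llama_utils import (
    messages_to_prompt,
    completion_to_prompt,
)

llm = LlamaCPP(
    # hypothetical GGUF source; chatbot.py points at its own model file
    model_url="https://example.com/llama-2-7b-chat.Q4_K_M.gguf",
    temperature=0.1,
    max_new_tokens=256,
    context_window=3900,
    generate_kwargs={},
    # kwargs forwarded to llama_cpp.Llama.__init__();
    # n_gpu_layers >= 1 offloads that many transformer layers to the GPU,
    # which is the value this commit sets to 16
    model_kwargs={"n_gpu_layers": 16},
    # transform inputs into the Llama 2 chat prompt format
    messages_to_prompt=messages_to_prompt,
    completion_to_prompt=completion_to_prompt,
    verbose=True,
)

With llama-cpp-python, n_gpu_layers=0 keeps inference entirely on the CPU and a negative value offloads every layer, so the commit's value of 16 is a partial offload, presumably chosen to balance GPU memory use against generation speed on the Space's hardware.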