Akjava committed
Commit 5e8e544 · verified · 1 Parent(s): 3ce5ec4

Update app.py

Files changed (1): app.py (+1 -1)
app.py CHANGED
@@ -88,7 +88,7 @@ def respond(
     model_id = "ggml-model-Q6_K.gguf"
     llama = Llama(f"models/{model_id}",flash_attn=False,
                   n_gpu_layers=0,
-                  n_ctx=max_tokens
+                  n_ctx=max_tokens,
                   n_threads=2,
                   n_threads_batch=2)
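Why the change: Python keyword arguments must be comma-separated, so the old line left n_ctx=max_tokens running straight into n_threads=2 and the call would not parse. Below is a minimal, self-contained sketch of the corrected Llama call, assuming llama-cpp-python is installed, a GGUF file exists at models/ggml-model-Q6_K.gguf, and a placeholder max_tokens value standing in for the argument that respond() receives in app.py:

from llama_cpp import Llama

max_tokens = 2048  # placeholder; in app.py this comes from respond()'s arguments

model_id = "ggml-model-Q6_K.gguf"
llama = Llama(
    f"models/{model_id}",   # path to the local GGUF model file
    flash_attn=False,
    n_gpu_layers=0,         # CPU-only inference
    n_ctx=max_tokens,       # trailing comma restored so the following kwargs parse
    n_threads=2,
    n_threads_batch=2,
)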