pabloce committed on
Commit
0fd9e08
·
verified ·
1 Parent(s): 2bd7c02

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -7,7 +7,7 @@ from huggingface_hub import hf_hub_download
7
  subprocess.run('pip install llama-cpp-python==0.2.75 --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cu124', shell=True)
8
  subprocess.run('pip install llama-cpp-agent==0.2.10', shell=True)
9
 
10
- hf_hub_download(repo_id="bartowski/Meta-Llama-3-70B-Instruct-GGUF", filename="Meta-Llama-3-70B-Instruct-Q4_K_M.gguf", local_dir = "./models")
11
 
12
  @spaces.GPU(duration=120)
13
  def respond(
@@ -26,7 +26,7 @@ def respond(
26
  from llama_cpp_agent.chat_history.messages import Roles
27
 
28
  llm = Llama(
29
- model_path="models/Meta-Llama-3-70B-Instruct-Q4_K_M.gguf",
30
  n_gpu_layers=81,
31
  )
32
  provider = LlamaCppPythonProvider(llm)
 
7
  subprocess.run('pip install llama-cpp-python==0.2.75 --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cu124', shell=True)
8
  subprocess.run('pip install llama-cpp-agent==0.2.10', shell=True)
9
 
10
+ hf_hub_download(repo_id="bartowski/Meta-Llama-3-70B-Instruct-GGUF", filename="Meta-Llama-3-70B-Instruct-Q3_K_M.gguf", local_dir = "./models")
11
 
12
  @spaces.GPU(duration=120)
13
  def respond(
 
26
  from llama_cpp_agent.chat_history.messages import Roles
27
 
28
  llm = Llama(
29
+ model_path="models/Meta-Llama-3-70B-Instruct-Q3_K_M.gguf",
30
  n_gpu_layers=81,
31
  )
32
  provider = LlamaCppPythonProvider(llm)