pabloce committed
Commit 6e7a7b9 · verified · 1 Parent(s): ddf5fad

Update app.py

Files changed (1)
  1. app.py +5 -9
app.py CHANGED
@@ -1,12 +1,14 @@
 import spaces
 import json
 import subprocess
+from llama_cpp import Llama
+from llama_cpp_agent import LlamaCppAgent
+from llama_cpp_agent.providers import LlamaCppPythonProvider
+from llama_cpp_agent.chat_history import BasicChatHistory
+from llama_cpp_agent.chat_history.messages import Roles
 import gradio as gr
 from huggingface_hub import hf_hub_download
 
-subprocess.run('pip install llama-cpp-python==0.2.75 --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cu124', shell=True)
-subprocess.run('pip install llama-cpp-agent==0.2.10', shell=True)
-
 hf_hub_download(
     repo_id="bartowski/Meta-Llama-3-70B-Instruct-GGUF",
     filename="Meta-Llama-3-70B-Instruct-Q3_K_M.gguf",
@@ -62,12 +64,6 @@ def respond(
     repeat_penalty,
     model,
 ):
-    from llama_cpp import Llama
-    from llama_cpp_agent import LlamaCppAgent
-    from llama_cpp_agent.providers import LlamaCppPythonProvider
-    from llama_cpp_agent.chat_history import BasicChatHistory
-    from llama_cpp_agent.chat_history.messages import Roles
-
     chat_template = get_messages_formatter_type(model)
 
     llm = Llama(
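
The commit removes the runtime pip installs (presumably the pinned llama-cpp-python / llama-cpp-agent versions are declared in the Space's requirements.txt instead, which this commit does not show) and hoists the llama_cpp / llama_cpp_agent imports from inside respond() to module level. For reference, below is a minimal sketch of how these imports are typically wired together under the llama-cpp-agent 0.2.x API. The model path, GPU/context settings, system prompt, and formatter choice are placeholders rather than values taken from this commit, and note that app.py builds its Llama instance inside respond() rather than at import time.

from llama_cpp import Llama
from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
from llama_cpp_agent.providers import LlamaCppPythonProvider
from llama_cpp_agent.chat_history import BasicChatHistory

# Load the GGUF model (placeholder path and settings; app.py does this inside respond()).
llm = Llama(
    model_path="./models/Meta-Llama-3-70B-Instruct-Q3_K_M.gguf",
    n_gpu_layers=-1,  # placeholder: offload all layers when a GPU is available
    n_ctx=8192,
)
provider = LlamaCppPythonProvider(llm)

# The agent pairs the provider with a chat template; app.py selects the template via its
# own get_messages_formatter_type(model) helper, so LLAMA_3 here is only a stand-in.
agent = LlamaCppAgent(
    provider,
    system_prompt="You are a helpful assistant.",  # placeholder prompt
    predefined_messages_formatter_type=MessagesFormatterType.LLAMA_3,
)

# Sampling settings come from the provider; enable streaming for token-by-token output.
settings = provider.get_provider_default_settings()
settings.stream = True

history = BasicChatHistory()
for chunk in agent.get_chat_response(
    "Hello!",
    llm_sampling_settings=settings,
    chat_history=history,
    returns_streaming_generator=True,
    print_output=False,
):
    print(chunk, end="")

With the imports at module level, this setup code no longer has to wait on a per-request import inside respond(), which is the direction the diff takes.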