pabloce committed on
Commit
2cd3649
·
verified ·
1 Parent(s): 246d0fd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -5,7 +5,7 @@ import gradio as gr
5
  from huggingface_hub import hf_hub_download
6
 
7
  subprocess.run('pip install llama-cpp-python==0.2.75 --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cu124', shell=True)
8
- subprocess.run('pip install llama-cpp-agent==0.2.8', shell=True)
9
 
10
  hf_hub_download(repo_id="TheBloke/Mistral-7B-Instruct-v0.2-GGUF", filename="mistral-7b-instruct-v0.2.Q6_K.gguf", local_dir = "./models")
11
 
@@ -54,7 +54,7 @@ def respond(
54
  print(dic)
55
  messages.add_message(dic)
56
 
57
- stream = agent.get_chat_response(message, llm_sampling_settings=settings, returns_streaming_generator=True, print_output=False)
58
 
59
  outputs = ""
60
  for output in stream:
 
5
  from huggingface_hub import hf_hub_download
6
 
7
  subprocess.run('pip install llama-cpp-python==0.2.75 --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cu124', shell=True)
8
+ subprocess.run('pip install llama-cpp-agent==0.2.10', shell=True)
9
 
10
  hf_hub_download(repo_id="TheBloke/Mistral-7B-Instruct-v0.2-GGUF", filename="mistral-7b-instruct-v0.2.Q6_K.gguf", local_dir = "./models")
11
 
 
54
  print(dic)
55
  messages.add_message(dic)
56
 
57
+ stream = agent.get_chat_response(message, llm_sampling_settings=settings, chat_history=messages, returns_streaming_generator=True, print_output=False)
58
 
59
  outputs = ""
60
  for output in stream: