pabloce committed
Commit ac70b49 · verified · 1 Parent(s): b9838b1

Update app.py

Files changed (1):
  1. app.py +12 -1
app.py CHANGED
@@ -22,6 +22,7 @@ def respond(
     from llama_cpp_agent import LlamaCppAgent
     from llama_cpp_agent import MessagesFormatterType
     from llama_cpp_agent.providers import LlamaCppPythonProvider
+    from llama_cpp_agent.chat_history import BasicChatHistory
 
     llm = Llama(
         model_path="models/mistral-7b-instruct-v0.2.Q6_K.gguf",
@@ -39,8 +40,18 @@ def respond(
     settings = provider.get_provider_default_settings()
     settings.max_tokens = 2000
     settings.stream = True
+
+    messages = BasicChatHistory()
+    print(history)
+
+    for msn in history:
+        dic = {
+            'role': msn[0],
+            'content': msn[1]
+        }
+        messages.add_message(dic)
 
-    stream = agent.get_chat_response(message, llm_sampling_settings=settings, returns_streaming_generator=True)
+    stream = agent.get_chat_response(message, llm_sampling_settings=settings, chat_history=messages, returns_streaming_generator=True)
 
     outputs = ""
     for output in stream:
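
For context, a minimal sketch of the whole respond() as it might look after this commit. The diff shows only the changed hunks, so the function signature, the agent construction (system prompt, MISTRAL formatter), and the trailing yield loop are assumptions, not part of the commit; the BasicChatHistory handling mirrors the added lines, which treat each history entry as a (role, content) pair.

# Hedged sketch of respond() after this commit; lines marked "assumed" are
# not in the diff and only illustrate how the pieces could fit together.
def respond(message, history):  # assumed Gradio-style signature
    # Imports live inside the function, as the diff shows.
    from llama_cpp import Llama
    from llama_cpp_agent import LlamaCppAgent
    from llama_cpp_agent import MessagesFormatterType
    from llama_cpp_agent.providers import LlamaCppPythonProvider
    from llama_cpp_agent.chat_history import BasicChatHistory

    llm = Llama(
        model_path="models/mistral-7b-instruct-v0.2.Q6_K.gguf",
    )
    provider = LlamaCppPythonProvider(llm)
    agent = LlamaCppAgent(
        provider,
        system_prompt="You are a helpful assistant.",  # assumed placeholder
        predefined_messages_formatter_type=MessagesFormatterType.MISTRAL,  # assumed
    )

    settings = provider.get_provider_default_settings()
    settings.max_tokens = 2000
    settings.stream = True

    # Replay prior turns into the agent's history, as the commit does.
    messages = BasicChatHistory()
    print(history)  # debug print carried over from the commit
    for msn in history:
        messages.add_message({'role': msn[0], 'content': msn[1]})

    stream = agent.get_chat_response(
        message,
        llm_sampling_settings=settings,
        chat_history=messages,
        returns_streaming_generator=True,
    )

    outputs = ""
    for output in stream:
        outputs += output
        yield outputs  # assumed: stream partial text back to the UI

One caveat worth noting: if history arrives as Gradio ChatInterface pairs of (user_text, assistant_text), then msn[0] is user text rather than a role name, and each pair would need to be expanded into separate user and assistant messages before calling add_message.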