pabloce committed
Commit 6c9b3f7 · verified · parent 20f7e9c

Update app.py

Files changed (1): app.py (+37, -21)
app.py CHANGED
@@ -1,18 +1,21 @@
 import spaces
 import gradio as gr
-from llama_cpp import Llama
-from llama_cpp_agent import LlamaCppAgent
-from llama_cpp_agent import MessagesFormatterType
-from llama_cpp_agent.providers import LlamaCppPythonProvider
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
+from threading import Thread
 
 """
 For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
 """
 # client = InferenceClient("cognitivecomputations/dolphin-2.8-mistral-7b-v02")
-llama_model = Llama(r"Meta-Llama-3-8B.Q5_K_M.gguf", n_batch=1024, n_threads=4, n_gpu_layers=33, n_ctx=8192, verbose=False)
-
-provider = LlamaCppPythonProvider(llama_model)
 
+def format_prompt(message, history):
+    prompt = "<s>"
+    for user_prompt, bot_response in history:
+        prompt += f"[INST] {user_prompt} [/INST]"
+        prompt += f" {bot_response}</s> "
+    prompt += f"[INST] {message} [/INST]"
+    return prompt
 
 @spaces.GPU
 def respond(
@@ -23,21 +26,34 @@ def respond(
     temperature,
     top_p,
 ):
-    agent = LlamaCppAgent(
-        provider,
-        system_prompt=system_message,
-        predefined_messages_formatter_type=MessagesFormatterType.LLAMA_3,
-        debug_output=True
-    )
+    torch.set_default_device("cuda")
 
-    settings = provider.get_provider_default_settings()
+    model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.2", torch_dtype="auto", load_in_4bit=True, trust_remote_code=True)
+    tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.2", trust_remote_code=True)
+
+    history_transformer_format = history + [[message, ""]]
 
-    settings.max_tokens = max_tokens
-    settings.temperature = temperature
-    settings.top_p = top_p
-
-    agent_output = agent.get_chat_response(message, llm_sampling_settings=settings)
-    yield agent_output.strip()
+    messages = system_prompt + "".join(["".join(["\n[INST]" + item[0], "[/INST]\n" + item[1] + "</s>"]) for item in history_transformer_format])
+    input_ids = tokenizer([messages], return_tensors="pt").to('cuda')
+    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
+    generate_kwargs = dict(
+        input_ids,
+        streamer=streamer,
+        max_new_tokens=max_tokens,
+        do_sample=True,
+        top_p=top_p,
+        top_k=50,
+        temperature=temperature,
+        num_beams=1
+    )
+    t = Thread(target=model.generate, kwargs=generate_kwargs)
+    t.start()
+    partial_message = ""
+    for new_token in streamer:
+        partial_message += new_token
+        if '<|im_end|>' in partial_message:
+            break
+    yield partial_message
 
 """
 For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
@@ -45,7 +61,7 @@ For information on how to customize the ChatInterface, peruse the gradio docs: h
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[
-        gr.Textbox(value="You are a helpful assistant.", label="System message"),
+        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
         gr.Slider(
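For reference, the [INST]/[/INST] turn layout that the new format_prompt helper produces can be checked standalone; the two-turn history below is made up for illustration:

# Hypothetical usage of format_prompt from the new app.py.
history = [("Hi", "Hello! How can I help?")]
print(format_prompt("What is Gradio?", history))
# -> <s>[INST] Hi [/INST] Hello! How can I help?</s> [INST] What is Gradio? [/INST]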
 
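The core of the change is the threaded streaming pattern in respond: model.generate blocks until generation finishes, so the commit runs it on a worker thread and lets TextIteratorStreamer hand decoded text back to the generator. A minimal, self-contained sketch of that pattern, assuming a hypothetical mistralai/Mistral-7B-Instruct-v0.2 checkpoint and omitting the 4-bit loading and Gradio wiring used in the commit:

import torch
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

model_id = "mistralai/Mistral-7B-Instruct-v0.2"  # assumption: any causal LM works here
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype="auto", device_map="auto")

inputs = tokenizer("[INST] Tell me a joke. [/INST]", return_tensors="pt").to(model.device)
# skip_prompt=True makes the streamer yield only newly generated text.
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

# generate() blocks until done, so it runs in a worker thread while the
# main thread consumes decoded chunks from the streamer as they arrive.
thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=64))
thread.start()
for chunk in streamer:
    print(chunk, end="", flush=True)
thread.join()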
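The diff view is cut off at the final gr.Slider(. A hypothetical completion of the interface wiring, assuming the truncated slider controls top_p (the remaining respond parameter); the slider values and launch() call are illustrative, not from the commit:

# Hypothetical completion; the top_p slider and launch() call are assumptions.
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)

if __name__ == "__main__":
    demo.launch()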