Elouarn committed on
Commit
b30941e
·
verified ·
1 Parent(s): 0fd93f4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -5
app.py CHANGED
@@ -3,16 +3,21 @@ import gradio as gr
3
 
4
  client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
5
 
6
def format_prompt(message, history):
    """Build a Mixtral-instruct prompt string from the new message and chat history.

    Each past (user, bot) turn is rendered as "[INST] user [/INST] bot</s> ",
    and the new message is appended as a final open "[INST] ... [/INST]" block.
    """
    pieces = ["<s>"]
    for past_user, past_bot in history:
        # One closed conversation turn per history entry.
        pieces.append(f"[INST] {past_user} [/INST] {past_bot}</s> ")
    # The current message is left unanswered for the model to complete.
    pieces.append(f"[INST] {message} [/INST]")
    return "".join(pieces)
13
 
14
  def generate(
15
- prompt, history, temperature=0.2, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
16
  ):
17
  temperature = float(temperature)
18
  if temperature < 1e-2:
@@ -28,7 +33,7 @@ def generate(
28
  seed=42,
29
  )
30
 
31
- formatted_prompt = format_prompt(prompt, history)
32
 
33
  stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
34
  output = ""
@@ -40,7 +45,7 @@ def generate(
40
 
41
 
42
# Chatbot widget: custom avatars, compact bubbles, copy + like buttons enabled.
mychatbot = gr.Chatbot(
    avatar_images=["./user.png", "./botm.png"],  # [user avatar, bot avatar]
    bubble_full_width=False,
    show_label=False,
    show_copy_button=True,
    likeable=True,
)
44
 
45
  demo = gr.ChatInterface(fn=generate,
46
  chatbot=mychatbot,
@@ -49,4 +54,4 @@ demo = gr.ChatInterface(fn=generate,
49
  undo_btn=None
50
  )
51
 
52
# Enable request queuing, then start the Gradio server with the API page exposed.
queued_app = demo.queue()
queued_app.launch(show_api=True)
 
3
 
4
  client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
5
 
6
def format_prompt(preprompt, message, history):
    """Build a Mixtral-instruct prompt from an optional preprompt, the new
    message, and the chat history.

    A truthy ``preprompt`` is emitted as a leading "[INST] ... [/INST]" block;
    each past (user, bot) turn becomes "[INST] user [/INST] bot</s> "; the new
    message is appended as a final open "[INST] ... [/INST]" block.
    """
    parts = ["<s>"]
    if preprompt:
        # NOTE(review): this produces two consecutive [INST] blocks with no
        # model reply between them — confirm the Mixtral template tolerates it.
        parts.append(f"[INST] {preprompt} [/INST]")
    for past_user, past_bot in history:
        parts.append(f"[INST] {past_user} [/INST] {past_bot}</s> ")
    # The current message is left unanswered for the model to complete.
    parts.append(f"[INST] {message} [/INST]")
    return "".join(parts)
18
 
19
  def generate(
20
+ prompt, history, temperature=0.2, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0, preprompt=None
21
  ):
22
  temperature = float(temperature)
23
  if temperature < 1e-2:
 
33
  seed=42,
34
  )
35
 
36
+ formatted_prompt = format_prompt(preprompt, prompt, history)
37
 
38
  stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
39
  output = ""
 
45
 
46
 
47
# Chatbot widget: themed avatars, compact bubbles, copy + like buttons enabled.
mychatbot = gr.Chatbot(
    avatar_images=["./berger.jpg", "./tavernier.jpg"],  # [user avatar, bot avatar]
    bubble_full_width=False,
    show_label=False,
    show_copy_button=True,
    likeable=True,
)
49
 
50
  demo = gr.ChatInterface(fn=generate,
51
  chatbot=mychatbot,
 
54
  undo_btn=None
55
  )
56
 
57
# BUG FIX: Gradio's ChatInterface/Blocks exposes no `serve` method, and `launch`
# accepts no `preprompt` keyword — the original `demo.serve(preprompt=...)` call
# raised AttributeError at startup. Restore queue + launch; a system preprompt
# must instead reach `generate` through its `preprompt` parameter (e.g. via an
# additional input component wired into the ChatInterface).
demo.queue().launch(show_api=True)