kamran-r123 committed on
Commit
6419c7c
·
1 Parent(s): edcd873

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -5
app.py CHANGED
@@ -6,14 +6,22 @@ client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
6
  def tokenize(text):
7
  return text
8
  # return tok.encode(text, add_special_tokens=False)
9
-
10
  def format_prompt(message, history):
11
- prompt = ""
12
  for user_prompt, bot_response in history:
13
- prompt += "<s>" + tokenize("[INST]") + tokenize(user_prompt) + tokenize("[/INST]")
14
- prompt += tokenize(bot_response) + "</s> "
15
- prompt += tokenize("[INST]") + tokenize(message) + tokenize("[/INST]")
16
  return prompt
 
 
 
 
 
 
 
 
17
 
18
  def generate(prompt, history, temperature=0.2, max_new_tokens=512, top_p=0.95, repetition_penalty=1.0):
19
  temperature = float(temperature)
 
6
def tokenize(text):
    """Pass *text* through unchanged.

    Placeholder for real tokenization: the commented call below shows the
    intended tokenizer-based encoding that this stub currently bypasses.
    """
    return text
    # return tok.encode(text, add_special_tokens=False)
9
+
def format_prompt(message, history):
    """Build a Mixtral-instruct prompt from prior turns plus the new message.

    Each (user, assistant) pair from *history* is rendered as an
    ``[INST] ... [/INST]`` block followed by the assistant reply and an
    ``</s>`` end-of-sequence marker; the fresh *message* is appended as a
    final, unanswered instruction. Returns the assembled prompt string.
    """
    pieces = ["<s>"]
    for past_user, past_bot in history:
        pieces.append(f"[INST] {past_user} [/INST]")
        pieces.append(f" {past_bot}</s> ")
    pieces.append(f"[INST] {message} [/INST]")
    return "".join(pieces)
17
+
18
+ # def format_prompt(message, history):
19
+ # prompt = ""
20
+ # for user_prompt, bot_response in history:
21
+ # prompt += "<s>" + tokenize("[INST]") + tokenize(user_prompt) + tokenize("[/INST]")
22
+ # prompt += tokenize(bot_response) + "</s> "
23
+ # prompt += tokenize("[INST]") + tokenize(message) + tokenize("[/INST]")
24
+ # return prompt
25
 
26
  def generate(prompt, history, temperature=0.2, max_new_tokens=512, top_p=0.95, repetition_penalty=1.0):
27
  temperature = float(temperature)