tomasmcm committed
Commit 2d2ed27 · verified · 1 Parent(s): da94347

Update app.py

Files changed (1)
  1. app.py +6 -2
app.py CHANGED
@@ -5,12 +5,14 @@ client = InferenceClient(
     "cognitivecomputations/dolphin-2.8-gemma-2b"
 )
 
+assistantStart = "<|im_start|>assistant\n"
+
 def format_prompt(system, message, history):
     prompt = f"<|im_start|>system\n{system}<|im_end|>\n"
     for user_prompt, bot_response in history:
         prompt += f"<|im_start|>user\n{user_prompt}<|im_end|>\n"
         prompt += f"<|im_start|>assistant\n{bot_response}<|im_end|>\n"
-    prompt += f"<|im_start|>user\n{message}<|im_end|>\n<|im_start|>assistant\n"
+    prompt += f"<|im_start|>user\n{message}<|im_end|>\n{assistantStart}"
     return prompt
 
 def generate(
@@ -33,7 +35,9 @@ def generate(
 
     formatted_prompt = format_prompt(system_prompt, prompt, history)
     output = client.text_generation(formatted_prompt, **generate_kwargs)
-    return output
+    parsed = output.split(assistantStart, 1)
+    response = parsed[1] if len(parsed) > 1 else ""
+    return response
 
 
 additional_inputs=[
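
To see the change end to end, here is a small self-contained sketch. format_prompt and the split logic are reproduced from the diff; the system prompt, user message, and simulated model output are made-up values, and the sketch assumes a backend that echoes the prompt in front of the completion (the case the new parsing appears to handle):

assistantStart = "<|im_start|>assistant\n"

def format_prompt(system, message, history):
    # Build a ChatML transcript: system turn, prior exchanges, then the
    # new user turn, ending with an open assistant turn for the model.
    prompt = f"<|im_start|>system\n{system}<|im_end|>\n"
    for user_prompt, bot_response in history:
        prompt += f"<|im_start|>user\n{user_prompt}<|im_end|>\n"
        prompt += f"<|im_start|>assistant\n{bot_response}<|im_end|>\n"
    prompt += f"<|im_start|>user\n{message}<|im_end|>\n{assistantStart}"
    return prompt

formatted = format_prompt("You are a helpful assistant.", "Tell me a joke.", [])

# Hypothetical raw output: the echoed prompt followed by the completion.
output = formatted + "Why did the chicken cross the road?"

# Same parsing as the commit: keep only what follows the assistant marker,
# falling back to an empty string if the marker is absent.
parsed = output.split(assistantStart, 1)
response = parsed[1] if len(parsed) > 1 else ""
print(response)  # Why did the chicken cross the road?

Note that split(assistantStart, 1) cuts at the first occurrence of the marker, which is why the sketch uses an empty history; if earlier assistant turns were echoed back as well, the slice would start at the earliest marker rather than the final one.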