Locutusque committed on
Commit
b6f59a7
·
verified ·
1 Parent(s): 6615fb0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -1
app.py CHANGED
@@ -10,6 +10,7 @@ def load_model(model_name):
10
  return pipeline("text-generation", model=model_name, device_map="cuda", torch_dtype=torch.bfloat16, trust_remote_code=True, token=os.environ["token"], use_fast=True)
11
  @spaces.GPU()
12
  def generate(
 
13
  history,
14
  model_name,
15
  system,
@@ -28,7 +29,7 @@ def generate(
28
  prompt = f"<|im_start|>system\n{system}<|im_end|>\n"
29
  for (user_turn, assistant_turn) in history:
30
  prompt += f"<|im_start|>user\n{user_turn}<|im_end|>\n<|im_start|>assistant\n{assistant_turn}<|im_end|>\n"
31
- prompt += f"<|im_start|>user\n{history[-1][0]}<|im_end|>\n<|im_start|>assistant\n"
32
 
33
  streamer = TextIteratorStreamer(pipe.tokenizer, timeout=240.0, skip_prompt=True, skip_special_tokens=True)
34
  generation_kwargs = dict(
 
10
  return pipeline("text-generation", model=model_name, device_map="cuda", torch_dtype=torch.bfloat16, trust_remote_code=True, token=os.environ["token"], use_fast=True)
11
  @spaces.GPU()
12
  def generate(
13
+ message,
14
  history,
15
  model_name,
16
  system,
 
29
  prompt = f"<|im_start|>system\n{system}<|im_end|>\n"
30
  for (user_turn, assistant_turn) in history:
31
  prompt += f"<|im_start|>user\n{user_turn}<|im_end|>\n<|im_start|>assistant\n{assistant_turn}<|im_end|>\n"
32
+ prompt += f"<|im_start|>user\n{message}<|im_end|>\n<|im_start|>assistant\n"
33
 
34
  streamer = TextIteratorStreamer(pipe.tokenizer, timeout=240.0, skip_prompt=True, skip_special_tokens=True)
35
  generation_kwargs = dict(