research14 committed on
Commit
d4e59c1
·
1 Parent(s): df3b804
Files changed (1) hide show
  1. app.py +10 -1
app.py CHANGED
@@ -8,6 +8,14 @@ model_name = "lmsys/vicuna-7b-v1.3"
8
  model = AutoModelForCausalLM.from_pretrained(model_name)
9
  tokenizer = AutoTokenizer.from_pretrained(model_name)
10
 
 
 
 
 
 
 
 
 
11
  with gr.Blocks() as demo:
12
  gr.Markdown("# LLM Evaluator With Linguistic Scrutiny")
13
 
@@ -60,6 +68,7 @@ with gr.Blocks() as demo:
60
  time.sleep(2)
61
  return "", chat_history
62
 
63
- prompt.submit(respond, [prompt, vicuna_chatbot1, vicuna_chatbot1_chunk])
 
64
 
65
  demo.launch()
 
8
  model = AutoModelForCausalLM.from_pretrained(model_name)
9
  tokenizer = AutoTokenizer.from_pretrained(model_name)
10
 
11
def respond_vicuna(message, chat_history, vicuna_chatbot):
    """Generate a Vicuna reply to *message* and append it to the chat history.

    Returns ("", chat_history): the empty string clears the prompt textbox,
    and the updated history refreshes the chatbot component.
    NOTE(review): vicuna_chatbot is accepted but unused in this body.
    """
    # Encode the prompt, then generate with beam search (5 beams, length
    # capped at 50 tokens, 2-gram repetition blocked).
    encoded = tokenizer.encode(message, return_tensors="pt")
    generated = model.generate(
        encoded, max_length=50, num_beams=5, no_repeat_ngram_size=2
    )
    reply = tokenizer.decode(generated[0], skip_special_tokens=True)
    chat_history.append((message, reply))
    time.sleep(2)  # presumably a deliberate UI pacing delay — confirm
    return "", chat_history
18
+
19
  with gr.Blocks() as demo:
20
  gr.Markdown("# LLM Evaluator With Linguistic Scrutiny")
21
 
 
68
  time.sleep(2)
69
  return "", chat_history
70
 
71
+ # Replace the old respond function with the new general function for Vicuna
72
+ prompt.submit(lambda message, chat_history: respond_vicuna(message, chat_history, vicuna_chatbot1), [prompt, vicuna_chatbot1, vicuna_chatbot1_chunk])
73
 
74
  demo.launch()