research14 committed on
Commit
df3b804
·
1 Parent(s): 28ca6ce

strategies

Browse files
Files changed (1) hide show
  1. app.py +32 -10
app.py CHANGED
@@ -12,25 +12,47 @@ with gr.Blocks() as demo:
12
  gr.Markdown("# LLM Evaluator With Linguistic Scrutiny")
13
 
14
  with gr.Tab("POS"):
 
15
  with gr.Row():
16
- vicuna_chatbot = gr.Chatbot(label="vicuna-7b", live=True)
17
- llama_chatbot = gr.Chatbot(label="llama-7b", live=False)
18
- gpt_chatbot = gr.Chatbot(label="gpt-3.5", live=False)
 
 
 
 
 
 
 
 
 
 
19
  with gr.Row():
20
  prompt = gr.Textbox(show_label=False, placeholder="Enter prompt")
21
  send_button_POS = gr.Button("Send", scale=0)
22
- clear = gr.ClearButton([prompt, vicuna_chatbot])
23
  with gr.Tab("Chunk"):
 
 
 
 
 
 
 
 
 
 
 
24
  with gr.Row():
25
- vicuna_chatbot_chunk = gr.Chatbot(label="vicuna-7b", live=True)
26
- llama_chatbot_chunk = gr.Chatbot(label="llama-7b", live=False)
27
- gpt_chatbot_chunk = gr.Chatbot(label="gpt-3.5", live=False)
28
  with gr.Row():
29
  prompt_chunk = gr.Textbox(show_label=False, placeholder="Enter prompt")
30
  send_button_Chunk = gr.Button("Send", scale=0)
31
- clear = gr.ClearButton([prompt_chunk, vicuna_chatbot_chunk])
32
 
33
- def respond(message, chat_history, chatbot):
34
  input_ids = tokenizer.encode(message, return_tensors="pt")
35
  output = model.generate(input_ids, max_length=50, num_beams=5, no_repeat_ngram_size=2)
36
  bot_message = tokenizer.decode(output[0], skip_special_tokens=True)
@@ -38,6 +60,6 @@ with gr.Blocks() as demo:
38
  time.sleep(2)
39
  return "", chat_history
40
 
41
- prompt.submit(respond, [prompt, vicuna_chatbot, vicuna_chatbot_chunk])
42
 
43
  demo.launch()
 
12
  gr.Markdown("# LLM Evaluator With Linguistic Scrutiny")
13
 
14
  with gr.Tab("POS"):
15
+ gr.Markdown("Strategy 1 QA")
16
  with gr.Row():
17
+ vicuna_chatbot1 = gr.Chatbot(label="vicuna-7b", live=True)
18
+ llama_chatbot1 = gr.Chatbot(label="llama-7b", live=False)
19
+ gpt_chatbot1 = gr.Chatbot(label="gpt-3.5", live=False)
20
+ gr.Markdown("Strategy 2 Instruction")
21
+ with gr.Row():
22
+ vicuna_chatbot2 = gr.Chatbot(label="vicuna-7b", live=True)
23
+ llama_chatbot2 = gr.Chatbot(label="llama-7b", live=False)
24
+ gpt_chatbot2 = gr.Chatbot(label="gpt-3.5", live=False)
25
+ gr.Markdown("Strategy 3 Structured Prompting")
26
+ with gr.Row():
27
+ vicuna_chatbot3 = gr.Chatbot(label="vicuna-7b", live=True)
28
+ llama_chatbot3 = gr.Chatbot(label="llama-7b", live=False)
29
+ gpt_chatbot3 = gr.Chatbot(label="gpt-3.5", live=False)
30
  with gr.Row():
31
  prompt = gr.Textbox(show_label=False, placeholder="Enter prompt")
32
  send_button_POS = gr.Button("Send", scale=0)
33
+ clear = gr.ClearButton([prompt, vicuna_chatbot1])
34
  with gr.Tab("Chunk"):
35
+ gr.Markdown("Strategy 1 QA")
36
+ with gr.Row():
37
+ vicuna_chatbot1_chunk = gr.Chatbot(label="vicuna-7b", live=True)
38
+ llama_chatbot1_chunk = gr.Chatbot(label="llama-7b", live=False)
39
+ gpt_chatbot1_chunk = gr.Chatbot(label="gpt-3.5", live=False)
40
+ gr.Markdown("Strategy 2 Instruction")
41
+ with gr.Row():
42
+ vicuna_chatbot2_chunk = gr.Chatbot(label="vicuna-7b", live=True)
43
+ llama_chatbot2_chunk = gr.Chatbot(label="llama-7b", live=False)
44
+ gpt_chatbot2_chunk = gr.Chatbot(label="gpt-3.5", live=False)
45
+ gr.Markdown("Strategy 3 Structured Prompting")
46
  with gr.Row():
47
+ vicuna_chatbot3_chunk = gr.Chatbot(label="vicuna-7b", live=True)
48
+ llama_chatbot3_chunk = gr.Chatbot(label="llama-7b", live=False)
49
+ gpt_chatbot3_chunk = gr.Chatbot(label="gpt-3.5", live=False)
50
  with gr.Row():
51
  prompt_chunk = gr.Textbox(show_label=False, placeholder="Enter prompt")
52
  send_button_Chunk = gr.Button("Send", scale=0)
53
+ clear = gr.ClearButton([prompt_chunk, vicuna_chatbot1_chunk])
54
 
55
+ def respond(message, chat_history):
56
  input_ids = tokenizer.encode(message, return_tensors="pt")
57
  output = model.generate(input_ids, max_length=50, num_beams=5, no_repeat_ngram_size=2)
58
  bot_message = tokenizer.decode(output[0], skip_special_tokens=True)
 
60
  time.sleep(2)
61
  return "", chat_history
62
 
63
+ prompt.submit(respond, [prompt, vicuna_chatbot1, vicuna_chatbot1_chunk])
64
 
65
  demo.launch()