research14 committed on
Commit cab4ff3 · 1 Parent(s): 3e98569
Files changed (1)
  1. app.py +20 -8
app.py CHANGED
@@ -9,14 +9,26 @@ model = AutoModelForCausalLM.from_pretrained(model_name)
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 
 with gr.Blocks() as demo:
-    with gr.Row():
-        vicuna_chatbot = gr.Chatbot(label="vicuna-7b", live=False)
-        llama_chatbot = gr.Chatbot(label="llama-7b", live=False)
-        gpt_chatbot = gr.Chatbot(label="gpt-3.5", live=False)
-    with gr.Row():
-        prompt = gr.Textbox(show_label=False, placeholder="Enter prompt")
-        send_button_Chunk = gr.Button("Send", scale=0)
-        clear = gr.ClearButton([prompt, vicuna_chatbot])
+    gr.Markdown("<center># LLM Evaluator With Linguistic Scrutiny</center>")
+
+    with gr.Tab("POS"):
+        with gr.Row():
+            vicuna_chatbot = gr.Chatbot(label="vicuna-7b", live=False)
+            llama_chatbot = gr.Chatbot(label="llama-7b", live=False)
+            gpt_chatbot = gr.Chatbot(label="gpt-3.5", live=False)
+        with gr.Row():
+            prompt = gr.Textbox(show_label=False, placeholder="Enter prompt")
+            send_button_Chunk = gr.Button("Send", scale=0)
+            clear = gr.ClearButton([prompt, vicuna_chatbot])
+    with gr.Tab("Chunk"):
+        with gr.Row():
+            vicuna_chatbot = gr.Chatbot(label="vicuna-7b", live=False)
+            llama_chatbot = gr.Chatbot(label="llama-7b", live=False)
+            gpt_chatbot = gr.Chatbot(label="gpt-3.5", live=False)
+        with gr.Row():
+            prompt = gr.Textbox(show_label=False, placeholder="Enter prompt")
+            send_button_Chunk = gr.Button("Send", scale=0)
+            clear = gr.ClearButton([prompt, vicuna_chatbot])
 
 def respond(message, chat_history, chatbot_idx):
     input_ids = tokenizer.encode(message, return_tensors="pt")
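
For reference, a minimal sketch of how the truncated respond function might be completed and wired to one of the Send buttons. This is not part of the commit: the generation parameters, the lambda wrapper, and the choice of which chatbot the button updates are assumptions for illustration only.

def respond(message, chat_history, chatbot_idx):
    # Encode the prompt and generate a reply (max_new_tokens is an assumed value).
    input_ids = tokenizer.encode(message, return_tensors="pt")
    output_ids = model.generate(input_ids, max_new_tokens=128)
    reply = tokenizer.decode(output_ids[0], skip_special_tokens=True)
    # Append the (user, bot) pair that gr.Chatbot expects, and clear the textbox.
    chat_history.append((message, reply))
    return "", chat_history

# Hypothetical wiring inside the Blocks context (the commit does not show this part):
# clicking Send sends the prompt and current history, then updates both components.
send_button_Chunk.click(
    lambda message, history: respond(message, history, 0),
    inputs=[prompt, vicuna_chatbot],
    outputs=[prompt, vicuna_chatbot],
)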