File size: 3,487 Bytes
b308128
8c245db
9272cb4
f9ca505
b308128
7eaa7b0
 
 
 
04fc021
8c245db
28ca6ce
cab4ff3
 
d366b82
 
2acec65
 
 
b4af604
 
cab4ff3
df3b804
 
 
 
 
 
 
 
 
 
 
 
 
b4af604
df3b804
b4af604
cab4ff3
d366b82
 
2acec65
 
 
b4af604
df3b804
 
 
 
 
 
 
 
 
 
 
cab4ff3
df3b804
 
006127c
b4af604
df3b804
ac4f141
9272cb4
 
 
 
 
 
 
2ba8da5
 
 
9272cb4
 
2ba8da5
ac4f141
9272cb4
2ba8da5
5e8be56
8c245db
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
import gradio as gr
import random
import time
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load Vicuna 7B model and tokenizer
# NOTE(review): this runs at import time and loads full-precision weights on the
# default device (no device_map/torch_dtype given) — confirm the host has enough
# RAM, or move loading behind a function/lazy init if startup cost matters.
model_name = "lmsys/vicuna-7b-v1.3"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

with gr.Blocks() as demo:
    gr.Markdown("# LLM Evaluator With Linguistic Scrutiny")

    # ---- Tab 1: POS (part-of-speech) evaluation ----
    # Layout: one prompt row, then three rows of chatbots — one row per
    # prompting strategy, one chatbot per model (vicuna / llama / gpt-3.5).
    with gr.Tab("POS"):
        gr.Markdown(" Description ")

        with gr.Row():
            prompt = gr.Textbox(show_label=False, placeholder="Enter prompt")
            # NOTE(review): send_button_POS is never wired to a .click handler
            # in this file — confirm it is connected elsewhere or wire it up.
            send_button_POS = gr.Button("Send", scale=0)

        gr.Markdown("Strategy 1 QA")
        with gr.Row():
            # NOTE(review): `live` is not a documented gr.Chatbot parameter
            # (it belongs to gr.Interface) — verify against the installed
            # gradio version; recent versions raise on unknown kwargs.
            vicuna_chatbot1 = gr.Chatbot(label="vicuna-7b", live=True)
            llama_chatbot1 = gr.Chatbot(label="llama-7b", live=False)
            gpt_chatbot1 = gr.Chatbot(label="gpt-3.5", live=False)
        gr.Markdown("Strategy 2 Instruction")
        with gr.Row():
            vicuna_chatbot2 = gr.Chatbot(label="vicuna-7b", live=True)
            llama_chatbot2 = gr.Chatbot(label="llama-7b", live=False)
            gpt_chatbot2 = gr.Chatbot(label="gpt-3.5", live=False)
        gr.Markdown("Strategy 3 Structured Prompting")
        with gr.Row():
            vicuna_chatbot3 = gr.Chatbot(label="vicuna-7b", live=True)
            llama_chatbot3 = gr.Chatbot(label="llama-7b", live=False)
            gpt_chatbot3 = gr.Chatbot(label="gpt-3.5", live=False)
        
        # Clears the prompt box and the first Vicuna chatbot only; the other
        # eight chatbots are not cleared by this button.
        clear = gr.ClearButton([prompt, vicuna_chatbot1])
        
    # ---- Tab 2: Chunking evaluation (mirrors the POS tab layout) ----
    with gr.Tab("Chunk"):
        gr.Markdown(" Description 2 ")

        with gr.Row():
            prompt_chunk = gr.Textbox(show_label=False, placeholder="Enter prompt")
            # NOTE(review): send_button_Chunk is also never wired to a handler.
            send_button_Chunk = gr.Button("Send", scale=0)

        gr.Markdown("Strategy 1 QA")
        with gr.Row():
            vicuna_chatbot1_chunk = gr.Chatbot(label="vicuna-7b", live=True)
            llama_chatbot1_chunk = gr.Chatbot(label="llama-7b", live=False)
            gpt_chatbot1_chunk = gr.Chatbot(label="gpt-3.5", live=False)
        gr.Markdown("Strategy 2 Instruction")
        with gr.Row():
            vicuna_chatbot2_chunk = gr.Chatbot(label="vicuna-7b", live=True)
            llama_chatbot2_chunk = gr.Chatbot(label="llama-7b", live=False)
            gpt_chatbot2_chunk = gr.Chatbot(label="gpt-3.5", live=False)
        gr.Markdown("Strategy 3 Structured Prompting")
        with gr.Row():
            vicuna_chatbot3_chunk = gr.Chatbot(label="vicuna-7b", live=True)
            llama_chatbot3_chunk = gr.Chatbot(label="llama-7b", live=False)
            gpt_chatbot3_chunk = gr.Chatbot(label="gpt-3.5", live=False)
        
        # NOTE(review): this rebinds `clear`, shadowing the POS tab's button
        # variable — harmless at runtime, but rename one if either is
        # referenced later.
        clear = gr.ClearButton([prompt_chunk, vicuna_chatbot1_chunk])
    # Single generation pass for an arbitrary (model, tokenizer) pair.
    def generate_response(model, tokenizer, prompt):
        """Tokenize *prompt*, generate up to 500 total tokens, and return the decoded text.

        The raw decode is returned as-is (prompt echo and special tokens
        included), matching how the chatbots display it.
        """
        encoded = tokenizer(prompt, return_tensors="pt")
        generated = model.generate(
            **encoded,
            max_length=500,
            pad_token_id=tokenizer.eos_token_id,
        )
        return tokenizer.decode(generated[0])

    # Callback for the POS-tab prompt box: query Vicuna and build chat history.
    def chatbot_interface(prompt):
        """Generate a Vicuna reply for *prompt*.

        Returns the conversation as a list of (user, assistant) message pairs,
        which is the history format gr.Chatbot renders. (The previous version
        returned a plain dict, which Chatbot cannot display.)
        """
        vicuna_response = generate_response(model, tokenizer, prompt)
        return [(prompt, vicuna_response)]

    # Wire the submit event. gradio's signature is submit(fn, inputs, outputs):
    # the prompt text is the only input, and the generated history is rendered
    # in the first Vicuna chatbot. The original call passed chatbot components
    # as *inputs* (wrong callback arity) and declared no outputs, so the
    # response was never displayed.
    prompt.submit(chatbot_interface, [prompt], [vicuna_chatbot1])

demo.launch()