import gradio as gr
import os
import time

# Backend helpers defined elsewhere in this repo; each wraps one model API.
from llm_model import llm_predict          # custom Hugging Face text2text models
from openai_model import openai_predict    # OpenAI chat models (e.g. gpt-3.5-turbo)
from bard_model import bard_predict        # Google Bard


# Default Hugging Face model, used when no backend field is filled in.
default_model_name = "google/flan-t5-base"


def predict(input_text, input_prompt, input_model1, input_model2, input_model3):
    """Route the message to whichever backend has its field filled in (checked in order)."""
    if input_model1 != "":
        # Custom Hugging Face LLM: the field holds a model name.
        return llm_predict(input_text, input_model1)
    elif input_model2 != "":
        # OpenAI: the field holds the user's API key.
        return openai_predict(input_text, input_prompt, input_model2)
    elif input_model3 != "":
        # Google Bard: the field holds the user's API key.
        return bard_predict(input_text, input_model3)
    else:
        # Nothing entered: fall back to the default custom LLM.
        return llm_predict(input_text, default_model_name)
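
# llm_model, openai_model, and bard_model live in separate files of this repo
# and are not shown here. For orientation only, the commented-out sketches
# below show one plausible shape for each helper; the names and parameters
# are assumptions, not the actual implementations.
#
# llm_predict, assuming it wraps transformers' text2text-generation pipeline
# (matching the "google/flan-t5-base" example suggested in the UI):
#
#     from transformers import pipeline
#
#     def llm_predict(input_text, model_name):
#         # Load the requested text2text model and generate a reply.
#         generator = pipeline("text2text-generation", model=model_name)
#         return generator(input_text, max_length=256)[0]["generated_text"]
#
# openai_predict, assuming it calls the OpenAI Chat Completions API with the
# key the user typed in, using the optional custom prompt as a system message:
#
#     from openai import OpenAI
#
#     def openai_predict(input_text, input_prompt, api_key):
#         client = OpenAI(api_key=api_key)
#         messages = []
#         if input_prompt:
#             messages.append({"role": "system", "content": input_prompt})
#         messages.append({"role": "user", "content": input_text})
#         response = client.chat.completions.create(
#             model="gpt-3.5-turbo", messages=messages
#         )
#         return response.choices[0].message.content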



def select_choice(choice):
    """Show only the input row matching the selected backend (rowA/rowB/rowC below)."""
    if choice == "Custom Large Language Model (LLM)":
        return [gr.update(visible=True), gr.update(visible=False), gr.update(visible=False)]
    elif choice == "OpenAI 'GPT-3.5-Turbo' Model":
        return [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)]
    else:
        return [gr.update(visible=False), gr.update(visible=False), gr.update(visible=True)]

# One input box per backend; only the row for the selected backend is visible.
input_model1 = gr.Textbox(label="Enter LLM name:")
input_model2 = gr.Textbox(label="Enter OpenAI API KEY:")
input_model3 = gr.Textbox(label="Enter Google Bard API KEY:")


with gr.Blocks(theme=gr.themes.Glass(primary_hue="blue", secondary_hue="sky", neutral_hue="stone")) as demo:
    gr.Markdown(
    """
    # Chatbot to interact with different Large Language Models (LLMs)
    [Here](https://huggingface.co/models?pipeline_tag=text2text-generation) are some popular custom text2text large language models.  
    For example: **"google/flan-t5-base"**  
    OR your **OpenAI API KEY** to chat with **ChatGPT**  
    OR your **Google Bard API KEY** to chat with **Google Bard**
    """)
    input_models = gr.Radio(
        choices=["Custom Large Language Model (LLM)", "OpenAI 'GPT-3.5-Turbo' Model", "Google Bard Model"],
        label="Please select which model you want to use:",
        value="Custom Large Language Model (LLM)",  # must match one of the choices exactly
    )

    with gr.Row(visible=True) as row:
        with gr.Column(visible=True) as col:
            with gr.Row(visible=True) as rowA:        
                input_model1.render()
            with gr.Row(visible=False) as rowB:
                input_model2.render()
            with gr.Row(visible=False) as rowC:
                input_model3.render()
    
   
    # Avatar images (user.png, bot.png) are expected alongside this script.
    chatbot = gr.Chatbot(
        height=300,
        label="A chatbot to interact with LLM",
        avatar_images=(
            os.path.join(os.path.dirname(__file__), "user.png"),
            os.path.join(os.path.dirname(__file__), "bot.png"),
        ),
    )
    user_input = gr.Textbox(label="Enter a message:")
    clear = gr.ClearButton(value="Clear")
    custom_prompt = gr.Textbox(label="Enter a custom prompt to use:", lines=15, max_lines=20, placeholder="In just 150 words summarize this text...")
    
    input_models.change(fn=select_choice, inputs=input_models, outputs=[rowA, rowB, rowC], queue=False) 
    
    def user(user_message, chat_history):
        # Append the user's message with an empty bot slot, and clear the textbox.
        return "", chat_history + [[user_message, None]]

    def respond(chat_history, custom_prompt, input_model1, input_model2, input_model3):
        # Generate a reply for the latest user message and fill in its bot slot.
        bot_message = predict(chat_history[-1][0], custom_prompt, input_model1, input_model2, input_model3)
        chat_history[-1][1] = bot_message
        time.sleep(2)  # short pause so the pending user message renders first
        return chat_history
    
    # Two-step flow: echo the user message immediately, then compute the reply.
    user_input.submit(user, [user_input, chatbot], [user_input, chatbot], queue=False).then(
        respond, [chatbot, custom_prompt, input_model1, input_model2, input_model3], chatbot
    )

    clear.click(
        lambda: [None, None, None, None, None, None],
        outputs=[user_input, chatbot, custom_prompt, input_model1, input_model2, input_model3],
        queue=False,
    )

demo.queue()
demo.launch()