import os
import time

import gradio as gr
from transformers import pipeline
# Model used when the user does not enter a custom one
default_model_name = "google/flan-t5-base"

def predict(input_text, model_name):
    # Fall back to the default model when no custom name was entered
    if model_name == "":
        model_name = default_model_name
    # Note: this builds a fresh pipeline (and loads the model) on every call
    pipe = pipeline("text2text-generation", model=model_name)
    generated_text = pipe(input_text, max_new_tokens=1000)
    return generated_text[0]["generated_text"]
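
# The pipeline() call above reloads the model weights on every request, which
# keeps the demo simple but is slow for repeated use. A minimal caching sketch
# (a hypothetical helper, not wired into the app below):
_pipe_cache = {}

def get_pipeline(model_name):
    # Reuse an already-loaded pipeline when the same model is requested again
    if model_name not in _pipe_cache:
        _pipe_cache[model_name] = pipeline("text2text-generation", model=model_name)
    return _pipe_cache[model_name]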

with gr.Blocks() as demo:
    gr.Markdown(
        """
        # Chatbot to interact with different Large Language Models (LLMs)
        [Here](https://huggingface.co/models?pipeline_tag=text2text-generation) are some popular text2text large language models.
        Or use the default model **"google/flan-t5-base"**.
        """)
    input_model = gr.Textbox(label="Enter a custom Large Language Model name (LLM):")
    chatbot = gr.Chatbot(
        height=300,
        label="A chatbot to interact with LLMs",
        avatar_images=(os.path.join(os.path.dirname(__file__), "user.png"),
                       os.path.join(os.path.dirname(__file__), "bot.png")),
    )
    user_input = gr.Textbox()
    clear = gr.ClearButton([user_input, chatbot, input_model])

    def user(user_message, chat_history):
        # Append the new user turn (bot reply pending) and clear the textbox;
        # history is a list of [user_message, bot_message] pairs
        return "", chat_history + [[user_message, None]]

    def respond(chat_history, input_model):
        # Generate a reply to the latest user message with the chosen model
        bot_message = predict(chat_history[-1][0], input_model)
        chat_history[-1][1] = bot_message
        time.sleep(2)
        return chat_history
    # On submit: show the user turn immediately, then generate the bot reply
    user_input.submit(user, [user_input, chatbot], [user_input, chatbot], queue=False).then(
        respond, [chatbot, input_model], chatbot
    )
    # Also resets the chatbot (already covered by the ClearButton above)
    clear.click(lambda: None, None, chatbot, queue=False)

demo.queue()
demo.launch()
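
# Assumed dependencies for running this Space (a typical requirements.txt
# sketch; exact packages and pins are not part of the original file):
#   gradio
#   transformers
#   torch
#   sentencepiece  # often required by T5-family tokenizers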