from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr
import torch

title = "EZChat"
description = "A state-of-the-art large-scale pretrained response-generation model (DialoGPT-medium)"
examples = [["How are you?"]]

# Load the tokenizer and model, and reuse the EOS token for padding
# (adding a new '[EOS]' pad token would require resizing the model's
# embeddings, so we simply alias pad_token to the existing EOS token)
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")
tokenizer.pad_token = tokenizer.eos_token


def predict(input, history=[]):
    # tokenize the new user input, appending the EOS token as a turn separator
    new_user_input_ids = tokenizer.encode(
        input + tokenizer.eos_token, return_tensors="pt"
    )

    # append the new user input tokens to the chat history; the history is
    # stored as a flat list of token ids, so wrap it in an extra list to
    # rebuild the (1, seq_len) shape expected by torch.cat
    bot_input_ids = (
        torch.cat([torch.LongTensor([history]), new_user_input_ids], dim=-1)
        if history
        else new_user_input_ids
    )

    # generate a response (DialoGPT is GPT-2 based, so its context window
    # is capped at 1024 tokens)
    chat_history_ids = model.generate(
        bot_input_ids, max_length=1024, pad_token_id=tokenizer.eos_token_id
    )

    # decode only the newly generated tokens (everything after the prompt)
    response = tokenizer.decode(
        chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True
    )
    return response, chat_history_ids.tolist()[0]


# Carry the chat history between calls with a "state" component rather than
# a slider: the slider value was never used by predict, and passing a number
# in as `history` would break the torch.cat above
iface = gr.Interface(
    fn=predict,
    title=title,
    description=description,
    examples=examples,
    inputs=["text", "state"],
    outputs=["text", "state"],
    theme="ParityError/Anime",
)

iface.launch()
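
# A minimal sketch of exercising predict() directly, bypassing the Gradio UI
# (comment out iface.launch() above before running this; the names `reply`
# and `history` below are illustrative, not part of the original script):
#
# history = []
# reply, history = predict("How are you?", history)
# print(reply)
# reply, history = predict("What are you doing today?", history)
# print(reply)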