from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import gradio as gr

# Load the fine-tuned model and its tokenizer from the Hugging Face Hub
model = AutoModelForCausalLM.from_pretrained("atsnetwork/my-custom-tinyllama-chatbot")
tokenizer = AutoTokenizer.from_pretrained("atsnetwork/my-custom-tinyllama-chatbot")

def generate_response(prompt):
    # Tokenize the prompt and generate up to 100 new tokens (no gradients needed at inference)
    inputs = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=100)
    # Decode the full sequence (prompt + completion) back into text
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Expose the function through a simple text-in/text-out Gradio interface
iface = gr.Interface(fn=generate_response, inputs="text", outputs="text")
iface.launch()