# qwen2-5-demo / app.py
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the model and tokenizer (note: the checkpoint below is Qwen1.5-0.5B, even though the demo is titled Qwen2.5)
model_name = "Qwen/Qwen1.5-0.5B"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float32)
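# If a GPU were available, the model could instead be loaded in half precision
# with automatic device placement, e.g. (a hedged sketch, not used by this CPU
# demo; device_map="auto" additionally requires the `accelerate` package):
#   model = AutoModelForCausalLM.from_pretrained(
#       model_name, torch_dtype=torch.float16, device_map="auto"
#   )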
# Response function
def chat(message):
    # Tokenize the user message and generate a short continuation
    inputs = tokenizer(message, return_tensors="pt")
    output = model.generate(**inputs, max_new_tokens=50)
    # Decode the full sequence (prompt + generated tokens) as the reply
    return tokenizer.decode(output[0], skip_special_tokens=True)
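# A minimal variant of chat() that strips the prompt and returns only the newly
# generated text (illustrative sketch; the Interface below still uses chat()):
def chat_reply_only(message):
    inputs = tokenizer(message, return_tensors="pt")
    output = model.generate(**inputs, max_new_tokens=50)
    # Skip the prompt tokens so only the model's reply is decoded
    new_tokens = output[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)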
# Gradio interface
iface = gr.Interface(fn=chat, inputs="text", outputs="text", title="Chat with Qwen2.5")
iface.launch()