# Hugging Face Spaces page header captured during export (not code):
# Spaces: Sleeping
import gradio as gr
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
import torch

# Load the translation model and its tokenizer from the Hugging Face Hub.
# Replace `model_name` with your own model repo id if needed.
model_name = "CeciGonSer/translation_espa_pure_biblia_bart-large-cnn"
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
def translate(text):
    """Translate Purépecha text into Spanish with the loaded seq2seq model.

    Parameters
    ----------
    text : str
        Input text in Purépecha (per the UI labels below).

    Returns
    -------
    str
        The decoded Spanish translation of the first generated sequence.
    """
    inputs = tokenizer(text, return_tensors="pt", padding=True)
    # Inference only — disable gradient tracking to save memory/compute.
    with torch.no_grad():
        translated = model.generate(**inputs)
    # generate() returns a batch; decode the single sequence, dropping
    # special tokens (BOS/EOS/pad).
    translation = tokenizer.decode(translated[0], skip_special_tokens=True)
    return translation
# Gradio UI: a single text box in (Purépecha), a single text box out (Spanish),
# wired to translate(). launch() starts the local web server.
interface = gr.Interface(
    fn=translate,
    inputs=gr.Textbox(label="Introduce texto en purépecha"),
    outputs=gr.Textbox(label="Traducción al español"),
)
interface.launch()