import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM

# Seq2seq model (a T5 fine-tuned for news summarization) used here to rewrite/simplify text
simplifier_model_name = "mrm8488/t5-base-finetuned-summarize-news"
simplifier_tokenizer = AutoTokenizer.from_pretrained(simplifier_model_name)
simplifier_model = AutoModelForSeq2SeqLM.from_pretrained(simplifier_model_name)

def simplificar_texto(texto, nivel):
    """Rewrite `texto` at the requested simplicity level using the seq2seq model."""
    # Instruction prefix per level; keys match the Gradio dropdown choices
    niveles = {
        "Bajo": "Rephrase this text with slightly simpler words:",
        "Medio": "Rephrase this text in a simple and clear way:",
        "Alto": "Rephrase this text in very simple and easy-to-understand words for a person with cognitive difficulties:"
    }
    prompt = f"{niveles[nivel]}\n\n{texto}"
    inputs = simplifier_tokenizer(prompt, return_tensors="pt", truncation=True)
    outputs = simplifier_model.generate(
        **inputs,
        max_new_tokens=120,
        num_beams=4,            # beam search gives more stable rewrites than greedy decoding
        repetition_penalty=1.2,
        early_stopping=True
    )
    resultado = simplifier_tokenizer.decode(outputs[0], skip_special_tokens=True)
    return resultado

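# Minimal sanity-check sketch (hypothetical example text, taken from the UI placeholder;
# assumes the model weights have been downloaded). Uncomment to try the simplifier directly:
# print(simplificar_texto("Un párrafo de un documento legal...", "Medio"))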

# Small causal language model used for next-phrase prediction
predictor_model_name = "distilgpt2"
predictor_tokenizer = AutoTokenizer.from_pretrained(predictor_model_name)
predictor_model = AutoModelForCausalLM.from_pretrained(predictor_model_name)

def predecir_texto(texto_inicial):
    """Continue `texto_inicial` with a short sampled completion and return only the new text."""
    inputs = predictor_tokenizer.encode(texto_inicial, return_tensors="pt")
    # pad_token_id is set explicitly because GPT-2 models define no pad token
    outputs = predictor_model.generate(inputs, max_new_tokens=20, do_sample=True, top_k=50,
                                       pad_token_id=predictor_tokenizer.eos_token_id)
    texto_generado = predictor_tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Strip the original prompt so only the generated continuation is returned
    return texto_generado[len(texto_inicial):]

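# Quick check for the predictor (prompt taken from the UI placeholder). Uncomment to try it directly:
# print(predecir_texto("Me gustaría ir a la"))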


with gr.Blocks() as demo:
    gr.Markdown("## 🧠 Chatbot Simplificador y Teclado Predictivo")

    with gr.Tab("Simplificación de texto"):
        gr.Markdown("Introduce un texto complejo y obtén una versión más sencilla.")
        entrada_simplificar = gr.Textbox(label="Texto original", lines=4, placeholder="Ej. Un párrafo de un documento legal...")
        nivel_dropdown = gr.Dropdown(choices=["Bajo", "Medio", "Alto"], label="Nivel de simplicidad", value="Medio")
        boton_simplificar = gr.Button("Simplificar")
        salida_simplificar = gr.Textbox(label="Texto simplificado")

        boton_simplificar.click(
            fn=simplificar_texto,
            inputs=[entrada_simplificar, nivel_dropdown],
            outputs=salida_simplificar
        )

    with gr.Tab("Texto Predictivo"):
        gr.Markdown("Escribe el inicio de una frase y recibe sugerencias.")
        entrada_predecir = gr.Textbox(label="Frase incompleta", placeholder="Ej. Me gustaría ir a la...")
        salida_predecir = gr.Textbox(label="Sugerencia")
        boton_predecir = gr.Button("Predecir")
        boton_predecir.click(fn=predecir_texto, inputs=entrada_predecir, outputs=salida_predecir)

demo.launch()