import gradio as gr
from transformers import GPT2LMHeadModel, GPT2Tokenizer
import torch

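# DistilGPT-2 is small enough to run comfortably on CPU, so pin the device explicitly.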
device = torch.device("cpu")

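# Load the tokenizer and model weights once at startup so each request only runs generation.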
print("Cargando modelo DistilGPT-2...") |
|
model_name = "distilgpt2" |
|
tokenizer = GPT2Tokenizer.from_pretrained(model_name) |
|
model = GPT2LMHeadModel.from_pretrained(model_name) |
|
|
|
|
|
model.to(device)
model.eval()

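# GPT-2 ships without a padding token; reuse the end-of-sequence token instead.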
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token


def autocomplete_text(input_text, max_tokens=20):
    """
    Autocomplete the input text using DistilGPT-2.

    Args:
        input_text (str): Initial text to complete.
        max_tokens (int): Maximum number of tokens to generate.

    Returns:
        str: Only the newly generated portion (without the original input).
    """
    if not input_text.strip():
        return "Please enter some text to complete."

    try:
        # Tokenize the prompt and move it to the same device as the model.
        inputs = tokenizer.encode(input_text, return_tensors="pt")
        inputs = inputs.to(device)

        with torch.no_grad():
            outputs = model.generate(
                inputs,
                max_new_tokens=max_tokens,
                num_return_sequences=1,
                temperature=0.7,
                do_sample=True,
                pad_token_id=tokenizer.eos_token_id,
                eos_token_id=tokenizer.eos_token_id,
                attention_mask=torch.ones_like(inputs)
            )

        # Decode only the newly generated tokens so the prompt is never
        # duplicated or mangled in the output.
        new_tokens = outputs[0][inputs.shape[-1]:]
        new_text = tokenizer.decode(new_tokens, skip_special_tokens=True).strip()

        if not new_text:
            return "No additional text could be generated."

        return new_text

    except Exception as e:
        return f"Error generating text: {str(e)}"


def create_autocomplete_interface():
    """
    Build the autocomplete interface inside gr.Blocks().
    """
    with gr.Blocks(title="Text Autocomplete") as demo:
        gr.Markdown("# 🤖 Text Autocomplete")
        gr.Markdown("Type the start of a sentence and the AI will complete it for you.")

with gr.Tab("Autocompletar"): |
|
with gr.Row(): |
|
with gr.Column(): |
|
input_textbox = gr.Textbox( |
|
label="Texto a completar", |
|
placeholder="Escribe el inicio de tu frase aquí...", |
|
lines=3, |
|
max_lines=5 |
|
) |
|
|
|
generate_btn = gr.Button("Completar Texto", variant="primary") |
|
|
|
                with gr.Column():
                    output_textbox = gr.Textbox(
                        label="Generated text",
                        placeholder="The completion will appear here...",
                        lines=3,
                        max_lines=5,
                        interactive=False
                    )

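            # The button click and the textbox submit event both run the same completion handler.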
            generate_btn.click(
                fn=autocomplete_text,
                inputs=[input_textbox],
                outputs=[output_textbox]
            )

            input_textbox.submit(
                fn=autocomplete_text,
                inputs=[input_textbox],
                outputs=[output_textbox]
            )

with gr.Tab("Ejemplos"): |
|
gr.Markdown(""" |
|
### Ejemplos de uso: |
|
|
|
**Entrada:** "El clima de hoy está" |
|
**Salida:** "muy agradable y soleado" |
|
|
|
**Entrada:** "Me gusta mucho" |
|
**Salida:** "pasar tiempo con mi familia" |
|
|
|
**Entrada:** "Para hacer una buena comida necesitas" |
|
**Salida:** "ingredientes frescos y mucha paciencia" |
|
""") |
|
|
|
    return demo


if __name__ == "__main__":
    print("Starting the autocomplete application...")

    app = create_autocomplete_interface()

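    # Listen on all network interfaces at Gradio's default port 7860.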
    app.launch(
        share=False,
        server_name="0.0.0.0",
        server_port=7860,
        show_error=True,
        debug=False
    )