import gradio as gr
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline

# Load the Greek summarization model once and reuse it for the pipeline,
# so the weights are not loaded into memory twice.
tokenizer = AutoTokenizer.from_pretrained("kriton/greek-text-summarization")
model = AutoModelForSeq2SeqLM.from_pretrained("kriton/greek-text-summarization")
generator = pipeline("summarization", model=model, tokenizer=tokenizer)
def generate_summary(article):
    # Prepend the T5-style task prefix and truncate/pad to the model's
    # maximum input length.
    inputs = tokenizer(
        "summarize: " + article,
        return_tensors="pt",
        max_length=1024,
        truncation=True,
        padding="max_length",
    )
    # Beam search with strong length and repetition penalties to favor
    # longer, non-repetitive summaries.
    outputs = model.generate(
        inputs["input_ids"],
        max_length=512,
        min_length=130,
        length_penalty=3.0,
        num_beams=8,
        early_stopping=True,
        repetition_penalty=3.0,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
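
# Example invocation (hypothetical Greek input; assumes the weights have
# downloaded): generate_summary("Η κυβέρνηση ανακοίνωσε σήμερα ...")
# returns a summary of roughly 130-512 tokens, per the generate() limits above.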
def generate_text(prompt):
    # Summarize the article, then run the resulting summary through the
    # pipeline a second time. The summarization pipeline returns its output
    # under the "summary_text" key, not "generated_text".
    response = generator(generate_summary(prompt), max_length=512, num_return_sequences=1)
    return response[0]["summary_text"]
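
# A minimal single-pass alternative (an assumption, not part of the original
# app): returning the beam-search summary directly avoids summarizing the
# text twice; the name `summarize_once` is hypothetical and for illustration.
def summarize_once(prompt):
    return generate_summary(prompt)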
iface = gr.Interface(
    fn=generate_text,
    inputs="text",
    outputs="text",
    title="Greek Text Summarization",
    description="Enter a Greek article to generate a summary from the model.",
)
# Launch the Gradio interface
iface.launch(share=True)