# Hugging Face Space: English text summarization (T5) + EN->ES translation demo.
# (Removed web-scrape residue — page chrome, file size, commit hashes — that was
# accidentally captured above the code and would be a Python syntax error.)
import gradio as gr
from transformers import pipeline, T5Tokenizer, T5ForConditionalGeneration, AutoTokenizer, AutoModelForSeq2SeqLM
import os
import requests
# Load environment variable for Hugging Face API token.
# NOTE(review): token is None if HF_TOKEN is unset — the Inference API request
# below would then go out unauthenticated; confirm the Space defines the secret.
token = os.getenv("HF_TOKEN")
headers = {"Authorization": f"Bearer {token}"}
# Load summarization model and tokenizer
# (fetched from the Hugging Face Hub on first run, cached locally afterwards).
tokenizer = T5Tokenizer.from_pretrained("sumedh/t5-base-amazonreviews", clean_up_tokenization_spaces=True)
model = T5ForConditionalGeneration.from_pretrained("sumedh/t5-base-amazonreviews")
summarizer = pipeline("summarization", model=model, tokenizer=tokenizer)
# Translation API details
# Hosted Inference API endpoint for English->Spanish (Helsinki-NLP/opus-mt-en-es).
API_URL = "https://api-inference.huggingface.co/models/Helsinki-NLP/opus-mt-en-es"
# Summarization and Translation Function
def texto_sum(text):
    """Summarize *text* with the T5 pipeline, then translate the summary to Spanish.

    Parameters
    ----------
    text : str
        English source text to summarize.

    Returns
    -------
    str
        Spanish translation of the summary, or a human-readable error message
        (the UI displays whatever string is returned).
    """
    # Guard: the summarizer raises on empty/whitespace-only input.
    if not text or not text.strip():
        return "Error in translation: empty input text"
    # Summarize the input text
    summary = summarizer(text, do_sample=False)[0]['summary_text']
    # Translate summary using the Hugging Face Inference API.
    # timeout keeps the Gradio UI from hanging forever if the API stalls;
    # network and JSON-decode failures are reported as strings instead of
    # crashing the request handler.
    try:
        response = requests.post(
            API_URL, headers=headers, json={"inputs": summary}, timeout=30
        )
        translation = response.json()
    except requests.RequestException as exc:
        return f"Error in translation: {exc}"
    except ValueError as exc:  # response body was not valid JSON
        return f"Error in translation: {exc}"
    # The API returns a dict with an 'error' key on failure (e.g. model loading),
    # and a list of {'translation_text': ...} on success.
    if isinstance(translation, dict) and 'error' in translation:
        return f"Error in translation: {translation['error']}"
    return translation[0]['translation_text']
# Gradio interface: wires the summarize-and-translate function to a one-box UI.
demo = gr.Interface(
    fn=texto_sum,
    inputs=gr.Textbox(label="Texto a introducir:", placeholder="Introduce el texto a resumir aquí..."),
    outputs="text",
)

# Launch the interface.
# Fixed: removed the stray " |" scrape artifact after launch(), which was a
# syntax error in the captured file.
demo.launch()