Spaces:
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,19 +1,19 @@
|
|
import gradio as gr
from transformers import pipeline, T5Tokenizer, T5ForConditionalGeneration
import os
import requests

# Load the Hugging Face token from the Space's configured secret.
token = os.getenv("HF_TOKEN")
if not token:
    # Fail fast at startup if the secret is missing — every API call below needs it.
    raise ValueError("El token no se configuró correctamente en las variables de entorno del Espacio.")

# Inference API endpoint for EN->ES translation, authenticated via bearer token.
API_URL = "https://api-inference.huggingface.co/models/Helsinki-NLP/opus-mt-en-es"
headers = {"Authorization": f"Bearer {token}"}

# Load the summarization model and tokenizer (downloaded/cached at import time).
tokenizer = T5Tokenizer.from_pretrained("sumedh/t5-base-amazonreviews", clean_up_tokenization_spaces=True)
model = T5ForConditionalGeneration.from_pretrained("sumedh/t5-base-amazonreviews")
summarizer = pipeline("summarization", model=model, tokenizer=tokenizer)
|
@@ -39,18 +39,5 @@ demo = gr.Interface(
|
|
39 |
outputs="text"
|
40 |
)
|
41 |
|
# Verify the token with one simple request so an invalid token fails fast at startup.
def test_translation_api():
    """Send a test request to the translation API and raise ValueError if it fails.

    Uses the module-level API_URL and headers. Prints the API's sample
    response when the token is valid.
    """
    test_response = requests.post(
        API_URL,
        headers=headers,
        json={"inputs": "Hello, how are you?"},
        timeout=30,  # never hang the Space indefinitely on an unresponsive API
    )
    try:
        response_data = test_response.json()
    except ValueError as exc:
        # Non-JSON body (e.g. an HTML error page) — surface the HTTP status instead of a parse error.
        raise ValueError(
            f"Error en la API de traducción: respuesta no válida (HTTP {test_response.status_code})"
        ) from exc

    # The Inference API reports problems in an 'error' field of the JSON body.
    if 'error' in response_data:
        raise ValueError(f"Error en la API de traducción: {response_data['error']}")
    print("Token válido y API accesible. Respuesta de prueba:", response_data)

# Run the token check before serving traffic.
test_translation_api()

# Launch the interface
demo.launch()
|
|
import gradio as gr
from transformers import pipeline, T5Tokenizer, T5ForConditionalGeneration
import requests
import os

# Read the Hugging Face token from the secret configured on the Space.
token = os.getenv("HF_TOKEN")
if not token:
    raise ValueError("El token no se configuró correctamente en las variables de entorno del Espacio.")

# Translation endpoint and the authorization header built from the token.
API_URL = "https://api-inference.huggingface.co/models/Helsinki-NLP/opus-mt-en-es"
headers = {"Authorization": f"Bearer {token}"}

# Summarization model: tokenizer + weights wrapped in a pipeline.
tokenizer = T5Tokenizer.from_pretrained("sumedh/t5-base-amazonreviews", clean_up_tokenization_spaces=True, legacy=False)
model = T5ForConditionalGeneration.from_pretrained("sumedh/t5-base-amazonreviews")
summarizer = pipeline("summarization", model=model, tokenizer=tokenizer)
|
|
|
39 |
outputs="text"
|
40 |
)
|
41 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Start the Gradio app.
demo.launch()