Update pages/Project.py

pages/Project.py  +6 -8
@@ -7,17 +7,12 @@ import os
 import re
 
 # Setting up the API URLs and headers
-API_URL_gen = "https://api-inference.huggingface.co/models/facebook/blenderbot-400M-distill"
 API_URL_tra = "https://api-inference.huggingface.co/models/Helsinki-NLP/opus-mt-en-ru"
 API_URL_key = "https://api-inference.huggingface.co/models/ml6team/keyphrase-extraction-kbir-inspec"
 API_URL_sum = "https://api-inference.huggingface.co/models/facebook/bart-large-cnn"
 
 headers = {"Authorization": os.getenv("api_token")}
 
-# Function for generating an example sentence
-def generate_example(payload):
-    response = requests.post(API_URL_gen, headers=headers, json=payload)
-    return response.json()
 
 # Function for getting the key words
 def get_key_words(payload):
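The removed generate_example was the only consumer of the blenderbot endpoint, so the commit drops both together. The surviving helpers presumably wrap requests.post the same way the deleted function did; below is a minimal sketch of that shared pattern under this assumption. The factored-out query() helper is hypothetical, not part of the commit.

import os
import requests

API_URL_sum = "https://api-inference.huggingface.co/models/facebook/bart-large-cnn"
headers = {"Authorization": os.getenv("api_token")}

def query(api_url, payload):
    # POST the JSON payload to the hosted model and return the decoded JSON
    response = requests.post(api_url, headers=headers, json=payload)
    return response.json()

# The file's summarizer, expressed through the shared helper (sketch)
def make_summary(payload):
    return query(API_URL_sum, payload)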
@@ -66,8 +61,11 @@ key_words_list = []
 if button_start:
 
     with st.spinner('...'):
+        # Compose the summary
         summary_text = make_summary({"inputs": text_from_tarea})
         col2.text_area('Конспект статьи', height=500, value=summary_text[0]['summary_text'])
+
+        # Extract the key words
         kew_words = get_key_words({ "inputs": text_from_tarea,})
         for key_word in kew_words :
             key_words_list.append(key_word['word'].lower())
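The keyword loop reads a 'word' field from every returned item, which matches the token-classification response shape of the Inference API: a list of dicts, one per detected phrase. A sketch of this step in isolation, assuming that shape; the sample input and the score/offset fields are illustrative.

import os
import requests

API_URL_key = "https://api-inference.huggingface.co/models/ml6team/keyphrase-extraction-kbir-inspec"
headers = {"Authorization": os.getenv("api_token")}

def get_key_words(payload):
    # Send the article text to the keyphrase-extraction model
    response = requests.post(API_URL_key, headers=headers, json=payload)
    return response.json()

kew_words = get_key_words({"inputs": "Streamlit apps can call hosted models."})
# e.g. [{'word': 'Streamlit', 'score': 0.99, 'start': 0, 'end': 9}, ...]
key_words_list = [key_word['word'].lower() for key_word in kew_words]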
@@ -75,19 +73,19 @@ if button_start:
         sorted_keywords = set(sorted(key_words_list))
         sorted_keywords = clean_list(sorted_keywords)
 
+        # Translate the key words
         translated_words_list = []
         for key_word in sorted_keywords:
             res = translate_key_words({"inputs": key_word,})
             translated_words_list.append(res[0]['translation_text'])
 
+        # Create the cards
         cleaned_words_list_ru = clean_list(translated_words_list)
-
         cards_list = []
         for item1, item2 in zip(sorted_keywords, cleaned_words_list_ru):
             cards_list.append([item1, item2])
 
-
-        #cards_df = pd.DataFrame(cards_list, columns=['word', 'translated', 'example'])
+
         st.success('Готово')
 
         # Display the Word Cloud
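Two details in this hunk are worth noting. set(sorted(key_words_list)) discards the sort again immediately (sets are unordered); the later zip pairs words with translations only because the same set object is iterated in the same order both times. And the loop sends one HTTP request per keyword, while the translation endpoint also accepts a list under "inputs" and returns one {'translation_text': ...} dict per item. A hedged sketch of that batched alternative follows; it is not what the commit does.

import os
import requests

API_URL_tra = "https://api-inference.huggingface.co/models/Helsinki-NLP/opus-mt-en-ru"
headers = {"Authorization": os.getenv("api_token")}

def translate_key_words(payload):
    response = requests.post(API_URL_tra, headers=headers, json=payload)
    return response.json()

sorted_keywords = ["gradient descent", "overfitting"]  # illustrative values
# One request for the whole batch instead of one round trip per keyword
res = translate_key_words({"inputs": sorted_keywords})
translated_words_list = [item['translation_text'] for item in res]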