Update app.py
app.py
CHANGED
@@ -1,32 +1,41 @@
 import gradio as gr
 from transformers import pipeline
 import torch
+import logging
 
-#
+# Set up logging for diagnostics
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+# Load the model via pipeline (locally from the Hugging Face Hub)
 model_name = "distilgpt2"
 try:
+    logger.info(f"Attempting to load model {model_name}...")
     generator = pipeline(
         "text-generation",
         model=model_name,
-        device=-1,  #
+        device=-1,  # CPU for the free Spaces tier
         framework="pt",
         max_length=512,
-        truncation=True
+        truncation=True,
+        model_kwargs={"torch_dtype": torch.float32}  # Specify the dtype for compatibility
     )
+    logger.info("Model loaded successfully.")
 except Exception as e:
-
+    logger.error(f"Model loading error: {e}")
     exit(1)
 
 def respond(message, history, max_tokens=256, temperature=0.7, top_p=0.9):
     history = history or []
     # Build the input text
     input_text = ""
     for user_msg, bot_msg in history:
         input_text += f"User: {user_msg}\nAssistant: {bot_msg}\n"
     input_text += f"User: {message}"
 
     # Generate the response
     try:
+        logger.info(f"Generating a response for: {message}")
         outputs = generator(
             input_text,
             max_length=max_tokens,
@@ -34,17 +43,17 @@ def respond(message, history, max_tokens=256, temperature=0.7, top_p=0.9):
             top_p=top_p,
             do_sample=True,
             no_repeat_ngram_size=2,
-            pad_token_id=generator.tokenizer.eos_token_id,
             num_return_sequences=1
         )
         response = outputs[0]["generated_text"][len(input_text):].strip()
+        logger.info(f"Generated response: {response}")
     except Exception as e:
-
+        logger.error(f"Response generation error: {e}")
+        return f"Generation error: {e}", history
 
     # Format the response
     formatted_response = format_response(response)
     history.append((message, formatted_response))
-
     return formatted_response, history
 
 def format_response(response):
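The hunk ends before the body of format_response, and the commit never shows how respond is wired into a Gradio UI. A minimal sketch of that wiring, assuming the tuple-style chat history that respond's (message, history) -> (reply, updated_history) shape implies; the handler name on_submit and the layout are illustrative, not part of this commit:

import gradio as gr

# Hypothetical wiring (not shown in this commit): attach respond() to a Blocks UI.
# respond() returns (reply, updated_history), and a history of (user, bot)
# tuples is exactly what gr.Chatbot renders by default.
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox(label="Message")
    state = gr.State([])

    def on_submit(message, history):
        _, history = respond(message, history)
        # Update the chat window, persist the history, and clear the textbox.
        return history, history, ""

    msg.submit(on_submit, [msg, state], [chatbot, state, msg])

demo.launch()

Two details of the generation call may also matter in practice: in a transformers text-generation pipeline, max_length caps the prompt plus the completion, so a long chat history can leave little or no room for new text (max_new_tokens caps only the completion), and dropping pad_token_id makes GPT-2, which has no dedicated pad token, warn on every call. A hedged variant of the call under those assumptions:

outputs = generator(
    input_text,
    max_new_tokens=max_tokens,  # cap only the generated continuation, not the prompt
    temperature=temperature,
    top_p=top_p,
    do_sample=True,
    no_repeat_ngram_size=2,
    pad_token_id=generator.tokenizer.eos_token_id,  # silence the missing-pad-token warning
    num_return_sequences=1,
)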