Update app.py
app.py
CHANGED
@@ -14,27 +14,6 @@ openai_api_key = os.getenv('GPT_KEY')
 gc_key = os.getenv('GC_KEY')
 token = os.getenv('GITHUB_TOKEN')
 
-gpt4o_temp = 1.0
-gigachat_pro_temp = 0.87
-gigachat_lite_temp = 0.87
-gigachat_plus_temp = 0.87
-
-def set_gpt4o_temp(value):
-    global gpt4o_temp
-    gpt4o_temp = value
-
-def set_gigachat_pro_temp(value):
-    global gigachat_pro_temp
-    gigachat_pro_temp = value
-
-def set_gigachat_lite_temp(value):
-    global gigachat_lite_temp
-    gigachat_lite_temp = value
-
-def set_gigachat_plus_temp(value):
-    global gigachat_plus_temp
-    gigachat_plus_temp = value
-
 # Авторизация в сервисе GigaChat
 chat_pro = GigaChat(credentials=gc_key, model='GigaChat-Pro', max_tokens=68, verify_ssl_certs=False)
 chat_lite = GigaChat(credentials=gc_key, model='GigaChat', max_tokens=68, verify_ssl_certs=False)
@@ -107,7 +86,7 @@ def generate_message_gpt4o(prompt, temperature=1):
         "model": "chatgpt-4o-latest",
         "messages": [{"role": "system", "content": prompt}],
         "max_tokens": 101,
-        "temperature":
+        "temperature": temperature # Передача температуры
     }
     response = requests.post("https://api.openai.com/v1/chat/completions", json=data, headers=headers)
     response_data = response.json()
@@ -125,10 +104,10 @@ def clean_message(message):
     return message
 
 # Обновленные функции генерации сообщений с учетом обрезки незаконченных предложений
-def generate_message_gigachat_pro(prompt):
+def generate_message_gigachat_pro(prompt, temperature=0.87):
     try:
         messages = [SystemMessage(content=prompt)]
-        chat_pro = GigaChat(credentials=gc_key, model='GigaChat-Pro', max_tokens=68, temperature=
+        chat_pro = GigaChat(credentials=gc_key, model='GigaChat-Pro', max_tokens=68, temperature=temperature, verify_ssl_certs=False)
         res = chat_pro(messages)
         cleaned_message = clean_message(res.content.strip())
         return cleaned_message
@@ -139,7 +118,7 @@ def generate_message_gigachat_lite(prompt, temperature=0.87):
     try:
         time.sleep(2)
         messages = [SystemMessage(content=prompt)]
-        chat_lite = GigaChat(credentials=gc_key, model='GigaChat', max_tokens=68, temperature=
+        chat_lite = GigaChat(credentials=gc_key, model='GigaChat', max_tokens=68, temperature=temperature, verify_ssl_certs=False)
         res = chat_lite(messages)
         cleaned_message = clean_message(res.content.strip())
         return cleaned_message
@@ -150,7 +129,7 @@ def generate_message_gigachat_plus(prompt, temperature=0.87):
     try:
         time.sleep(2)
         messages = [SystemMessage(content=prompt)]
-        chat_plus = GigaChat(credentials=gc_key, model='GigaChat-Plus', max_tokens=68, temperature=
+        chat_plus = GigaChat(credentials=gc_key, model='GigaChat-Plus', max_tokens=68, temperature=temperature, verify_ssl_certs=False)
         res = chat_plus(messages)
         cleaned_message = clean_message(res.content.strip())
         return cleaned_message
@@ -423,17 +402,10 @@ with gr.Blocks() as demo:
     gr.Markdown("# Генерация SMS-сообщений по заданным признакам")
 
     # Добавление элементов управления температурой для каждой модели
-    gpt4o_slider = gr.Slider(label="GPT-4o: temperature", minimum=0, maximum=2, step=0.01, value=gpt4o_temp)
-    gpt4o_slider.change(set_gpt4o_temp, inputs=gpt4o_slider, outputs=None)
-
-    gigachat_pro_slider = gr.Slider(label="GigaChat-Pro: temperature", minimum=0, maximum=2, step=0.01, value=gigachat_pro_temp)
-    gigachat_pro_slider.change(set_gigachat_pro_temp, inputs=gigachat_pro_slider, outputs=None)
-
-    gigachat_lite_slider = gr.Slider(label="GigaChat-Lite: temperature", minimum=0, maximum=2, step=0.01, value=gigachat_lite_temp)
-    gigachat_lite_slider.change(set_gigachat_lite_temp, inputs=gigachat_lite_slider, outputs=None)
-
-    gigachat_plus_slider = gr.Slider(label="GigaChat-Plus: temperature", minimum=0, maximum=2, step=0.01, value=gigachat_plus_temp)
-    gigachat_plus_slider.change(set_gigachat_plus_temp, inputs=gigachat_plus_slider, outputs=None)
+    gpt4o_temperature = gr.Slider(label="GPT-4o: temperature", minimum=0, maximum=2, step=0.01, value=1)
+    gigachat_pro_temperature = gr.Slider(label="GigaChat-Pro: temperature", minimum=0, maximum=2, step=0.01, value=0.87)
+    gigachat_lite_temperature = gr.Slider(label="GigaChat-Lite: temperature", minimum=0, maximum=2, step=0.01, value=0.87)
+    gigachat_plus_temperature = gr.Slider(label="GigaChat-Plus: temperature", minimum=0, maximum=2, step=0.01, value=0.87)
 
     with gr.Row():
         with gr.Column(scale=1):
@@ -480,10 +452,10 @@ with gr.Blocks() as demo:
             description_input,
             advantages_input,
             *selections,
-
-
-
-
+            gpt4o_temperature,
+            gigachat_pro_temperature,
+            gigachat_lite_temperature,
+            gigachat_plus_temperature
         ],
         outputs=[prompt_display, output_text_gpt4o, output_text_gigachat_pro, output_text_gigachat_lite, output_text_gigachat_plus]
     )
@@ -546,7 +518,7 @@ with gr.Blocks() as demo:
             selections[3], # Стадия бизнеса
             selections[4], # Отрасль
             selections[5], # ОПФ
-
+            gpt4o_temperature # Передача температуры
         ],
         outputs=None
     )
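The change above drops the module-level temperature globals and their setter functions in favour of a per-call temperature argument: each generation function now builds its GigaChat client with the temperature it was given for that request. Below is a minimal, self-contained sketch of that pattern, not the full app.py; the import paths and the GC_KEY environment variable are assumptions about the app's setup and may differ in the real file.

import os
# Assumed import paths; the actual app.py may import GigaChat/SystemMessage
# from a different LangChain module depending on its version.
from langchain_community.chat_models.gigachat import GigaChat
from langchain_core.messages import SystemMessage

gc_key = os.getenv('GC_KEY')  # same credential variable as in the diff

def generate_message_gigachat_pro(prompt, temperature=0.87):
    # A fresh client is built per request, so the slider value applies only
    # to this call instead of being shared through a global variable.
    chat_pro = GigaChat(
        credentials=gc_key,
        model='GigaChat-Pro',
        max_tokens=68,
        temperature=temperature,
        verify_ssl_certs=False,
    )
    res = chat_pro([SystemMessage(content=prompt)])
    return res.content.strip()

# Example call: the temperature would normally come from the Gradio slider.
print(generate_message_gigachat_pro("Составь короткое SMS о новом тарифе", temperature=1.2))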
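On the UI side, the sliders are now listed directly in the click handler's inputs, so Gradio passes their current values as extra arguments on every click instead of writing them into globals. A stripped-down sketch of that wiring follows; generate_all, the textbox, and the button are hypothetical placeholders standing in for the fuller interface in app.py.

import gradio as gr

def generate_all(description, gpt4o_temperature, gigachat_pro_temperature):
    # Placeholder for the real handler: in app.py these values are forwarded
    # to generate_message_gpt4o / generate_message_gigachat_* as `temperature`.
    return f"{description} | GPT-4o t={gpt4o_temperature} | GigaChat-Pro t={gigachat_pro_temperature}"

with gr.Blocks() as demo:
    gpt4o_temperature = gr.Slider(label="GPT-4o: temperature", minimum=0, maximum=2, step=0.01, value=1)
    gigachat_pro_temperature = gr.Slider(label="GigaChat-Pro: temperature", minimum=0, maximum=2, step=0.01, value=0.87)
    description_input = gr.Textbox(label="Описание")
    output = gr.Textbox(label="Результат")
    generate_btn = gr.Button("Сгенерировать")
    # Slider values travel with the other inputs on every click; no globals needed.
    generate_btn.click(
        generate_all,
        inputs=[description_input, gpt4o_temperature, gigachat_pro_temperature],
        outputs=output,
    )

demo.launch()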