Update app.py
app.py
CHANGED
@@ -14,6 +14,27 @@ openai_api_key = os.getenv('GPT_KEY')
 gc_key = os.getenv('GC_KEY')
 token = os.getenv('GITHUB_TOKEN')
 
+gpt4o_temp = 1.0
+gigachat_pro_temp = 0.87
+gigachat_lite_temp = 0.87
+gigachat_plus_temp = 0.87
+
+def set_gpt4o_temp(value):
+    global gpt4o_temp
+    gpt4o_temp = value
+
+def set_gigachat_pro_temp(value):
+    global gigachat_pro_temp
+    gigachat_pro_temp = value
+
+def set_gigachat_lite_temp(value):
+    global gigachat_lite_temp
+    gigachat_lite_temp = value
+
+def set_gigachat_plus_temp(value):
+    global gigachat_plus_temp
+    gigachat_plus_temp = value
+
 # Authorization with the GigaChat service
 chat_pro = GigaChat(credentials=gc_key, model='GigaChat-Pro', max_tokens=68, verify_ssl_certs=False)
 chat_lite = GigaChat(credentials=gc_key, model='GigaChat', max_tokens=68, verify_ssl_certs=False)
@@ -86,7 +107,7 @@ def generate_message_gpt4o(prompt, temperature=1):
         "model": "chatgpt-4o-latest",
         "messages": [{"role": "system", "content": prompt}],
         "max_tokens": 101,
-        "temperature": temperature
+        "temperature": gpt4o_temp  # Pass the temperature setting
     }
     response = requests.post("https://api.openai.com/v1/chat/completions", json=data, headers=headers)
     response_data = response.json()
@@ -104,10 +125,10 @@ def clean_message(message):
     return message
 
 # Updated message generation functions that trim unfinished sentences
-def generate_message_gigachat_pro(prompt, temperature=0.87):
+def generate_message_gigachat_pro(prompt):
     try:
         messages = [SystemMessage(content=prompt)]
-        chat_pro = GigaChat(credentials=gc_key, model='GigaChat-Pro', max_tokens=68, temperature=temperature, verify_ssl_certs=False)
+        chat_pro = GigaChat(credentials=gc_key, model='GigaChat-Pro', max_tokens=68, temperature=gigachat_pro_temp, verify_ssl_certs=False)
         res = chat_pro(messages)
         cleaned_message = clean_message(res.content.strip())
         return cleaned_message
@@ -118,7 +139,7 @@ def generate_message_gigachat_lite(prompt, temperature=0.87):
     try:
         time.sleep(2)
         messages = [SystemMessage(content=prompt)]
-        chat_lite = GigaChat(credentials=gc_key, model='GigaChat', max_tokens=68, temperature=temperature, verify_ssl_certs=False)
+        chat_lite = GigaChat(credentials=gc_key, model='GigaChat', max_tokens=68, temperature=gigachat_lite_temp, verify_ssl_certs=False)
         res = chat_lite(messages)
         cleaned_message = clean_message(res.content.strip())
         return cleaned_message
@@ -129,7 +150,7 @@ def generate_message_gigachat_plus(prompt, temperature=0.87):
     try:
         time.sleep(2)
         messages = [SystemMessage(content=prompt)]
-        chat_plus = GigaChat(credentials=gc_key, model='GigaChat-Plus', max_tokens=68, temperature=temperature, verify_ssl_certs=False)
+        chat_plus = GigaChat(credentials=gc_key, model='GigaChat-Plus', max_tokens=68, temperature=gigachat_plus_temp, verify_ssl_certs=False)
        res = chat_plus(messages)
         cleaned_message = clean_message(res.content.strip())
         return cleaned_message
@@ -402,10 +423,10 @@ with gr.Blocks() as demo:
     gr.Markdown("# Генерация SMS-сообщений по заданным признакам")
 
     # Temperature controls for each model
-
-
-
-
+    gpt4o_slider = gr.Slider(label="GPT-4o: temperature", minimum=0, maximum=2, step=0.01, value=1, change=set_gpt4o_temp)
+    gigachat_pro_slider = gr.Slider(label="GigaChat-Pro: temperature", minimum=0, maximum=2, step=0.01, value=0.87, change=set_gigachat_pro_temp)
+    gigachat_lite_slider = gr.Slider(label="GigaChat-Lite: temperature", minimum=0, maximum=2, step=0.01, value=0.87, change=set_gigachat_lite_temp)
+    gigachat_plus_slider = gr.Slider(label="GigaChat-Plus: temperature", minimum=0, maximum=2, step=0.01, value=0.87, change=set_gigachat_plus_temp)
 
     with gr.Row():
         with gr.Column(scale=1):
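
A note on the slider wiring: current Gradio releases do not accept a change= keyword in the gr.Slider constructor; the handler is normally attached afterwards with the component's .change() event listener. Below is a minimal sketch of that pattern, reusing the module-level setter approach from this commit; only the GPT-4o slider is shown, and the component name is illustrative.

import gradio as gr

gpt4o_temp = 1.0  # module-level default, as in app.py

def set_gpt4o_temp(value):
    # Store the slider value in the module-level variable.
    global gpt4o_temp
    gpt4o_temp = value

with gr.Blocks() as demo:
    # Create the slider, then attach the handler as an event listener.
    gpt4o_slider = gr.Slider(label="GPT-4o: temperature", minimum=0, maximum=2, step=0.01, value=1.0)
    gpt4o_slider.change(fn=set_gpt4o_temp, inputs=gpt4o_slider)

demo.launch()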
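An alternative that avoids module-level state is to pass the sliders directly as inputs to the generation callbacks, for example generate_button.click(fn=generate_message_gpt4o, inputs=[prompt_box, gpt4o_slider], outputs=output_box) with a generate_message_gpt4o(prompt, temperature) signature; generate_button, prompt_box, and output_box are hypothetical names here, since the rest of the Blocks layout is not shown in this diff.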