fruitpicker01 committed on
Commit
010d2bb
·
verified ·
1 Parent(s): cf81687

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -10
app.py CHANGED
@@ -25,9 +25,9 @@ client = Together(api_key=TOGETHER_API_KEY)
25
  client_mistral = Mistral(api_key=MISTRAL_API_KEY)
26
 
27
  # Авторизация в сервисе GigaChat
28
- chat_pro = GigaChat(credentials=gc_key, model='GigaChat-Pro', max_tokens=68, verify_ssl_certs=False)
29
- chat_lite = GigaChat(credentials=gc_key, model='GigaChat', max_tokens=68, verify_ssl_certs=False)
30
- chat_plus = GigaChat(credentials=gc_key, model='GigaChat-Plus', max_tokens=68, verify_ssl_certs=False)
31
 
32
  # Загрузка данных из Excel-файла
33
  try:
@@ -188,7 +188,8 @@ def generate_message_gpt4o(prompt):
188
  data = {
189
  "model": "chatgpt-4o-latest",
190
  "messages": [{"role": "system", "content": prompt}],
191
- "max_tokens": 101
 
192
  }
193
  response = requests.post("https://api.openai.com/v1/chat/completions", json=data, headers=headers)
194
  response_data = response.json()
@@ -240,7 +241,7 @@ def generate_message_meta_llama_3_1_405b(prompt):
240
  model="meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
241
  messages=[{"role": "user", "content": prompt}],
242
  max_tokens=74,
243
- temperature=0.7
244
  )
245
  cleaned_message = clean_message(response.choices[0].message.content.strip())
246
  return cleaned_message
@@ -253,7 +254,7 @@ def generate_message_meta_llama_3_1_70b(prompt):
253
  model="meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
254
  messages=[{"role": "user", "content": prompt}],
255
  max_tokens=74,
256
- temperature=0.7
257
  )
258
  cleaned_message = clean_message(response.choices[0].message.content.strip())
259
  return cleaned_message
@@ -266,7 +267,7 @@ def generate_message_meta_llama_3_1_8b(prompt):
266
  model="meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
267
  messages=[{"role": "user", "content": prompt}],
268
  max_tokens=74,
269
- temperature=0.7
270
  )
271
  cleaned_message = clean_message(response.choices[0].message.content.strip())
272
  return cleaned_message
@@ -279,7 +280,7 @@ def generate_message_gemma_2_27b_it(prompt):
279
  model="google/gemma-2-27b-it",
280
  messages=[{"role": "user", "content": prompt}],
281
  max_tokens=74,
282
- temperature=0.7
283
  )
284
  cleaned_message = clean_message(response.choices[0].message.content.strip())
285
  return cleaned_message
@@ -292,7 +293,7 @@ def generate_message_gemma_2_9b_it(prompt):
292
  model="google/gemma-2-9b-it",
293
  messages=[{"role": "user", "content": prompt}],
294
  max_tokens=74,
295
- temperature=0.7
296
  )
297
  cleaned_message = clean_message(response.choices[0].message.content.strip())
298
  return cleaned_message
@@ -307,7 +308,8 @@ def generate_message_mistral(prompt):
307
  {
308
  "role": "user",
309
  "content": prompt,
310
- "max_tokens": 74
 
311
  },
312
  ]
313
  )
 
25
  client_mistral = Mistral(api_key=MISTRAL_API_KEY)
26
 
27
  # Авторизация в сервисе GigaChat
28
+ chat_pro = GigaChat(credentials=gc_key, model='GigaChat-Pro', max_tokens=68, temperature=1, verify_ssl_certs=False)
29
+ chat_lite = GigaChat(credentials=gc_key, model='GigaChat', max_tokens=68, temperature=1, verify_ssl_certs=False)
30
+ chat_plus = GigaChat(credentials=gc_key, model='GigaChat-Plus', max_tokens=68, temperature=1, verify_ssl_certs=False)
31
 
32
  # Загрузка данных из Excel-файла
33
  try:
 
188
  data = {
189
  "model": "chatgpt-4o-latest",
190
  "messages": [{"role": "system", "content": prompt}],
191
+ "max_tokens": 101,
192
+ "temperature": 1.1
193
  }
194
  response = requests.post("https://api.openai.com/v1/chat/completions", json=data, headers=headers)
195
  response_data = response.json()
 
241
  model="meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
242
  messages=[{"role": "user", "content": prompt}],
243
  max_tokens=74,
244
+ temperature=0.8
245
  )
246
  cleaned_message = clean_message(response.choices[0].message.content.strip())
247
  return cleaned_message
 
254
  model="meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
255
  messages=[{"role": "user", "content": prompt}],
256
  max_tokens=74,
257
+ temperature=0.8
258
  )
259
  cleaned_message = clean_message(response.choices[0].message.content.strip())
260
  return cleaned_message
 
267
  model="meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
268
  messages=[{"role": "user", "content": prompt}],
269
  max_tokens=74,
270
+ temperature=0.8
271
  )
272
  cleaned_message = clean_message(response.choices[0].message.content.strip())
273
  return cleaned_message
 
280
  model="google/gemma-2-27b-it",
281
  messages=[{"role": "user", "content": prompt}],
282
  max_tokens=74,
283
+ temperature=0.8
284
  )
285
  cleaned_message = clean_message(response.choices[0].message.content.strip())
286
  return cleaned_message
 
293
  model="google/gemma-2-9b-it",
294
  messages=[{"role": "user", "content": prompt}],
295
  max_tokens=74,
296
+ temperature=0.8
297
  )
298
  cleaned_message = clean_message(response.choices[0].message.content.strip())
299
  return cleaned_message
 
308
  {
309
  "role": "user",
310
  "content": prompt,
311
+ "max_tokens": 74,
312
+ "temperature": 0.8
313
  },
314
  ]
315
  )