Moonfanz committed on
Commit
76c89e5
·
verified ·
1 Parent(s): c76f23c

Upload 4 files

Browse files
Files changed (2) hide show
  1. app.py +40 -10
  2. func.py +0 -35
app.py CHANGED
@@ -38,6 +38,26 @@ request_counts = {}
38
  api_key_blacklist = set()
39
  api_key_blacklist_duration = 60
40
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
41
  class APIKeyManager:
42
  def __init__(self):
43
  self.api_keys = os.environ.get('KeyArray').split(',')
@@ -179,10 +199,9 @@ def handle_api_error(error, attempt, stream=False):
179
  logger.warning(f"{current_api_key[:11]} → 429 官方资源耗尽 → {delay} 秒后重试...")
180
  else:
181
  logger.warning(f"{current_api_key[:11]} → 未知错误↙ {delay} 秒后重试...\n{type(error).__name__}\n")
 
 
182
  time.sleep(delay)
183
- if isinstance(error, (ResourceExhausted)):
184
- key_manager.blacklist_key(current_api_key)
185
- switch_api_key()
186
  return False, None
187
 
188
  elif isinstance(error, generation_types.StopCandidateException):
@@ -212,8 +231,8 @@ def chat_completions():
212
  gemini_history, user_message, error_response = func.process_messages_for_gemini(messages)
213
 
214
  if error_response:
215
- logger.error(f"处理输入消息时出错↙\n {error_response}")
216
- return jsonify(error_response), 400
217
 
218
  def do_request(current_api_key, attempt):
219
  isok, time = is_within_rate_limit(current_api_key)
@@ -224,7 +243,18 @@ def chat_completions():
224
 
225
  increment_request_count(current_api_key)
226
 
227
- gen_model = func.get_gen_model(current_api_key, model, temperature, max_tokens)
 
 
 
 
 
 
 
 
 
 
 
228
 
229
  try:
230
  if gemini_history:
@@ -268,7 +298,7 @@ def chat_completions():
268
  yield f"data: {json.dumps(data)}\n\n"
269
  logger.info(f"200!")
270
 
271
- except Exception as e:
272
  logger.error(f"流式输出时截断,请关闭流式输出或修改你的输入")
273
  error_data = {
274
  'error': {
@@ -300,10 +330,10 @@ def chat_completions():
300
  success, response = do_request(current_api_key, attempt)
301
 
302
  if not success:
303
- logger.error(f" {MAX_RETRIES} 次尝试均失败,请调整配置或向Moonfanz反馈")
304
  response = {
305
  'error': {
306
- 'message': f' {MAX_RETRIES} 次尝试均失败,请调整配置或向Moonfanz反馈',
307
  'type': 'internal_server_error'
308
  }
309
  }
@@ -337,7 +367,7 @@ def chat_completions():
337
  'finish_reason': 'stop'
338
  }],
339
  'usage': {
340
- 'prompt_tokens': 0,
341
  'completion_tokens': 0,
342
  'total_tokens': 0
343
  }
 
38
  api_key_blacklist = set()
39
  api_key_blacklist_duration = 60
40
 
41
+ # 核心优势
42
+ safety_settings = [
43
+ {
44
+ "category": "HARM_CATEGORY_HARASSMENT",
45
+ "threshold": "BLOCK_NONE"
46
+ },
47
+ {
48
+ "category": "HARM_CATEGORY_HATE_SPEECH",
49
+ "threshold": "BLOCK_NONE"
50
+ },
51
+ {
52
+ "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
53
+ "threshold": "BLOCK_NONE"
54
+ },
55
+ {
56
+ "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
57
+ "threshold": "BLOCK_NONE"
58
+ },
59
+ ]
60
+
61
  class APIKeyManager:
62
  def __init__(self):
63
  self.api_keys = os.environ.get('KeyArray').split(',')
 
199
  logger.warning(f"{current_api_key[:11]} → 429 官方资源耗尽 → {delay} 秒后重试...")
200
  else:
201
  logger.warning(f"{current_api_key[:11]} → 未知错误↙ {delay} 秒后重试...\n{type(error).__name__}\n")
202
+ key_manager.blacklist_key(current_api_key)
203
+ switch_api_key()
204
  time.sleep(delay)
 
 
 
205
  return False, None
206
 
207
  elif isinstance(error, generation_types.StopCandidateException):
 
231
  gemini_history, user_message, error_response = func.process_messages_for_gemini(messages)
232
 
233
  if error_response:
234
+ logger.error(f"处理输入消息时出错↙\n {error_response}")
235
+ return jsonify(error_response), 400
236
 
237
  def do_request(current_api_key, attempt):
238
  isok, time = is_within_rate_limit(current_api_key)
 
243
 
244
  increment_request_count(current_api_key)
245
 
246
+ genai.configure(api_key=current_api_key)
247
+
248
+ generation_config = {
249
+ "temperature": temperature,
250
+ "max_output_tokens": max_tokens
251
+ }
252
+
253
+ gen_model = genai.GenerativeModel(
254
+ model_name=model,
255
+ generation_config=generation_config,
256
+ safety_settings=safety_settings
257
+ )
258
 
259
  try:
260
  if gemini_history:
 
298
  yield f"data: {json.dumps(data)}\n\n"
299
  logger.info(f"200!")
300
 
301
+ except Exception:
302
  logger.error(f"流式输出时截断,请关闭流式输出或修改你的输入")
303
  error_data = {
304
  'error': {
 
330
  success, response = do_request(current_api_key, attempt)
331
 
332
  if not success:
333
+ logger.error(f"{MAX_RETRIES} 次尝试均失败,请调整配置或向Moonfanz反馈")
334
  response = {
335
  'error': {
336
+ 'message': f'{MAX_RETRIES} 次尝试均失败,请调整配置或向Moonfanz反馈',
337
  'type': 'internal_server_error'
338
  }
339
  }
 
367
  'finish_reason': 'stop'
368
  }],
369
  'usage': {
370
+ 'prompt_tokens': 0,
371
  'completion_tokens': 0,
372
  'total_tokens': 0
373
  }
func.py CHANGED
@@ -13,26 +13,6 @@ logger = logging.getLogger(__name__)
13
 
14
  request_counts = {}
15
 
16
- # 核心优势
17
- safety_settings = [
18
- {
19
- "category": "HARM_CATEGORY_HARASSMENT",
20
- "threshold": "BLOCK_NONE"
21
- },
22
- {
23
- "category": "HARM_CATEGORY_HATE_SPEECH",
24
- "threshold": "BLOCK_NONE"
25
- },
26
- {
27
- "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
28
- "threshold": "BLOCK_NONE"
29
- },
30
- {
31
- "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
32
- "threshold": "BLOCK_NONE"
33
- },
34
- ]
35
-
36
  password = os.environ['password']
37
 
38
  def authenticate_request(request):
@@ -54,21 +34,6 @@ def authenticate_request(request):
54
 
55
  return True, None, None
56
 
57
- def get_gen_model(api_key, model, temperature, max_tokens):
58
- genai.configure(api_key=api_key)
59
-
60
- generation_config = {
61
- "temperature": temperature,
62
- "max_output_tokens": max_tokens
63
- }
64
-
65
- gen_model = genai.GenerativeModel(
66
- model_name=model,
67
- generation_config=generation_config,
68
- safety_settings=safety_settings
69
- )
70
- return gen_model
71
-
72
  def process_messages_for_gemini(messages):
73
  gemini_history = []
74
  errors = []
 
13
 
14
  request_counts = {}
15
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
  password = os.environ['password']
17
 
18
  def authenticate_request(request):
 
34
 
35
  return True, None, None
36
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
37
  def process_messages_for_gemini(messages):
38
  gemini_history = []
39
  errors = []