Moonfanz committed on
Commit dd71ca7 · verified · 1 Parent(s): 2781d8a

Upload 2 files

Files changed (1):
  1. app.py +110 -99
app.py CHANGED
@@ -59,6 +59,10 @@ safety_settings = [
     {
         "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
         "threshold": "BLOCK_NONE"
+    },
+    {
+        "category": 'HARM_CATEGORY_CIVIC_INTEGRITY',
+        "threshold": 'BLOCK_NONE'
     }
 ]
 safety_settings_g2 = [
@@ -77,6 +81,10 @@ safety_settings_g2 = [
     {
         "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
         "threshold": "OFF"
+    },
+    {
+        "category": 'HARM_CATEGORY_CIVIC_INTEGRITY',
+        "threshold": 'OFF'
     }
 ]
 @dataclass
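The commit extends both safety lists with the newer HARM_CATEGORY_CIVIC_INTEGRITY category, so every filter the proxy can switch off is covered. For context, a minimal sketch of how such a list is typically embedded in a REST generateContent call is shown below; the endpoint URL, model name and payload layout are assumptions for the sketch, not code taken from app.py.

import requests

# Hypothetical illustration only: attach a safety_settings list shaped like the
# ones above to a Gemini REST generateContent request.
def post_with_safety(api_key, safety_settings, user_text):
    url = (
        "https://generativelanguage.googleapis.com/v1beta/"
        "models/gemini-1.5-flash:generateContent"
    )
    data = {
        "contents": [{"role": "user", "parts": [{"text": user_text}]}],
        "safetySettings": safety_settings,  # assumed key name in the REST schema
    }
    return requests.post(url, params={"key": api_key}, json=data, timeout=30)

The plain list uses BLOCK_NONE, while the _g2 list, which presumably targets the Gemini 2 models, uses the OFF threshold.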
@@ -230,7 +238,7 @@ GEMINI_MODELS = [
 
 @app.route('/')
 def index():
-    main_content = "Moonfanz Reminiproxy v2.3.3 2025-01-14"
+    main_content = "Moonfanz Reminiproxy v2.3.4 2025-01-14"
     html_template = """
     <!DOCTYPE html>
     <html>
@@ -289,98 +297,102 @@ def increment_request_count(api_key):
         request_counts[api_key] = deque()
         request_counts[api_key].append(now)
 
-def handle_api_error(error, attempt):
+def handle_api_error(error, attempt, current_api_key):
     if attempt > MAX_RETRIES:
         logger.error(f"{MAX_RETRIES} 次尝试后仍然失败,请修改预设或输入")
         return 0, jsonify({
-            'error': {
-                'message': f"{MAX_RETRIES} 次尝试后仍然失败,请修改预设或输入",
-                'type': 'max_retries_exceeded'
-            }
+            'error': {
+                'message': f"{MAX_RETRIES} 次尝试后仍然失败,请修改预设或输入",
+                'type': 'max_retries_exceeded'
+            }
         })
 
-    if isinstance(error, InvalidArgument):
-        logger.error(f"{current_api_key[:8]} 无效,可能已过期或被删除")
-        key_manager.blacklist_key(current_api_key)
-        switch_api_key()
-        return 0, None
+    if isinstance(error, requests.exceptions.HTTPError):
+        status_code = error.response.status_code
 
-    elif isinstance(error, ResourceExhausted):
-        delay = min(RETRY_DELAY * (2 ** attempt), MAX_RETRY_DELAY)
-        logger.warning(f"{current_api_key[:8]} ... {current_api_key[-3:]} → 429 官方资源耗尽 → {delay} 秒后重试...")
-        key_manager.blacklist_key(current_api_key)
-        switch_api_key()
-        time.sleep(delay)
-        return 0, None
+        if status_code == 400:
 
-    elif isinstance(error, Aborted):
-        delay = min(RETRY_DELAY * (2 ** attempt), MAX_RETRY_DELAY)
-        logger.warning(f"{current_api_key[:8]} ... {current_api_key[-3:]} → 操作被中止 → {delay} 秒后重试...")
-        switch_api_key()
-        time.sleep(delay)
-        return 0, None
+            try:
+                error_data = error.response.json()
+                if 'error' in error_data:
+                    if error_data['error'].get('code') == "invalid_argument":
+                        logger.error(f"{current_api_key[:8]} ... {current_api_key[-3:]} → 无效,可能已过期或被删除")
+                        key_manager.blacklist_key(current_api_key)
+                        switch_api_key()
+                        return 0, None
+                error_message = error_data['error'].get('message', 'Bad Request')
+                error_type = error_data['error'].get('type', 'invalid_request_error')
+                logger.warning(f"400 Bad Request: {error_message}")
+                return 1, jsonify({'error': {'message': error_message, 'type': error_type}})
+            except ValueError:
+                logger.warning("400 Bad Request (Unable to parse error response)")
+                return 1, jsonify({'error': {'message': 'Bad Request', 'type': 'invalid_request_error'}})
+
+        elif status_code == 429:
+            delay = min(RETRY_DELAY * (2 ** attempt), MAX_RETRY_DELAY)
+            logger.warning(
+                f"{current_api_key[:8]} ... {current_api_key[-3:]} → 429 官方资源耗尽 → {delay} 秒后重试..."
+            )
+            key_manager.blacklist_key(current_api_key)
+            switch_api_key()
+            time.sleep(delay)
+            return 0, None
+
+        elif status_code == 403:
+            logger.error(
+                f"{current_api_key[:8]} ... {current_api_key[-3:]} → 403 权限被拒绝,该 API KEY 可能已经被官方封禁"
+            )
+            key_manager.blacklist_key(current_api_key)
+            switch_api_key()
+            return 0, None
+
+        elif status_code == 500:
+            delay = min(RETRY_DELAY * (2 ** attempt), MAX_RETRY_DELAY)
+            logger.warning(
+                f"{current_api_key[:8]} ... {current_api_key[-3:]} → 500 服务器内部错误 → {delay} 秒后重试..."
+            )
+            switch_api_key()
+            time.sleep(delay)
+            return 0, None
+
+        elif status_code == 503:
+            delay = min(RETRY_DELAY * (2 ** attempt), MAX_RETRY_DELAY)
+            logger.warning(
+                f"{current_api_key[:8]} ... {current_api_key[-3:]} → 503 服务不可用 → {delay} 秒后重试..."
+            )
+            switch_api_key()
+            time.sleep(delay)
+            return 0, None
 
-    elif isinstance(error, InternalServerError):
+        else:
+            delay = min(RETRY_DELAY * (2 ** attempt), MAX_RETRY_DELAY)
+            logger.warning(
+                f"{current_api_key[:8]} ... {current_api_key[-3:]} → {status_code} 未知错误 → {delay} 秒后重试..."
+            )
+            switch_api_key()
+            time.sleep(delay)
+            return 0, None
+
+    elif isinstance(error, requests.exceptions.ConnectionError):
         delay = min(RETRY_DELAY * (2 ** attempt), MAX_RETRY_DELAY)
-        logger.warning(f"{current_api_key[:8]} ... {current_api_key[-3:]} 500 服务器内部错误 → {delay} 秒后重试...")
-        switch_api_key()
+        logger.warning(f"连接错误 → {delay} 秒后重试...")
         time.sleep(delay)
         return 0, None
 
-    elif isinstance(error, ServiceUnavailable):
+    elif isinstance(error, requests.exceptions.Timeout):
         delay = min(RETRY_DELAY * (2 ** attempt), MAX_RETRY_DELAY)
-        logger.warning(f"{current_api_key[:8]} ... {current_api_key[-3:]} 503 服务不可用 → {delay} 秒后重试...")
-        switch_api_key()
+        logger.warning(f"请求超时 → {delay} 秒后重试...")
         time.sleep(delay)
         return 0, None
 
-    elif isinstance(error, PermissionDenied):
-        logger.error(f"{current_api_key[:8]} ... {current_api_key[-3:]} → 403 权限被拒绝,该 API KEY 可能已经被官方封禁")
-        key_manager.blacklist_key(current_api_key)
-        switch_api_key()
-        return 0, None
-
-    elif isinstance(error, StopCandidateException):
-        logger.warning(f"AI输出内容被Gemini官方阻挡,代理没有得到有效回复")
-        switch_api_key()
-        return 0, None
-
-    elif isinstance(error, BlockedPromptException):
-        try:
-            full_reason_str = str(error.args[0])
-            logger.error(f"{full_reason_str}")
-            if "block_reason:" in full_reason_str:
-                start_index = full_reason_str.find("block_reason:") + len("block_reason:")
-                block_reason_str = full_reason_str[start_index:].strip()
-
-                if block_reason_str == "SAFETY":
-                    logger.warning(f"用户输入因安全原因被阻止")
-                    return 1, None
-                elif block_reason_str == "BLOCKLIST":
-                    logger.warning(f"用户输入因包含阻止列表中的术语而被阻止")
-                    return 1, None
-                elif block_reason_str == "PROHIBITED_CONTENT":
-                    logger.warning(f"用户输入因包含禁止内容而被阻止")
-                    return 1, None
-                elif block_reason_str == "OTHER":
-                    logger.warning(f"用户输入因未知原因被阻止")
-                    return 1, None
-                else:
-                    logger.warning(f"用户输入被阻止,原因: {block_reason_str}")
-                    return 1, None
-            else:
-                logger.warning(f"用户输入被阻止,原因: {full_reason_str}")
-                return 1, None
-
-        except (IndexError, AttributeError) as e:
-            logger.error(f"获取提示原因失败↙\n{e}")
-            logger.error(f"提示被阻止↙\n{error}")
-            return 2, None
-
     else:
-        logger.error(f"该模型还未发布,暂时不可用,请更换模型或未来一段时间再试")
-        logger.error(f"证明↙\n{error}")
-        return 2, None
+        logger.error(f"发生未知错误: {error}")
+        return 0, jsonify({
+            'error': {
+                'message': f"发生未知错误: {error}",
+                'type': 'unknown_error'
+            }
+        })
 
 @app.route('/hf/v1/chat/completions', methods=['POST'])
 def chat_completions():
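The rewritten handle_api_error keys its behaviour off HTTP status codes from requests instead of the google-api-core exception classes, but keeps the same contract: it returns a (status, payload) pair, where 0 means "key rotated and/or backoff done, retry" and 1 means "stop and send this payload to the client". Every retryable branch shares the same capped exponential backoff; a small self-contained sketch of that formula is below (the RETRY_DELAY and MAX_RETRY_DELAY values are assumed, they are not shown in this commit).

# Capped exponential backoff shared by the retryable branches above:
# delay = min(RETRY_DELAY * 2**attempt, MAX_RETRY_DELAY)
RETRY_DELAY = 1       # assumed base delay in seconds (value not in this commit)
MAX_RETRY_DELAY = 16  # assumed cap in seconds (value not in this commit)

def backoff_delay(attempt: int) -> int:
    return min(RETRY_DELAY * (2 ** attempt), MAX_RETRY_DELAY)

print([backoff_delay(a) for a in range(1, 6)])  # -> [2, 4, 8, 16, 16]

Note that 400 (invalid key), 403 and 429 also blacklist the key before rotating, while 500, 503 and unknown status codes only rotate and retry.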
@@ -436,16 +448,14 @@ def chat_completions():
 
         try:
             response = requests.post(url, headers=headers, json=data, stream=True)
-            response.raise_for_status()
+            response.raise_for_status() # This will raise an HTTPError for bad responses
 
             if stream:
                 return 1, response
             else:
                 return 1, ResponseWrapper(response.json())
         except requests.exceptions.RequestException as e:
-            return handle_api_error(e, attempt)
-        except (StopCandidateException, BlockedPromptException) as e:
-            return handle_api_error(e, attempt)
+            return handle_api_error(e, attempt, current_api_key)
 
     def generate_stream(response):
         logger.info(f"流式开始 →")
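This simplification works because response.raise_for_status() turns any 4xx/5xx reply into requests.exceptions.HTTPError, a subclass of RequestException that keeps the failed response attached, so the single except clause now feeds handle_api_error everything it needs. A small standalone illustration of the pattern (the httpbin URL is just an example target):

import requests

def fetch_json(url):
    try:
        resp = requests.get(url, timeout=10)
        resp.raise_for_status()  # any 4xx/5xx becomes requests.exceptions.HTTPError
        return resp.json()
    except requests.exceptions.HTTPError as e:
        # The failed response stays attached to the exception, which is what
        # handle_api_error reads via error.response.status_code.
        print("HTTP error:", e.response.status_code)
    except requests.exceptions.RequestException as e:
        print("network-level failure:", e)

fetch_json("https://httpbin.org/status/503")  # prints "HTTP error: 503"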
@@ -512,23 +522,14 @@ def chat_completions():
 
         if success == 0:
             continue
-        elif success == 2:
-
-            logger.error(f"{model} 很可能暂时不可用,请更换模型或未来一段时间再试")
-            response = {
-                'error': {
-                    'message': f'{model} 很可能暂时不可用,请更换模型或未来一段时间再试',
-                    'type': 'internal_server_error'
-                }
-            }
-            return jsonify(response), 503
-
-        if stream:
+        elif success == 1 and response is None:
+            continue
+        elif success == 1 and isinstance(response, Response):
             return Response(
-                stream_with_context(generate_stream(response)),
-                mimetype='text/event-stream'
-            )
-        else:
+                stream_with_context(generate_stream(response)),
+                mimetype='text/event-stream'
+            )
+        elif success == 1 and isinstance(response, ResponseWrapper):
             try:
                 text_content = response.text
                 prompt_tokens = response.prompt_token_count
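The success == 1 branch now dispatches on the payload type instead of a separate stream flag: a Flask Response is forwarded as server-sent events, a ResponseWrapper is unpacked into a JSON completion, and None means "retry". The generator behind generate_stream is outside this hunk; the sketch below shows the generic Flask SSE pattern it relies on, with the chunk format assumed to follow the OpenAI delta convention.

import json
import time

from flask import Flask, Response, stream_with_context

app = Flask(__name__)

@app.route('/sse-demo')
def sse_demo():
    def generate():
        # Assumed chunk format: OpenAI-style "data: {...}" lines ending with [DONE].
        for i in range(3):
            chunk = {'choices': [{'delta': {'content': f'part {i} '}}]}
            yield f"data: {json.dumps(chunk)}\n\n"
            time.sleep(0.1)
        yield "data: [DONE]\n\n"

    return Response(stream_with_context(generate()), mimetype='text/event-stream')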
@@ -550,7 +551,7 @@ def chat_completions():
                 continue
 
             response_data = {
-                'id': 'chatcmpl-xxxxxxxxxxxx',
+                'id': 'chatcmpl-xxxxxxxxxxxx',
                 'object': 'chat.completion',
                 'created': int(datetime.now().timestamp()),
                 'model': model,
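For the non-streaming path the proxy answers with an OpenAI-compatible chat.completion object. Only the four keys above are visible in this hunk; the sketch below fills in the conventional choices/usage layout as an assumption so the overall shape is easier to see.

from datetime import datetime

# Assumed layout: 'choices' and 'usage' are the usual OpenAI fields, not shown in the hunk.
def build_completion(model, text_content, prompt_tokens, completion_tokens):
    return {
        'id': 'chatcmpl-xxxxxxxxxxxx',
        'object': 'chat.completion',
        'created': int(datetime.now().timestamp()),
        'model': model,
        'choices': [{
            'index': 0,
            'message': {'role': 'assistant', 'content': text_content},
            'finish_reason': 'stop',
        }],
        'usage': {
            'prompt_tokens': prompt_tokens,
            'completion_tokens': completion_tokens,
            'total_tokens': prompt_tokens + completion_tokens,
        },
    }

print(build_completion('gemini-1.5-pro', 'hello', 3, 1)['usage'])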
@@ -570,7 +571,17 @@ def chat_completions():
             }
             logger.info(f"200!")
             return jsonify(response_data)
-
+        elif success == 1 and isinstance(response, tuple):
+            return response[1], response[0]
+        elif success == 2:
+            logger.error(f"{model} 很可能暂时不可用,请更换模型或未来一段时间再试")
+            response = {
+                'error': {
+                    'message': f'{model} 很可能暂时不可用,请更换模型或未来一段时间再试',
+                    'type': 'internal_server_error'
+                }
+            }
+            return jsonify(response), 503
     else:
         logger.error(f"{MAX_RETRIES} 次尝试均失败,请调整配置,或等待官方恢复,或向Moonfanz反馈")
         response = {
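Taken together, the changes in this file define the retry protocol inside chat_completions: the request helper yields (0, None) for "rotate key and try again", (1, payload) for "return this to the client" (a stream, a parsed body, or a (body, status) tuple), and 2 for "model unavailable", with the for-else covering the case where every attempt failed. A condensed, self-contained sketch of that control flow (send_request and MAX_RETRIES are stubs standing in for the real helper and config):

MAX_RETRIES = 3

def send_request(attempt):
    # Stub: fail retryably on the first attempt, succeed on the second.
    if attempt == 1:
        return 0, None
    return 1, {'object': 'chat.completion', 'choices': []}

def answer():
    for attempt in range(1, MAX_RETRIES + 1):
        success, response = send_request(attempt)
        if success == 0 or response is None:
            continue                 # key rotated / backoff done inside the helper
        elif success == 1:
            return response, 200     # stream or parsed body handed back to Flask
        elif success == 2:
            return {'error': 'model temporarily unavailable'}, 503
    else:
        # for-else, as in app.py: every attempt failed
        return {'error': 'all retries failed'}, 500

print(answer())  # ({'object': 'chat.completion', 'choices': []}, 200)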
@@ -655,7 +666,7 @@ if __name__ == '__main__':
 
     scheduler.add_job(keep_alive, 'interval', hours=12)
     scheduler.start()
-    logger.info(f"Reminiproxy v2.3.3 启动")
+    logger.info(f"Reminiproxy v2.3.4 启动")
     logger.info(f"最大尝试次数/MaxRetries: {MAX_RETRIES}")
     logger.info(f"最大请求次数/MaxRequests: {MAX_REQUESTS}")
     logger.info(f"请求限额窗口/LimitWindow: {LIMIT_WINDOW} 秒")
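The keep_alive job registered here presumably pings the app every 12 hours so the Hugging Face Space does not idle; its body is not part of this commit. A plausible shape, with the ping URL as a placeholder:

from apscheduler.schedulers.background import BackgroundScheduler
import requests

# Assumed shape of keep_alive: the real function body is not shown in this diff,
# and the URL below is a placeholder for the Space's own address.
def keep_alive():
    try:
        requests.get("http://127.0.0.1:7860/", timeout=10)
    except requests.exceptions.RequestException:
        pass  # a failed ping must not take down the scheduler thread

scheduler = BackgroundScheduler()
scheduler.add_job(keep_alive, 'interval', hours=12)
scheduler.start()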
 