Spaces:
Runtime error
Runtime error
把函数插件并行数量限制放到config中
Browse files
- config.py +3 -0
- crazy_functions/Latex全文润色.py +1 -1
- crazy_functions/Latex全文翻译.py +1 -1
- crazy_functions/crazy_utils.py +8 -5
- crazy_functions/批量Markdown翻译.py +1 -1
- crazy_functions/批量翻译PDF文档_多线程.py +1 -1
config.py
CHANGED
|
@@ -33,6 +33,9 @@ LAYOUT = "LEFT-RIGHT" # "LEFT-RIGHT"(左右布局) # "TOP-DOWN"(上下布
|
|
| 33 |
# 发送请求到OpenAI后,等待多久判定为超时
|
| 34 |
TIMEOUT_SECONDS = 30
|
| 35 |
|
|
|
|
|
|
|
|
|
|
| 36 |
# 网页的端口, -1代表随机端口
|
| 37 |
WEB_PORT = -1
|
| 38 |
|
|
|
|
| 33 |
# 发送请求到OpenAI后,等待多久判定为超时
|
| 34 |
TIMEOUT_SECONDS = 30
|
| 35 |
|
| 36 |
+
# 多线程函数插件中,默认允许多少路线程同时访问OpenAI。OpenAI的限制是不能超过20
|
| 37 |
+
DEFAULT_WORKER_NUM = 8
|
| 38 |
+
|
| 39 |
# 网页的端口, -1代表随机端口
|
| 40 |
WEB_PORT = -1
|
| 41 |
|
crazy_functions/Latex全文润色.py
CHANGED
|
@@ -92,7 +92,7 @@ def 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch
|
|
| 92 |
chatbot=chatbot,
|
| 93 |
history_array=[[""] for _ in range(n_split)],
|
| 94 |
sys_prompt_array=sys_prompt_array,
|
| 95 |
-
max_workers=5, # 并行任务数量限制,最多同时执行5个,其他的排队等待
|
| 96 |
scroller_max_len = 80
|
| 97 |
)
|
| 98 |
|
|
|
|
| 92 |
chatbot=chatbot,
|
| 93 |
history_array=[[""] for _ in range(n_split)],
|
| 94 |
sys_prompt_array=sys_prompt_array,
|
| 95 |
+
# max_workers=5, # 并行任务数量限制,最多同时执行5个,其他的排队等待
|
| 96 |
scroller_max_len = 80
|
| 97 |
)
|
| 98 |
|
crazy_functions/Latex全文翻译.py
CHANGED
|
@@ -90,7 +90,7 @@ def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch
|
|
| 90 |
chatbot=chatbot,
|
| 91 |
history_array=[[""] for _ in range(n_split)],
|
| 92 |
sys_prompt_array=sys_prompt_array,
|
| 93 |
-
max_workers=5, # OpenAI所允许的最大并行过载
|
| 94 |
scroller_max_len = 80
|
| 95 |
)
|
| 96 |
|
|
|
|
| 90 |
chatbot=chatbot,
|
| 91 |
history_array=[[""] for _ in range(n_split)],
|
| 92 |
sys_prompt_array=sys_prompt_array,
|
| 93 |
+
# max_workers=5, # OpenAI所允许的最大并行过载
|
| 94 |
scroller_max_len = 80
|
| 95 |
)
|
| 96 |
|
crazy_functions/crazy_utils.py
CHANGED
|
@@ -1,10 +1,9 @@
|
|
| 1 |
import traceback
|
| 2 |
-
from toolbox import update_ui
|
| 3 |
|
| 4 |
def input_clipping(inputs, history, max_token_limit):
|
| 5 |
import tiktoken
|
| 6 |
import numpy as np
|
| 7 |
-
from toolbox import get_conf
|
| 8 |
enc = tiktoken.encoding_for_model(*get_conf('LLM_MODEL'))
|
| 9 |
def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
|
| 10 |
|
|
@@ -132,7 +131,7 @@ def request_gpt_model_in_new_thread_with_ui_alive(
|
|
| 132 |
def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
|
| 133 |
inputs_array, inputs_show_user_array, llm_kwargs,
|
| 134 |
chatbot, history_array, sys_prompt_array,
|
| 135 |
-
refresh_interval=0.2, max_workers=10, scroller_max_len=30,
|
| 136 |
handle_token_exceed=True, show_user_at_complete=False,
|
| 137 |
retry_times_at_unknown_error=2,
|
| 138 |
):
|
|
@@ -153,7 +152,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
|
|
| 153 |
history_array (list): List of chat history (历史对话输入,双层列表,第一层列表是子任务分解,第二层列表是对话历史)
|
| 154 |
sys_prompt_array (list): List of system prompts (系统输入,列表,用于输入给GPT的前提提示,比如你是翻译官怎样怎样)
|
| 155 |
refresh_interval (float, optional): Refresh interval for UI (default: 0.2) (刷新时间间隔频率,建议低于1,不可高于3,仅仅服务于视觉效果)
|
| 156 |
-
max_workers (int, optional): Maximum number of threads (default: 10) (最大线程数,如果子任务非常多,需要用此选项防止高频地请求openai导致错误)
|
| 157 |
scroller_max_len (int, optional): Maximum length for scroller (default: 30)(数据流的显示最后收到的多少个字符,仅仅服务于视觉效果)
|
| 158 |
handle_token_exceed (bool, optional): (是否在输入过长时,自动缩减文本)
|
| 159 |
handle_token_exceed:是否自动处理token溢出的情况,如果选择自动处理,则会在溢出时暴力截断,默认开启
|
|
@@ -168,6 +167,10 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
|
|
| 168 |
from request_llm.bridge_chatgpt import predict_no_ui_long_connection
|
| 169 |
assert len(inputs_array) == len(history_array)
|
| 170 |
assert len(inputs_array) == len(sys_prompt_array)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 171 |
executor = ThreadPoolExecutor(max_workers=max_workers)
|
| 172 |
n_frag = len(inputs_array)
|
| 173 |
# 用户反馈
|
|
@@ -176,7 +179,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
|
|
| 176 |
# 跨线程传递
|
| 177 |
mutable = [["", time.time(), "等待中"] for _ in range(n_frag)]
|
| 178 |
|
| 179 |
-
#
|
| 180 |
def _req_gpt(index, inputs, history, sys_prompt):
|
| 181 |
gpt_say = ""
|
| 182 |
retry_op = retry_times_at_unknown_error
|
|
|
|
| 1 |
import traceback
|
| 2 |
+
from toolbox import update_ui, get_conf
|
| 3 |
|
| 4 |
def input_clipping(inputs, history, max_token_limit):
|
| 5 |
import tiktoken
|
| 6 |
import numpy as np
|
|
|
|
| 7 |
enc = tiktoken.encoding_for_model(*get_conf('LLM_MODEL'))
|
| 8 |
def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
|
| 9 |
|
|
|
|
| 131 |
def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
|
| 132 |
inputs_array, inputs_show_user_array, llm_kwargs,
|
| 133 |
chatbot, history_array, sys_prompt_array,
|
| 134 |
+
refresh_interval=0.2, max_workers=-1, scroller_max_len=30,
|
| 135 |
handle_token_exceed=True, show_user_at_complete=False,
|
| 136 |
retry_times_at_unknown_error=2,
|
| 137 |
):
|
|
|
|
| 152 |
history_array (list): List of chat history (历史对话输入,双层列表,第一层列表是子任务分解,第二层列表是对话历史)
|
| 153 |
sys_prompt_array (list): List of system prompts (系统输入,列表,用于输入给GPT的前提提示,比如你是翻译官怎样怎样)
|
| 154 |
refresh_interval (float, optional): Refresh interval for UI (default: 0.2) (刷新时间间隔频率,建议低于1,不可高于3,仅仅服务于视觉效果)
|
| 155 |
+
max_workers (int, optional): Maximum number of threads (default: see config.py) (最大线程数,如果子任务非常多,需要用此选项防止高频地请求openai导致错误)
|
| 156 |
scroller_max_len (int, optional): Maximum length for scroller (default: 30)(数据流的显示最后收到的多少个字符,仅仅服务于视觉效果)
|
| 157 |
handle_token_exceed (bool, optional): (是否在输入过长时,自动缩减文本)
|
| 158 |
handle_token_exceed:是否自动处理token溢出的情况,如果选择自动处理,则会在溢出时暴力截断,默认开启
|
|
|
|
| 167 |
from request_llm.bridge_chatgpt import predict_no_ui_long_connection
|
| 168 |
assert len(inputs_array) == len(history_array)
|
| 169 |
assert len(inputs_array) == len(sys_prompt_array)
|
| 170 |
+
if max_workers == -1: # 读取配置文件
|
| 171 |
+
try: max_workers, = get_conf('DEFAULT_WORKER_NUM')
|
| 172 |
+
except: max_workers = 8
|
| 173 |
+
if max_workers <= 0 or max_workers >= 20: max_workers = 8
|
| 174 |
executor = ThreadPoolExecutor(max_workers=max_workers)
|
| 175 |
n_frag = len(inputs_array)
|
| 176 |
# 用户反馈
|
|
|
|
| 179 |
# 跨线程传递
|
| 180 |
mutable = [["", time.time(), "等待中"] for _ in range(n_frag)]
|
| 181 |
|
| 182 |
+
# 子线程任务
|
| 183 |
def _req_gpt(index, inputs, history, sys_prompt):
|
| 184 |
gpt_say = ""
|
| 185 |
retry_op = retry_times_at_unknown_error
|
crazy_functions/批量Markdown翻译.py
CHANGED
|
@@ -73,7 +73,7 @@ def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch
|
|
| 73 |
chatbot=chatbot,
|
| 74 |
history_array=[[""] for _ in range(n_split)],
|
| 75 |
sys_prompt_array=sys_prompt_array,
|
| 76 |
-
max_workers=5, # OpenAI所允许的最大并行过载
|
| 77 |
scroller_max_len = 80
|
| 78 |
)
|
| 79 |
|
|
|
|
| 73 |
chatbot=chatbot,
|
| 74 |
history_array=[[""] for _ in range(n_split)],
|
| 75 |
sys_prompt_array=sys_prompt_array,
|
| 76 |
+
# max_workers=5, # OpenAI所允许的最大并行过载
|
| 77 |
scroller_max_len = 80
|
| 78 |
)
|
| 79 |
|
crazy_functions/批量翻译PDF文档_多线程.py
CHANGED
|
@@ -98,7 +98,7 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot,
|
|
| 98 |
history_array=[[paper_meta] for _ in paper_fragments],
|
| 99 |
sys_prompt_array=[
|
| 100 |
"请你作为一个学术翻译,负责把学术论文的片段准确翻译成中文。" for _ in paper_fragments],
|
| 101 |
-
max_workers=5 # OpenAI所允许的最大并行过载
|
| 102 |
)
|
| 103 |
|
| 104 |
# 整理报告的格式
|
|
|
|
| 98 |
history_array=[[paper_meta] for _ in paper_fragments],
|
| 99 |
sys_prompt_array=[
|
| 100 |
"请你作为一个学术翻译,负责把学术论文的片段准确翻译成中文。" for _ in paper_fragments],
|
| 101 |
+
# max_workers=5 # OpenAI所允许的最大并行过载
|
| 102 |
)
|
| 103 |
|
| 104 |
# 整理报告的格式
|