wangrongsheng committed on
Commit aa77a85 · 1 Parent(s): f501d01

Upload 5 files

Files changed (5)
  1. app.py +27 -0
  2. app_cn.py +182 -0
  3. app_js.py +196 -0
  4. functions.py +569 -0
  5. requirements.txt +2 -0
app.py ADDED
@@ -0,0 +1,27 @@
+ '''
+ Author: Rongsheng Wang
+ Date: 2023.09.10
+ Description: News data maker.
+ Usage: python app.py
+ '''
+ import gradio as gr
+ import gradio
+
+ from app_cn import demo as demo_cn
+
+ css = """
+ .table-wrap .cell-wrap input {min-width:80%}
+ #api-key-textbox textarea {filter:blur(8px); transition: filter 0.25s}
+ #api-key-textbox textarea:focus {filter:none}
+ #chat-log-md hr {margin-top: 1rem; margin-bottom: 1rem;}
+ #chat-markdown-wrap-box {max-height:80vh; overflow: auto !important;}
+ """
+ demo = gr.TabbedInterface(
+     [
+         demo_cn
+     ], [
+         "中文版"
+     ], css=css)
+
+ if __name__ == "__main__":
+     demo.queue(concurrency_count=200).launch()
app_cn.py ADDED
@@ -0,0 +1,182 @@
+ # import gradio as gr
+ import gradio
+ # import lmdb
+ # import base64
+ # import io
+ # import random
+ # import time
+ import json
+ import copy
+ # import sqlite3
+ from urllib.parse import urljoin
+ import openai
+
+ from app_js import api_key__get_from_browser, api_key__save_to_browser, saved_prompts_refresh_btn__click_js, selected_saved_prompt_title__change_js, saved_prompts_delete_btn__click_js, saved_prompts_save_btn__click_js, copy_prompt__click_js, paste_prompt__click_js, chat_copy_history_btn__click_js, chat_copy_history_md_btn__click_js, api_key_refresh_btn__click_js, api_key_save_btn__click_js
+
+ from functions import sequential_chat_fn, make_history_file_fn, on_click_send_btn, clear_history, copy_history, update_saved_prompt_titles, save_prompt, load_saved_prompt
+
+ introduction = """<center><h2>ChatGPT 数据蒸馏助手</h2></center>
+ """
+
+
+ css = """
+ .table-wrap .cell-wrap input {min-width:80%}
+ #api-key-textbox textarea {filter:blur(8px); transition: filter 0.25s}
+ #api-key-textbox textarea:focus {filter:none}
+ #chat-log-md hr {margin-top: 1rem; margin-bottom: 1rem;}
+ #chat-markdown-wrap-box {max-height:80vh; overflow: auto !important;}
+ """
+ with gradio.Blocks(title="ChatGPT 批处理", css=css) as demo:
+
+     with gradio.Accordion("说明", open=True):
+         gradio.Markdown(introduction)
+
+     with gradio.Accordion("基本设置", open=False):
+         system_prompt_enabled = gradio.Checkbox(label='是否使用系统全局提示语', info='是否要以“系统”身份,给 ChatGPT 描述任务?', value=True)
+         # System-level prompt
+         system_prompt = gradio.Textbox(label='系统级全局提示语', info='以“系统”身份,给 ChatGPT 描述任务', value='你是一个医院导诊员,患者给你医院科室名称,请根据医院科室名称介绍一下该科室的作用,可以治疗的疾病,跟其它哪些科室联系比较紧密从而协助患者疾病的救治。请注意:你应该返回科室的作用、可以治疗的疾病、跟其它哪些科室联系比较紧密从而协助患者疾病的救治,而不要返回多余的内容,否则用户所使用的程序将会出错,给用户带来严重的损失。')
+         # Template applied to every user message
+         user_message_template = gradio.Textbox(label='用户消息模板', info='要批量发送的消息的模板', value='科室名称:```___```')
+         with gradio.Row():
+             # Placeholder in the template to be replaced
+             user_message_template_mask = gradio.Textbox(label='模板占位符', info='消息模板中需要被替换的部分,可以是正则表达式', value='___')
+             # Whether the placeholder is a regular expression
+             user_message_template_mask_is_regex = gradio.Checkbox(label='模板占位符是正则吗', info='模板占位符是不是正则表达式?', value=False)
+         # Text blob holding all user messages to send
+         user_message_list_text = gradio.Textbox(label='用户消息列表', info='所有待发送的消息', value='全科医学 心内科 感染病科 血液科 内分泌科 呼吸科 肾脏科 消化内科 风湿免疫科 肿瘤科 神经内科')
+         with gradio.Row():
+             # Separator used to split the user message list
+             user_message_list_text_splitter = gradio.Textbox(label='用户消息分隔符', info='用于分割用户消息列表的分隔符,如逗号(`,`)、换行符(`\\n`)等,也可以是正则表达式,此处默认空格', value='\\s+')
+             # Whether the separator is a regular expression
+             user_message_list_text_splitter_is_regex = gradio.Checkbox(label='分隔符是正则吗', info='用户消息分隔符是不是正则表达式?', value=True)
+         # Number of history entries carried with each request
+         history_prompt_num = gradio.Slider(label="发送历史记录条数", info='每次发送消息时,同时携带多少条先前的历史记录(以便 ChatGPT 了解上下文)', value=0, minimum=0, maximum=12000)
+
+     # load_config_from_browser = gradio.Button("🔄 从浏览器加载配置")
+     # save_config_to_browser = gradio.Button("💾 将配置保存到浏览器")
+     # export_config_to_file = gradio.Button("📤 将配置导出为文件")
+
+     # More parameters
+     with gradio.Accordion("更多参数", open=False):
+         # Base interval between requests
+         sleep_base = gradio.Number(label='时间间隔 ms', value=700)
+         # Random jitter added to the interval
+         sleep_rand = gradio.Number(label='时间间隔浮动 ms', value=200)
+         # Request parameters
+         prop_stream = gradio.Checkbox(label="流式传输 stream", value=True)
+         prop_model = gradio.Textbox(label="模型 model", value="gpt-3.5-turbo")
+         prop_temperature = gradio.Slider(label="temperature", value=0.7, minimum=0, maximum=2)
+         prop_top_p = gradio.Slider(label="top_p", value=1, minimum=0, maximum=1)
+         prop_choices_num = gradio.Slider(label="choices num(n)", value=1, minimum=1, maximum=20)
+         prop_max_tokens = gradio.Slider(label="max_tokens", value=-1, minimum=-1, maximum=4096)
+         prop_presence_penalty = gradio.Slider(label="presence_penalty", value=0, minimum=-2, maximum=2)
+         prop_frequency_penalty = gradio.Slider(label="frequency_penalty", value=0, minimum=-2, maximum=2)
+         prop_logit_bias = gradio.Textbox(label="logit_bias", visible=False)
+         pass
+
+     # API key
+     token_text = gradio.Textbox(visible=False)
+     with gradio.Row():
+         with gradio.Column(scale=10, min_width=100):
+             # Do not hard-code a real API key here; the user supplies it at runtime
+             api_key_text = gradio.Textbox(label="OpenAI-APIkey", placeholder="sk-...", elem_id="api-key-textbox", value='')
+         # with gradio.Column(scale=1, min_width=100):
+         #     api_key_load_btn = gradio.Button("🔄 从浏览器本地存储加载")
+         #     api_key_load_btn.click(
+         #         None,
+         #         inputs=[],
+         #         outputs=[api_key_text, token_text],
+         #         _js=api_key__get_from_browser,
+         #     )
+         # with gradio.Column(scale=1, min_width=100):
+         #     api_key_save_btn = gradio.Button("💾 保存到浏览器本地存储")
+         #     api_key_save_btn.click(
+         #         None,
+         #         inputs=[api_key_text, token_text],
+         #         outputs=[api_key_text, token_text],
+         #         _js=api_key__save_to_browser,
+         #     )
+         pass
+     pass
+
+     # Start button
+     start_btn = gradio.Button(value='开始!')
+
+     with gradio.Accordion(label="数据记录", elem_id='chat-markdown-wrap-box'):
+         # Output area (hidden state)
+         history = gradio.State(value=[])
+         # Output area (rendered as Markdown)
+         history_md_stable = gradio.Markdown(value="用户")
+         history_md_stream = gradio.Markdown(value="助手")
+
+     with gradio.Accordion("状态"):
+         tips = gradio.Markdown(value="待命")
+
+     # Stop button
+     stop_btn = gradio.Button(value='中止!')
+
+     with gradio.Accordion("下载数据", open=True):
+         # gradio.Markdown("(暂时无法下载,可能是 Hugging Face 的限制,之后更新)")
+         make_file_btn = gradio.Button(value='生成文件')
+         with gradio.Row(visible=False) as file_row:
+             # Download area (JSON file)
+             history_file_json = gradio.File(label='Json 下载', interactive=False)
+             # Download area (Markdown file)
+             history_file_md = gradio.File(label='Markdown 下载', interactive=False)
+             pass
+         pass
+
+
+     make_file_btn.click(
+         fn=make_history_file_fn,
+         inputs=[history],
+         outputs=[history_file_json, history_file_md, file_row],
+     )
+
+
+     start_event = start_btn.click(
+         fn=sequential_chat_fn,
+         inputs=[
+             history,
+
+             system_prompt_enabled,
+             system_prompt,
+             user_message_template,
+             user_message_template_mask,
+             user_message_template_mask_is_regex,
+             user_message_list_text,
+             user_message_list_text_splitter,
+             user_message_list_text_splitter_is_regex,
+             history_prompt_num,
+
+             api_key_text, token_text,
+
+             sleep_base,
+             sleep_rand,
+             prop_stream,
+             prop_model,
+             prop_temperature,
+             prop_top_p,
+             prop_choices_num,
+             prop_max_tokens,
+             prop_presence_penalty,
+             prop_frequency_penalty,
+             prop_logit_bias,
+         ],
+         outputs=[
+             history,
+             history_md_stable,
+             history_md_stream,
+             tips,
+             file_row,
+         ],
+     )
+     stop_btn.click(
+         fn=None,
+         inputs=[],
+         outputs=[],
+         cancels=[start_event],
+     )
+
+
+ if __name__ == "__main__":
+     demo.queue(concurrency_count=200).launch()
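The start/stop wiring above relies on Gradio's generator-event pattern: sequential_chat_fn is a generator, each yield pushes an intermediate update to the bound output components, and stop_btn aborts the in-flight event through cancels=. Below is a minimal, self-contained sketch of that pattern under gradio 3.x; the names (sketch, out, start, stop, count_up) are illustrative, not from this repo.

# Minimal sketch of the generator-event + cancel pattern (assumed gradio 3.x).
import time
import gradio

with gradio.Blocks() as sketch:
    out = gradio.Markdown("待命")
    start = gradio.Button("开始!")
    stop = gradio.Button("中止!")

    def count_up():
        # Each yield pushes an intermediate update to `out`,
        # which is how a long batch run streams its progress.
        for i in range(100):
            time.sleep(0.5)
            yield f"step {i}"

    start_event = start.click(fn=count_up, inputs=[], outputs=[out])
    # cancels= aborts the in-flight generator event; this is the
    # mechanism behind the stop button above.
    stop.click(fn=None, inputs=[], outputs=[], cancels=[start_event])

sketch.queue()  # cancels= requires the queue to be enabled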
app_js.py ADDED
@@ -0,0 +1,196 @@
+
+ saved_prompts_refresh_btn__click_js = """(global_state_json, saved_prompts)=>{
+     try {
+         if(global_state_json=="") {global_state_json=null;};
+         console.log('global_state_json:\\n', global_state_json);
+         const global_state = JSON.parse(global_state_json??"{ }")??{ };
+
+         const saved = (JSON.parse(localStorage?.getItem?.('[gradio][chat-gpt-ui][prompts]') ?? '[]'));
+         console.log('saved:\\n', saved);
+         global_state['saved_prompts'] = saved;
+         global_state['selected_saved_prompt_title'] = saved.map(it=>it?.title??"[untitled]")[0];
+
+         const results = [JSON.stringify(global_state), global_state['selected_saved_prompt_title']];
+         console.log(results);
+         return results;
+     } catch(error) {
+         console.log(error);
+         return ["{ }", ""];
+     };
+ }"""
+
+
+ selected_saved_prompt_title__change_js = """(global_state_json, selected_saved_prompt_title)=>{
+     if(global_state_json=="") {global_state_json=null;};
+     const global_state = JSON.parse(global_state_json??"{ }")??{ };
+     const found = (global_state?.['saved_prompts']??[]).find(it=>it?.title==selected_saved_prompt_title);
+     return [JSON.stringify(global_state), found?.title??'', found?.content??{data:[], headers:["role", "content"]}];
+ }"""
+
+
+ saved_prompts_delete_btn__click_js = """(global_state_json, saved_prompts, prompt_title, prompt_table)=>{
+     if(prompt_title==""||!prompt_title){
+         // no title given: return the inputs unchanged
+         return [global_state_json, saved_prompts, prompt_title, prompt_table];
+     };
+     console.log('global_state_json:\\n', global_state_json);
+
+     if(global_state_json=="") {global_state_json=null;};
+     const global_state = JSON.parse(global_state_json??"{ }")??{ };
+     console.log(global_state);
+
+     const saved = (JSON.parse(localStorage?.getItem?.('[gradio][chat-gpt-ui][prompts]') ?? '[]'));
+     console.log('saved:\\n', saved);
+
+     global_state['saved_prompts'] = saved?.filter?.(it=>it.title!=prompt_title)??[];
+
+     global_state['selected_saved_prompt_title'] = "";
+
+     console.log(global_state);
+
+     localStorage?.setItem?.('[gradio][chat-gpt-ui][prompts]', JSON.stringify(global_state['saved_prompts']));
+
+     return [JSON.stringify(global_state), "", "", {data: [], headers: ['role', 'content']}];
+ }"""
+
+
+ saved_prompts_save_btn__click_js = """(global_state_json, saved_prompts, prompt_title, prompt_table)=>{
+     if(prompt_title==""||!prompt_title){
+         // no title given: return the inputs unchanged
+         return [global_state_json, saved_prompts, prompt_title, prompt_table];
+     };
+     console.log('global_state_json:\\n', global_state_json);
+
+     if(global_state_json=="") {global_state_json=null;};
+     const global_state = JSON.parse(global_state_json??"{ }")??{ };
+     console.log(global_state);
+
+     const saved = (JSON.parse(localStorage?.getItem?.('[gradio][chat-gpt-ui][prompts]') ?? '[]'));
+     console.log('saved:\\n', saved);
+
+     const new_prompt_obj = {
+         title: prompt_title, content: prompt_table,
+     };
+
+     global_state['saved_prompts'] = saved?.filter?.(it=>it.title!=prompt_title)??[];
+     global_state['saved_prompts'].unshift(new_prompt_obj);
+     global_state['selected_saved_prompt_title'] = prompt_title;
+
+     console.log(global_state);
+
+     localStorage?.setItem?.('[gradio][chat-gpt-ui][prompts]', JSON.stringify(global_state['saved_prompts']));
+
+     return [JSON.stringify(global_state), prompt_title, prompt_title, prompt_table];
+ }"""
+
+
+ copy_prompt__click_js = """(prompt_title, prompt_table)=>{
+     try {
+         const txt = JSON.stringify({
+             title: prompt_title,
+             content: prompt_table,
+         }, null, 2);
+         console.log(txt);
+         const promise = navigator?.clipboard?.writeText?.(txt);
+     } catch(error) {console?.log?.(error);};
+     return [prompt_title, prompt_table];
+ }"""
+
+
+ paste_prompt__click_js = """async (prompt_title, prompt_table)=>{
+     console.log("flag1");
+     try {
+         const promise = navigator?.clipboard?.readText?.();
+         console.log(promise);
+         console.log("flag1 p");
+         const result = await promise?.then?.((txt)=>{
+             console.log("flag1 t");
+             const json = JSON.parse(txt);
+             const title = json?.title ?? "";
+             console.log("flag1 0");
+             console.log(title);
+             const content = json?.content ?? {data: [], headers: ['role', 'content']};
+             console.log(content);
+             const result = [title, content];
+             console.log("flag1 1");
+             console.log(result);
+             console.log("flag1 2");
+             return result;
+         });
+         console.log("flag1 3");
+         if (result!=null) {
+             return result;
+         };
+     } catch(error) {console?.log?.(error);};
+     console.log("flag2");
+     try {
+         const promise = navigator?.clipboard?.read?.();
+         console.log(promise);
+         promise?.then?.((data)=>{
+             console.log(data);
+         });
+     } catch(error) {console?.log?.(error);};
+     console.log("flag3");
+     return [prompt_title, prompt_table];
+ }"""
+
+
+ chat_copy_history_btn__click_js = """(txt)=>{
+     console.log(txt);
+     try {let promise = navigator?.clipboard?.writeText?.(txt);}
+     catch(error) {console?.log?.(error);};
+ }"""
+
+
+ chat_copy_history_md_btn__click_js = """(txt)=>{
+     console.log(txt);
+     try {let promise = navigator?.clipboard?.writeText?.(txt);}
+     catch(error) {console?.log?.(error);};
+ }"""
+
+
+ # api_key_refresh_btn__click_js = """()=>{
+ #     const the_api_key = localStorage?.getItem?.('[gradio][chat-gpt-ui][api_key_text]') ?? '';
+ #     return the_api_key;
+ # }"""
+
+
+ # api_key_save_btn__click_js = """(api_key_text)=>{
+ #     localStorage.setItem('[gradio][chat-gpt-ui][api_key_text]', api_key_text);
+ #     return api_key_text;
+ # }"""
+
+
+ api_key_refresh_btn__click_js = """()=>{
+     const the_api_key = localStorage?.getItem?.('[gradio][chat-gpt-ui][api_key_text]') ?? '';
+     return the_api_key;
+ }"""
+
+
+ api_key_save_btn__click_js = """(api_key_text)=>{
+     localStorage.setItem('[gradio][chat-gpt-ui][api_key_text]', api_key_text);
+     return api_key_text;
+ }"""
+
+
+ api_key__get_from_browser = """()=>{
+     const api_key = localStorage?.getItem?.('[gradio][chat-gpt-ui][api_key]') ?? '';
+     const token = localStorage?.getItem?.('[gradio][chat-gpt-ui][token]') ?? '';
+     return [api_key, token];
+ }"""
+
+ api_key__save_to_browser = """(api_key, token)=>{
+     localStorage?.setItem?.('[gradio][chat-gpt-ui][api_key]', api_key);
+     token = localStorage?.getItem?.('[gradio][chat-gpt-ui][token]') ?? token ?? '';
+     if (!token?.length) {
+         // derive a random token from a temporary blob URL's UUID
+         const temp_url = URL.createObjectURL(new Blob());
+         const uuid = temp_url.toString();
+         URL.revokeObjectURL(temp_url);
+         token = uuid.substr(uuid.lastIndexOf("/") + 1);
+     };
+     localStorage.setItem('[gradio][chat-gpt-ui][token]', token);
+     return [api_key, token];
+ }"""
+
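app_cn.py imports these snippets, but the bindings that use them are commented out there. For reference, a hedged sketch of how one of them attaches to a Gradio 3.x event through the `_js` argument, following the commented-out pattern in app_cn.py; `refresh_btn` and `api_key_box` are illustrative names, not from this repo.

# Sketch: running api_key_refresh_btn__click_js in the browser on click.
import gradio
from app_js import api_key_refresh_btn__click_js

with gradio.Blocks() as sketch:
    api_key_box = gradio.Textbox(label="OpenAI-APIkey")
    refresh_btn = gradio.Button("🔄")
    # fn=None with _js= runs the JS snippet client-side; its return
    # value feeds the declared outputs (here, the textbox).
    refresh_btn.click(None, inputs=[], outputs=[api_key_box],
                      _js=api_key_refresh_btn__click_js)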
functions.py ADDED
@@ -0,0 +1,569 @@
+ # import gradio as gr
+ import gradio
+ # import lmdb
+ # import base64
+ # import io
+ import random
+ import time
+ import os
+ import re
+ import sys
+ import json
+ import copy
+ # import sqlite3
+ import hashlib
+ import uuid
+ from urllib.parse import urljoin
+ import openai
+
+
+ def get_random_sleep(base_time, random_range):
+     # base_time and random_range are in milliseconds; returns seconds
+     return (base_time + random.randint(-random_range, random_range))*0.001
+
+
+ def js_load(txt):
+     try:
+         return json.loads(txt)
+     except Exception as error:
+         print('')
+         print('js_load:')
+         print(str(error))
+         print('')
+         return None
+
+
+ def js_dump(thing):
+     try:
+         return json.dumps(thing)
+     except Exception as error:
+         print('')
+         print('js_dump:')
+         print(str(error))
+         print('')
+         return None
+
+
+ def filtered_history(history, num=0):
+     if num > 0:
+         filtered = list(filter(lambda it:(it['type'] in ['request', 'response']), history))
+         return filtered[-num:]
+     return []
+
+
+ def filtered_history_messages(history, num=0):
+     filtered = filtered_history(history, num)
+     return list(map(lambda it:{'role': it.get('role'), 'content': it.get('content')}, filtered))
+
+
+ def make_md_line(role, content):
+     return f"""\n##### `{role}`\n\n{content}\n"""
+
+
+ def make_md_by_history(history):
+     md = ""
+     for item in history:
+         md += make_md_line(item.get('role'), item.get('content'))
+     return md
+
+
+ def make_history_file_fn(history):
+
+     uuid4 = str(uuid.uuid4())
+     json_file_path = None
+     md_file_path = None
+
+     save_history = []
+     for item in history:
+         if item["role"] == "assistant":
+             info = {
+                 "role": item["role"],  # kept so make_md_by_history can label the line
+                 "content": str(item["content"]).replace("\n", "")
+             }
+             save_history.append(info)
+
+     try:
+         # Create the directory if it does not exist
+         os.makedirs('temp_files', exist_ok=True)
+
+         json_file_content = json.dumps(save_history, ensure_ascii=False)
+         json_file_path = os.path.join('temp_files', f'save_history[{uuid4}].json')
+         with open(json_file_path, 'w') as f:
+             f.write(json_file_content)
+
+         md_file_content = make_md_by_history(save_history)
+         md_file_path = os.path.join('temp_files', f'save_history[{uuid4}].md')
+         with open(md_file_path, 'w') as f:
+             f.write(md_file_content)
+
+         return json_file_path, md_file_path, gradio.update(visible=True)
+
+     except Exception as error:
+         print(f"\n{error}\n")
+
+     return json_file_path, md_file_path, gradio.update(visible=True)
+
+
+ def make_history_file_fn__(history):
+     uuid4 = str(uuid.uuid4())
+     try:
+         json_file_content = json.dumps(history, ensure_ascii=False)
+         json_file_path = os.path.join('temp_files', f'history[{uuid4}].json')
+         with open(json_file_path, 'w') as f:
+             f.write(json_file_content)
+     except Exception as error:
+         print(f"\n{error}\n")
+         json_file_path = None
+     try:
+         md_file_content = make_md_by_history(history)
+         md_file_path = os.path.join('temp_files', f'history[{uuid4}].md')
+         with open(md_file_path, 'w') as f:
+             f.write(md_file_content)
+     except Exception as error:
+         print(f"\n{error}\n")
+         md_file_path = None
+
+     return json_file_path, md_file_path, gradio.update(visible=True)
+
+
+ def make_user_message_list_fn__(
+     user_message_template,  # template applied to every message
+     user_message_template_mask,  # the part of the template to be replaced
+     user_message_template_mask_is_regex,  # whether the mask is a regular expression
+     user_message_list_text,  # one text blob containing every user message
+     user_message_list_text_splitter,  # what to split user_message_list_text on
+     user_message_list_text_splitter_is_regex,  # whether the splitter is a regular expression
+ ) -> list:
+     # Returns the list of user messages with the template applied.
+     # First the message list text is split, using a regex or a plain
+     # separator depending on the flag; then, for each message, the masked
+     # part of the template is replaced (again regex or plain, per the flag)
+     # and the result is appended to the output list.
+
+     # Split the user message list text
+     if user_message_list_text_splitter_is_regex:
+         user_messages = re.split(user_message_list_text_splitter, user_message_list_text)
+     else:
+         user_messages = user_message_list_text.split(user_message_list_text_splitter)
+
+     # Build the templated user message list
+     user_message_result_list = []
+     for message in user_messages:
+         # Fill the template placeholder
+         if user_message_template_mask_is_regex:
+             transformed_message = re.sub(user_message_template_mask, message, user_message_template)
+         else:
+             transformed_message = user_message_template.replace(user_message_template_mask, message)
+
+         user_message_result_list.append(transformed_message)
+
+     return user_message_result_list
+
+
+ def make_user_message_list_fn(
+     user_message_template,
+     user_message_template_mask,
+     user_message_template_mask_is_regex,
+     user_message_list_text,
+     user_message_list_text_splitter,
+     user_message_list_text_splitter_is_regex,
+ ) -> list:
+
+     # Same as the variant above, except that when a xxx_is_regex flag is
+     # True the pattern is compiled into a regex object up front, and the
+     # compiled object's methods are used for splitting and substitution.
+
+     # Compile the regular expressions
+     if user_message_template_mask_is_regex:
+         user_message_template_mask = re.compile(user_message_template_mask)
+
+     if user_message_list_text_splitter_is_regex:
+         user_message_list_text_splitter = re.compile(user_message_list_text_splitter)
+
+     # Split the user message list text
+     if user_message_list_text_splitter_is_regex:
+         user_messages = user_message_list_text_splitter.split(user_message_list_text)
+     else:
+         user_messages = user_message_list_text.split(user_message_list_text_splitter)
+
+     # Build the templated user message list
+     user_message_result_list = []
+     for message in user_messages:
+         # Fill the template placeholder
+         if user_message_template_mask_is_regex:
+             transformed_message = user_message_template_mask.sub(message, user_message_template)
+         else:
+             transformed_message = user_message_template.replace(user_message_template_mask, message)
+
+         user_message_result_list.append(transformed_message)
+
+     return user_message_result_list
+
+
+ def sequential_chat_once_fn(payload, api_key_text, history, history_md_stable, history_md_stream, tips):
+     # print("\n\n")
+
+     assistant_message = ""
+     tips = ""
+
+     try:
+         openai.api_key = api_key_text
+         completion = openai.ChatCompletion.create(**payload)
+
+         if payload.get('stream'):
+             print('assistant:')
+             # print('->>>')
+             is_first = True
+             for chunk in completion:
+                 if is_first:
+                     # the first chunk only carries the role; skip it
+                     is_first = False
+                     continue
+                 if chunk.choices[0].finish_reason is None:
+                     # sys.stdout.write("\r")
+                     print(chunk.choices[0].delta.content or '', end="")
+                     assistant_message += chunk.choices[0].delta.content or ''
+                     # print(f"\033[2K{assistant_message}", end="")
+                     history_md_stream = make_md_line('assistant', assistant_message)
+                     tips = 'streaming'
+                     yield assistant_message, history_md_stream, tips, history
+                 else:
+                     pass
+                 pass
+             # print('=>>>')
+             print('')
+             pass
+         else:
+             assistant_message = completion.choices[0].message.content
+             history_md_stream = make_md_line('assistant', assistant_message)
+             tips = 'got'
+             print('assistant:')
+             print(assistant_message)
+             yield assistant_message, history_md_stream, tips, history
+             pass
+
+     except Exception as error:
+         tips = str(error)
+         history.append({"role": "app", "content": tips})
+         print(f"\n{tips}\n")
+         yield assistant_message, history_md_stream, tips, history
+         pass
+
+     # print("\n\n")
+
+
+ def sequential_chat_fn(
+     history,
+
+     system_prompt_enabled,
+     system_prompt,
+     user_message_template,
+     user_message_template_mask,
+     user_message_template_mask_is_regex,
+     user_message_list_text,
+     user_message_list_text_splitter,
+     user_message_list_text_splitter_is_regex,
+     history_prompt_num,
+
+     api_key_text, token_text,
+
+     sleep_base, sleep_rand,
+
+     prop_stream, prop_model, prop_temperature, prop_top_p, prop_choices_num, prop_max_tokens, prop_presence_penalty, prop_frequency_penalty, prop_logit_bias,
+ ):
+     # outputs=[
+     #     history,
+     #     history_md_stable,
+     #     history_md_stream,
+     #     tips,
+     #     file_row,
+     # ],
+
+     history_md_stable = ""
+     history_md_stream = ""
+     tips = ""
+
+     try:
+
+         user_message_list = make_user_message_list_fn(
+             user_message_template,
+             user_message_template_mask,
+             user_message_template_mask_is_regex,
+             user_message_list_text,
+             user_message_list_text_splitter,
+             user_message_list_text_splitter_is_regex,
+         )
+
+         payload = {
+             'model': prop_model,
+             'temperature': prop_temperature,
+             'top_p': prop_top_p,
+             'n': prop_choices_num,
+             'stream': prop_stream,
+             'presence_penalty': prop_presence_penalty,
+             'frequency_penalty': prop_frequency_penalty,
+             'user': token_text,
+         }
+         if prop_max_tokens > 0:
+             payload['max_tokens'] = prop_max_tokens
+         # if prop_logit_bias is not None:
+         #     payload['logit_bias'] = prop_logit_bias
+         # headers = {
+         #     "Content-Type": "application/json",
+         #     "Authorization": f"Bearer {api_key_text}"
+         # }
+
+         for user_message in user_message_list:
+             print('')
+             print(f'user({token_text}):')
+             print(user_message)
+             print('')
+             # build the_messages to send
+             the_messages = []
+             if system_prompt_enabled:
+                 the_messages.append({"role": "system", "content": system_prompt})
+             for msg in filtered_history_messages(history, num=history_prompt_num):
+                 the_messages.append(msg)
+             the_messages.append({"role": "user", "content": user_message})
+             payload['messages'] = the_messages
+
+             history.append({"role": "user", "content": user_message, "type": "request", "payload": payload})
+
+             history_md_stable = make_md_by_history(history)
+             history_md_stream = ""
+             tips = ""
+             yield history, history_md_stable, history_md_stream, tips, gradio.update(visible=False)
+
+             try:
+                 for (assistant_message, history_md_stream, tips, history) in sequential_chat_once_fn(payload, api_key_text, history, history_md_stable, history_md_stream, tips):
+
+                     yield history, history_md_stable, history_md_stream, tips, gradio.update()
+
+                 history.append({"role": "assistant", "content": assistant_message, "type": "response"})
+                 history_md_stable += history_md_stream
+                 history_md_stream = ""
+                 tips = "fine"
+                 yield history, history_md_stable, history_md_stream, tips, gradio.update(visible=False)
+
+             except Exception as error:
+                 tips = f'error: {str(error)}'
+                 history.append({"role": "app", "content": tips})
+                 print(f"\n{tips}\n")
+                 yield history, history_md_stable, history_md_stream, tips, gradio.update(visible=False)
+
+             time.sleep(get_random_sleep(sleep_base, sleep_rand))
+             pass
+
+
+     except Exception as error:
+         tips = str(error)
+         history.append({"role": "app", "content": tips})
+         print(f"\n{tips}\n")
+         yield history, history_md_stable, history_md_stream, tips, gradio.update(visible=False)
+         pass
+
+
+ def on_click_send_btn(
+     global_state_json, api_key_text, chat_input_role, chat_input, prompt_table, chat_use_prompt, chat_use_history, chat_log,
+     chat_model, temperature, top_p, choices_num, stream, max_tokens, presence_penalty, frequency_penalty, logit_bias,
+ ):
+
+     old_state = json.loads(global_state_json or "{}")
+
+     print('\n\n\n\n\n')
+     print(prompt_table)
+     prompt_table = prompt_table or []
+
+     chat_log = chat_log or []
+
+     chat_log_md = ''
+     if chat_use_prompt:
+         chat_log_md += '<center>(prompt)</center>\n\n'
+         chat_log_md += "\n".join([xx for xx in map(lambda it: f"##### `{it[0]}`\n\n{it[1]}\n\n", prompt_table)])
+         chat_log_md += '\n---\n'
+     if True:
+         chat_log_md += '<center>(history)</center>\n\n' if chat_use_history else '<center>(not used history)</center>\n\n'
+         chat_log_md += "\n".join([xx for xx in map(lambda it: f"##### `{it[0]}`\n\n{it[1]}\n\n", chat_log)])
+         chat_log_md += '\n---\n'
+
+     # if chat_input=='':
+     #     return json.dumps(old_state), chat_log, chat_log_md, chat_log_md, None, None, chat_input
+
+     print('\n')
+     print(chat_input)
+     print('')
+
+     try:
+         logit_bias_json = json.dumps(logit_bias) if logit_bias else None
+     except:
+         return json.dumps(old_state), chat_log, chat_log_md, chat_log_md, None, None, chat_input
+
+     new_state = copy.deepcopy(old_state) or {}
+
+     req_hist = copy.deepcopy(prompt_table) if chat_use_prompt else []
+
+     if chat_use_history:
+         for hh in (chat_log or []):
+             req_hist.append(hh)
+
+     if chat_input and chat_input != "":
+         req_hist.append([(chat_input_role or 'user'), chat_input])
+
+     openai.api_key = api_key_text
+
+     props = {
+         'model': chat_model,
+         'messages': [xx for xx in map(lambda it: {'role':it[0], 'content':it[1]}, req_hist)],
+         'temperature': temperature,
+         'top_p': top_p,
+         'n': choices_num,
+         'stream': stream,
+         'presence_penalty': presence_penalty,
+         'frequency_penalty': frequency_penalty,
+     }
+     if max_tokens > 0:
+         props['max_tokens'] = max_tokens
+     if logit_bias_json is not None:
+         props['logit_bias'] = logit_bias_json
+
+     props_json = json.dumps(props)
+
+     try:
+         completion = openai.ChatCompletion.create(**props)
+         print('')
+
+         # print(completion.choices)
+         # the_response_role = completion.choices[0].message.role
+         # the_response = completion.choices[0].message.content
+         # print(the_response)
+         # print('')
+         # chat_last_resp = json.dumps(completion.__dict__)
+         # chat_last_resp_dict = json.loads(chat_last_resp)
+         # chat_last_resp_dict['api_key'] = "hidden by UI"
+         # chat_last_resp_dict['organization'] = "hidden by UI"
+         # chat_last_resp = json.dumps(chat_last_resp_dict)
+
+         chat_log_md = ''
+         if chat_use_prompt:
+             chat_log_md += '<center>(prompt)</center>\n\n'
+             chat_log_md += "\n".join([xx for xx in map(lambda it: f"##### `{it[0]}`\n\n{it[1]}\n\n", prompt_table)])
+             chat_log_md += '\n---\n'
+         if True:
+             chat_log_md += '<center>(history)</center>\n\n' if chat_use_history else '<center>(not used history)</center>\n\n'
+             chat_log_md += "\n".join([xx for xx in map(lambda it: f"##### `{it[0]}`\n\n{it[1]}\n\n", chat_log)])
+             chat_log_md += '\n---\n'
+
+         if chat_input and chat_input != "":
+             chat_log.append([(chat_input_role or 'user'), chat_input])
+             chat_log_md += f"##### `{(chat_input_role or 'user')}`\n\n{chat_input}\n\n"
+
+         partial_words = ""
+         counter = 0
+
+         if stream:
+             the_response = ''
+             the_response_role = ''
+             for chunk in completion:
+                 # Skip the first chunk: it only carries the role
+                 if counter == 0:
+                     the_response_role = chunk.choices[0].delta.role
+                     chat_log_md += f"##### `{the_response_role}`\n\n"
+                     counter += 1
+                     continue
+                 # print(('chunk', chunk))
+                 if chunk.choices[0].finish_reason is None:
+                     the_response_chunk = chunk.choices[0].delta.content
+                     the_response += the_response_chunk
+                     chat_log_md += f"{the_response_chunk}"
+                     yield json.dumps(new_state), chat_log, chat_log_md, chat_log_md, "{}", props_json, ''
+                 else:
+                     chat_log.append([the_response_role, the_response])
+                     chat_log_md += f"\n\n"
+                     yield json.dumps(new_state), chat_log, chat_log_md, chat_log_md, '{"msg": "stream模式不支持显示"}', props_json, ''
+             # chat_last_resp = json.dumps(completion.__dict__)
+             # chat_last_resp_dict = json.loads(chat_last_resp)
+             # chat_last_resp_dict['api_key'] = "hidden by UI"
+             # chat_last_resp_dict['organization'] = "hidden by UI"
+             # chat_last_resp = json.dumps(chat_last_resp_dict)
+         else:
+             the_response_role = completion.choices[0].message.role
+             the_response = completion.choices[0].message.content
+             print(the_response)
+             print('')
+
+             chat_log.append([the_response_role, the_response])
+             chat_log_md += f"##### `{the_response_role}`\n\n{the_response}\n\n"
+
+             chat_last_resp = json.dumps(completion.__dict__)
+             chat_last_resp_dict = json.loads(chat_last_resp)
+             chat_last_resp_dict['api_key'] = "hidden by UI"
+             chat_last_resp_dict['organization'] = "hidden by UI"
+             chat_last_resp = json.dumps(chat_last_resp_dict)
+
+             return json.dumps(new_state), chat_log, chat_log_md, chat_log_md, chat_last_resp, props_json, ''
+
+         # chat_log.append([the_response_role, the_response])
+         # chat_log_md += f"##### `{the_response_role}`\n\n{the_response}\n\n"
+         # return json.dumps(new_state), chat_log, chat_log_md, chat_log_md, chat_last_resp, props_json, ''
+     except Exception as error:
+         print(error)
+         print('error!!!!!!')
+
+         chat_log_md = ''
+         if chat_use_prompt:
+             chat_log_md += '<center>(prompt)</center>\n\n'
+             chat_log_md += "\n".join([xx for xx in map(lambda it: f"##### `{it[0]}`\n\n{it[1]}\n\n", prompt_table)])
+             chat_log_md += '\n---\n'
+         if True:
+             chat_log_md += '<center>(history)</center>\n\n' if chat_use_history else '<center>(not used history)</center>\n\n'
+             chat_log_md += "\n".join([xx for xx in map(lambda it: f"##### `{it[0]}`\n\n{it[1]}\n\n", chat_log)])
+             chat_log_md += '\n---\n'
+
+         # chat_log_md = ''
+         # chat_log_md = "\n".join([xx for xx in map(lambda it: f"##### `{it[0]}`\n\n{it[1]}\n\n", prompt_table)]) if chat_use_prompt else ''
+         # chat_log_md += "\n".join([xx for xx in map(lambda it: f"##### `{it[0]}`\n\n{it[1]}\n\n", hist)])
+
+         chat_log_md += "\n"
+         chat_log_md += str(error)
+         return json.dumps(new_state), chat_log, chat_log_md, chat_log_md, None, props_json, chat_input
+
+
+ def clear_history():
+     return [], ""
+
+
+ def copy_history(txt):
+     # print('\n\n copying')
+     # print(txt)
+     # print('\n\n')
+     pass
+
+
+ def update_saved_prompt_titles(global_state_json, selected_saved_prompt_title):
+     print('')
+     global_state = json.loads(global_state_json or "{}")
+     print(global_state)
+     print(selected_saved_prompt_title)
+     saved_prompts = global_state.get('saved_prompts') or []
+     print(saved_prompts)
+     the_choices = [(it.get('title') or '[untitled]') for it in saved_prompts]
+     print(the_choices)
+     print('')
+     return gradio.Dropdown.update(choices=the_choices)
+
+
+ def save_prompt(global_state_json, saved_prompts, prompt_title, prompt_table):
+     the_choices = []
+     global_state = json.loads(global_state_json or "{}")
+     saved_prompts = global_state.get('saved_prompts') or []
+     if len(saved_prompts):
+         the_choices = [it.get('title') or '[untitled]' for it in saved_prompts]
+         pass
+     return global_state_json, gradio.Dropdown.update(choices=the_choices, value=prompt_title), prompt_title, prompt_table
+
+
+ def load_saved_prompt(title):
+     pass
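For reference, a usage sketch of the templating helper above, with the inputs copied from the default values in app_cn.py (the `messages` name is illustrative): the message list text is split on whitespace, then each fragment is substituted for the `___` placeholder.

# Usage sketch for make_user_message_list_fn with the app_cn.py defaults.
messages = make_user_message_list_fn(
    user_message_template='科室名称:```___```',
    user_message_template_mask='___',
    user_message_template_mask_is_regex=False,
    user_message_list_text='全科医学 心内科 感染病科',
    user_message_list_text_splitter='\\s+',
    user_message_list_text_splitter_is_regex=True,
)
# messages == ['科室名称:```全科医学```', '科室名称:```心内科```', '科室名称:```感染病科```']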
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ gradio>=3.22.1
+ openai>=0.27.2