feihu.hf committed on
Commit fb9699b · 0 Parent(s)
Files changed (10)
  1. .gitattributes +35 -0
  2. README.md +13 -0
  3. app.py +109 -0
  4. assets/app.css +147 -0
  5. assets/appBot.css +129 -0
  6. assets/logo.jpeg +0 -0
  7. assets/user.jpeg +0 -0
  8. patching.py +305 -0
  9. requirements.txt +3 -0
  10. web_ui.py +376 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: Qwen2.5 Turbo 1M Demo
+ emoji: 💻
+ colorFrom: blue
+ colorTo: purple
+ sdk: gradio
+ sdk_version: 4.44.0
+ app_file: app.py
+ pinned: false
+ license: apache-2.0
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,109 @@
+ import os
+ os.system("pip install 'https://modelscope-studios.oss-cn-zhangjiakou.aliyuncs.com/SDK/gradio/gradio-4.44.0-py3-none-any.whl?OSSAccessKeyId=LTAI5tCGZWFdkWKivGKCtvTD&Expires=361727611665&Signature=iynlOFVFiaF3OmxatNMHUBPfb3o%3D'")
+ os.system("pip install starlette==0.38.6 fastapi==0.112.4")
+
+ from typing import List, Tuple, Union
+ from web_ui import WebUI
+ import math
+
+ from qwen_agent.agents import Assistant
+ from qwen_agent.tools.base import register_tool
+ from qwen_agent.tools.doc_parser import Record
+ from qwen_agent.tools.search_tools.base_search import RefMaterialOutput, BaseSearch
+ from qwen_agent.log import logger
+ from qwen_agent.gui.gradio import gr
+
+ POSITIVE_INFINITY = math.inf
+
+ @register_tool('no_search')
+ class NoSearch(BaseSearch):
+     def call(self, params: Union[str, dict], docs: List[Union[Record, str, List[str]]] = None, **kwargs) -> list:
+         """The basic search algorithm.
+
+         Args:
+             params: The dict parameters.
+             docs: The list of parsed docs; each doc has a unique url.
+
+         Returns:
+             The list of retrieved chunks from each doc.
+
+         """
+         params = self._verify_json_format_args(params)
+         # Compatible with the parameter passing of the qwen-agent version <= 0.0.3
+         max_ref_token = kwargs.get('max_ref_token', self.max_ref_token)
+
+         # The query is a string that may contain only the original question,
+         # or it may be a json string containing the generated keywords and the original question
+         if not docs:
+             return []
+         return self._get_the_front_part(docs, max_ref_token)
+
+     @staticmethod
+     def _get_the_front_part(docs: List[Record], max_ref_token: int) -> list:
+         all_tokens = 0
+         _ref_list = []
+         for doc in docs:
+             text = []
+             for page in doc.raw:
+                 text.append(page.content)
+                 all_tokens += page.token
+             now_ref_list = RefMaterialOutput(url=doc.url, text=text).to_dict()
+             _ref_list.append(now_ref_list)
+
+         logger.info(f'Using tokens: {all_tokens}')
+         if all_tokens > max_ref_token:
+             raise gr.Error(f"Your document files (around {all_tokens} tokens) exceed the maximum context length ({max_ref_token} tokens).")
+         return _ref_list
+
+     def sort_by_scores(self,
+                        query: str,
+                        docs: List[Record],
+                        max_ref_token: int,
+                        **kwargs) -> List[Tuple[str, int, float]]:
+         raise NotImplementedError
+
+ def app_gui():
+     # Define the agent
+     bot_7b = Assistant(llm={
+         'model': 'qwen2.5-7b-instruct-1m',
+         'generate_cfg': {
+             'max_input_tokens': 1000000,
+             'max_retries': 10,
+         }},
+         name='Qwen2.5-7B-Instruct-1M',
+         description='Qwen2.5-7B-Instruct-1M natively supports input length of up to 1M tokens. You can upload documents for Q&A (e.g., pdf/docx/pptx/txt/html).',
+         rag_cfg={'max_ref_token': 1000000, 'rag_searchers': ['no_search']},
+     )
+
+     bot_14b = Assistant(llm={
+         'model': 'qwen2.5-14b-instruct-1m',
+         'generate_cfg': {
+             'max_input_tokens': 1000000,
+             'max_retries': 10,
+         }},
+         name='Qwen2.5-14B-Instruct-1M',
+         description='Qwen2.5-14B-Instruct-1M natively supports input length of up to 1M tokens. You can upload documents for Q&A (e.g., pdf/docx/pptx/txt/html).',
+         rag_cfg={'max_ref_token': 1000000, 'rag_searchers': ['no_search']},
+     )
+
+     bot_turbo = Assistant(llm={
+         'model': 'qwen-turbo-latest',
+         'generate_cfg': {
+             'max_input_tokens': 1000000,
+             'max_retries': 10,
+         }},
+         name='Qwen2.5-Turbo-1M',
+         description='Qwen2.5-Turbo natively supports input length of up to 1M tokens. You can upload documents for Q&A (e.g., pdf/docx/pptx/txt/html).',
+         rag_cfg={'max_ref_token': 1000000, 'rag_searchers': ['no_search']},
+     )
+     chatbot_config = {
+         'input.placeholder': "Type \"/clear\" to clear the history",
+         'verbose': True,
+     }
+     WebUI([bot_14b, bot_7b, bot_turbo], chatbot_config=chatbot_config).run()
+
+ if __name__ == '__main__':
+     import patching  # patch qwen-agent to accelerate 1M processing
+     app_gui()
assets/app.css ADDED
@@ -0,0 +1,147 @@
+ /* code highlight: https://python-markdown.github.io/extensions/code_hilite/ */
+ .codehilite .hll { background-color: #ffffcc }
+ .codehilite { background: #f8f8f8; }
+ .codehilite .c { color: #408080; font-style: italic } /* Comment */
+ .codehilite .err { border: 1px solid #FF0000 } /* Error */
+ .codehilite .k { color: #008000; font-weight: bold } /* Keyword */
+ .codehilite .o { color: #666666 } /* Operator */
+ .codehilite .ch { color: #408080; font-style: italic } /* Comment.Hashbang */
+ .codehilite .cm { color: #408080; font-style: italic } /* Comment.Multiline */
+ .codehilite .cp { color: #BC7A00 } /* Comment.Preproc */
+ .codehilite .cpf { color: #408080; font-style: italic } /* Comment.PreprocFile */
+ .codehilite .c1 { color: #408080; font-style: italic } /* Comment.Single */
+ .codehilite .cs { color: #408080; font-style: italic } /* Comment.Special */
+ .codehilite .gd { color: #A00000 } /* Generic.Deleted */
+ .codehilite .ge { font-style: italic } /* Generic.Emph */
+ .codehilite .gr { color: #FF0000 } /* Generic.Error */
+ .codehilite .gh { color: #000080; font-weight: bold } /* Generic.Heading */
+ .codehilite .gi { color: #00A000 } /* Generic.Inserted */
+ .codehilite .go { color: #888888 } /* Generic.Output */
+ .codehilite .gp { color: #000080; font-weight: bold } /* Generic.Prompt */
+ .codehilite .gs { font-weight: bold } /* Generic.Strong */
+ .codehilite .gu { color: #800080; font-weight: bold } /* Generic.Subheading */
+ .codehilite .gt { color: #0044DD } /* Generic.Traceback */
+ .codehilite .kc { color: #008000; font-weight: bold } /* Keyword.Constant */
+ .codehilite .kd { color: #008000; font-weight: bold } /* Keyword.Declaration */
+ .codehilite .kn { color: #008000; font-weight: bold } /* Keyword.Namespace */
+ .codehilite .kp { color: #008000 } /* Keyword.Pseudo */
+ .codehilite .kr { color: #008000; font-weight: bold } /* Keyword.Reserved */
+ .codehilite .kt { color: #B00040 } /* Keyword.Type */
+ .codehilite .m { color: #666666 } /* Literal.Number */
+ .codehilite .s { color: #BA2121 } /* Literal.String */
+ .codehilite .na { color: #7D9029 } /* Name.Attribute */
+ .codehilite .nb { color: #008000 } /* Name.Builtin */
+ .codehilite .nc { color: #0000FF; font-weight: bold } /* Name.Class */
+ .codehilite .no { color: #880000 } /* Name.Constant */
+ .codehilite .nd { color: #AA22FF } /* Name.Decorator */
+ .codehilite .ni { color: #999999; font-weight: bold } /* Name.Entity */
+ .codehilite .ne { color: #D2413A; font-weight: bold } /* Name.Exception */
+ .codehilite .nf { color: #0000FF } /* Name.Function */
+ .codehilite .nl { color: #A0A000 } /* Name.Label */
+ .codehilite .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */
+ .codehilite .nt { color: #008000; font-weight: bold } /* Name.Tag */
+ .codehilite .nv { color: #19177C } /* Name.Variable */
+ .codehilite .ow { color: #AA22FF; font-weight: bold } /* Operator.Word */
+ .codehilite .w { color: #bbbbbb } /* Text.Whitespace */
+ .codehilite .mb { color: #666666 } /* Literal.Number.Bin */
+ .codehilite .mf { color: #666666 } /* Literal.Number.Float */
+ .codehilite .mh { color: #666666 } /* Literal.Number.Hex */
+ .codehilite .mi { color: #666666 } /* Literal.Number.Integer */
+ .codehilite .mo { color: #666666 } /* Literal.Number.Oct */
+ .codehilite .sa { color: #BA2121 } /* Literal.String.Affix */
+ .codehilite .sb { color: #BA2121 } /* Literal.String.Backtick */
+ .codehilite .sc { color: #BA2121 } /* Literal.String.Char */
+ .codehilite .dl { color: #BA2121 } /* Literal.String.Delimiter */
+ .codehilite .sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */
+ .codehilite .s2 { color: #BA2121 } /* Literal.String.Double */
+ .codehilite .se { color: #BB6622; font-weight: bold } /* Literal.String.Escape */
+ .codehilite .sh { color: #BA2121 } /* Literal.String.Heredoc */
+ .codehilite .si { color: #BB6688; font-weight: bold } /* Literal.String.Interpol */
+ .codehilite .sx { color: #008000 } /* Literal.String.Other */
+ .codehilite .sr { color: #BB6688 } /* Literal.String.Regex */
+ .codehilite .s1 { color: #BA2121 } /* Literal.String.Single */
+ .codehilite .ss { color: #19177C } /* Literal.String.Symbol */
+ .codehilite .bp { color: #008000 } /* Name.Builtin.Pseudo */
+ .codehilite .fm { color: #0000FF } /* Name.Function.Magic */
+ .codehilite .vc { color: #19177C } /* Name.Variable.Class */
+ .codehilite .vg { color: #19177C } /* Name.Variable.Global */
+ .codehilite .vi { color: #19177C } /* Name.Variable.Instance */
+ .codehilite .vm { color: #19177C } /* Name.Variable.Magic */
+ .codehilite .il { color: #666666 } /* Literal.Number.Integer.Long */
+
+ .preview_header {
+     font-size: 18px;
+     font-weight: 500;
+     text-align: center;
+     margin-bottom: -12px;
+ }
+
+ .bot_cover {
+     display: flex;
+     flex-direction: column;
+     justify-content: center;
+     align-items: center;
+     min-height: 650px;
+     border: 1px solid rgb(229, 231, 235);
+     border-radius: 8px;
+     padding: 20px 40px;
+ }
+
+ .bot_avatar {
+     width: 100px;
+     height: 100px;
+     border-radius: 50%;
+     overflow: hidden;
+ }
+
+ .bot_avatar img {
+     width: 100px;
+     height: 100px;
+ }
+
+ .bot_name {
+     font-size: 24px;
+     margin-top: 10px;
+ }
+
+ .bot_desp {
+     color: #ddd;
+ }
+
+ .publish_link_container > a {
+     display: block;
+     border-radius: var(--button-large-radius);
+     padding: var(--button-large-padding);
+     font-weight: var(--button-large-text-weight);
+     font-size: var(--button-large-text-size);
+     border: var(--button-border-width) solid var(--button-secondary-border-color);
+     background: var(--button-secondary-background-fill);
+     color: var(--button-secondary-text-color) !important;
+     cursor: pointer;
+     text-decoration: none !important;
+     text-align: center;
+ }
+
+ .publish_link_container > .disabled {
+     cursor: not-allowed;
+     opacity: .5;
+     filter: grayscale(30%);
+ }
+
+ .markdown-body .message {
+     white-space: pre-wrap;
+ }
+
+ .markdown-body details {
+     white-space: nowrap;
+ }
+ .markdown-body .bot details:not(:last-child) {
+     margin-bottom: 1px;
+ }
+ .markdown-body summary {
+     background-color: #4b5563;
+     color: #eee;
+     padding: 0 4px;
+     border-radius: 4px;
+     font-size: 0.9em;
+ }
assets/appBot.css ADDED
@@ -0,0 +1,129 @@
+ /* code highlight: https://python-markdown.github.io/extensions/code_hilite/ */
+ .codehilite .hll { background-color: #ffffcc }
+ .codehilite { background: #f8f8f8; }
+ .codehilite .c { color: #408080; font-style: italic } /* Comment */
+ .codehilite .err { border: 1px solid #FF0000 } /* Error */
+ .codehilite .k { color: #008000; font-weight: bold } /* Keyword */
+ .codehilite .o { color: #666666 } /* Operator */
+ .codehilite .ch { color: #408080; font-style: italic } /* Comment.Hashbang */
+ .codehilite .cm { color: #408080; font-style: italic } /* Comment.Multiline */
+ .codehilite .cp { color: #BC7A00 } /* Comment.Preproc */
+ .codehilite .cpf { color: #408080; font-style: italic } /* Comment.PreprocFile */
+ .codehilite .c1 { color: #408080; font-style: italic } /* Comment.Single */
+ .codehilite .cs { color: #408080; font-style: italic } /* Comment.Special */
+ .codehilite .gd { color: #A00000 } /* Generic.Deleted */
+ .codehilite .ge { font-style: italic } /* Generic.Emph */
+ .codehilite .gr { color: #FF0000 } /* Generic.Error */
+ .codehilite .gh { color: #000080; font-weight: bold } /* Generic.Heading */
+ .codehilite .gi { color: #00A000 } /* Generic.Inserted */
+ .codehilite .go { color: #888888 } /* Generic.Output */
+ .codehilite .gp { color: #000080; font-weight: bold } /* Generic.Prompt */
+ .codehilite .gs { font-weight: bold } /* Generic.Strong */
+ .codehilite .gu { color: #800080; font-weight: bold } /* Generic.Subheading */
+ .codehilite .gt { color: #0044DD } /* Generic.Traceback */
+ .codehilite .kc { color: #008000; font-weight: bold } /* Keyword.Constant */
+ .codehilite .kd { color: #008000; font-weight: bold } /* Keyword.Declaration */
+ .codehilite .kn { color: #008000; font-weight: bold } /* Keyword.Namespace */
+ .codehilite .kp { color: #008000 } /* Keyword.Pseudo */
+ .codehilite .kr { color: #008000; font-weight: bold } /* Keyword.Reserved */
+ .codehilite .kt { color: #B00040 } /* Keyword.Type */
+ .codehilite .m { color: #666666 } /* Literal.Number */
+ .codehilite .s { color: #BA2121 } /* Literal.String */
+ .codehilite .na { color: #7D9029 } /* Name.Attribute */
+ .codehilite .nb { color: #008000 } /* Name.Builtin */
+ .codehilite .nc { color: #0000FF; font-weight: bold } /* Name.Class */
+ .codehilite .no { color: #880000 } /* Name.Constant */
+ .codehilite .nd { color: #AA22FF } /* Name.Decorator */
+ .codehilite .ni { color: #999999; font-weight: bold } /* Name.Entity */
+ .codehilite .ne { color: #D2413A; font-weight: bold } /* Name.Exception */
+ .codehilite .nf { color: #0000FF } /* Name.Function */
+ .codehilite .nl { color: #A0A000 } /* Name.Label */
+ .codehilite .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */
+ .codehilite .nt { color: #008000; font-weight: bold } /* Name.Tag */
+ .codehilite .nv { color: #19177C } /* Name.Variable */
+ .codehilite .ow { color: #AA22FF; font-weight: bold } /* Operator.Word */
+ .codehilite .w { color: #bbbbbb } /* Text.Whitespace */
+ .codehilite .mb { color: #666666 } /* Literal.Number.Bin */
+ .codehilite .mf { color: #666666 } /* Literal.Number.Float */
+ .codehilite .mh { color: #666666 } /* Literal.Number.Hex */
+ .codehilite .mi { color: #666666 } /* Literal.Number.Integer */
+ .codehilite .mo { color: #666666 } /* Literal.Number.Oct */
+ .codehilite .sa { color: #BA2121 } /* Literal.String.Affix */
+ .codehilite .sb { color: #BA2121 } /* Literal.String.Backtick */
+ .codehilite .sc { color: #BA2121 } /* Literal.String.Char */
+ .codehilite .dl { color: #BA2121 } /* Literal.String.Delimiter */
+ .codehilite .sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */
+ .codehilite .s2 { color: #BA2121 } /* Literal.String.Double */
+ .codehilite .se { color: #BB6622; font-weight: bold } /* Literal.String.Escape */
+ .codehilite .sh { color: #BA2121 } /* Literal.String.Heredoc */
+ .codehilite .si { color: #BB6688; font-weight: bold } /* Literal.String.Interpol */
+ .codehilite .sx { color: #008000 } /* Literal.String.Other */
+ .codehilite .sr { color: #BB6688 } /* Literal.String.Regex */
+ .codehilite .s1 { color: #BA2121 } /* Literal.String.Single */
+ .codehilite .ss { color: #19177C } /* Literal.String.Symbol */
+ .codehilite .bp { color: #008000 } /* Name.Builtin.Pseudo */
+ .codehilite .fm { color: #0000FF } /* Name.Function.Magic */
+ .codehilite .vc { color: #19177C } /* Name.Variable.Class */
+ .codehilite .vg { color: #19177C } /* Name.Variable.Global */
+ .codehilite .vi { color: #19177C } /* Name.Variable.Instance */
+ .codehilite .vm { color: #19177C } /* Name.Variable.Magic */
+ .codehilite .il { color: #666666 } /* Literal.Number.Integer.Long */
+
+ .preview_header {
+     font-size: 24px;
+     font-weight: 500;
+     text-align: center;
+ }
+
+ .bot_cover {
+     display: flex;
+     flex-direction: column;
+     justify-content: center;
+     align-items: center;
+     min-height: 300px;
+     border: 1px solid rgb(229, 231, 235);
+     padding: 20px 20px;
+ }
+
+ .bot_avatar {
+     width: 100px;
+     height: 100px;
+     border-radius: 50%;
+     overflow: hidden;
+ }
+
+ .bot_avatar img {
+     width: 100px;
+     height: 100px;
+ }
+
+ .bot_name {
+     font-size: 24px;
+     margin-top: 10px;
+ }
+
+ /* .bot_desp {
+     color: #ddd;
+ } */
+
+ .container {
+     /* flex-direction: row-reverse; */
+ }
+
+ .markdown-body .message {
+     white-space: pre-wrap;
+ }
+
+ .markdown-body details {
+     white-space: nowrap;
+ }
+ .markdown-body .bot details:not(:last-child) {
+     margin-bottom: 1px;
+ }
+ .markdown-body summary {
+     background-color: #4b5563;
+     color: #eee;
+     padding: 0 4px;
+     border-radius: 4px;
+     font-size: 0.9em;
+ }
assets/logo.jpeg ADDED
assets/user.jpeg ADDED
patching.py ADDED
@@ -0,0 +1,305 @@
+ import os
+ import re
+ from typing import List, Union, Iterator
+ from http import HTTPStatus
+ from time import time
+ import time
+ import json
+
+
+ from qwen_agent.agents import Assistant
+ from qwen_agent.agents import assistant
+ from qwen_agent.agents.assistant import Assistant, get_basename_from_url
+ from qwen_agent.memory.memory import Memory
+ from qwen_agent.llm.schema import ASSISTANT, USER, Message, SYSTEM, CONTENT
+ from qwen_agent.llm.qwen_dashscope import QwenChatAtDS
+ import qwen_agent.llm.base
+ from qwen_agent.llm.base import ModelServiceError
+ from qwen_agent.utils.utils import extract_text_from_message, print_traceback
+ from qwen_agent.utils.tokenization_qwen import count_tokens, tokenizer
+ from qwen_agent.utils.utils import (get_file_type, hash_sha256, is_http_url,
+                                     sanitize_chrome_file_path, save_url_to_local_work_dir)
+ from qwen_agent.log import logger
+ from qwen_agent.gui.gradio import gr
+ from qwen_agent.tools.storage import KeyNotExistsError
+ from qwen_agent.tools.simple_doc_parser import (SimpleDocParser, PARSER_SUPPORTED_FILE_TYPES, parse_pdf,
+                                                 parse_word, parse_ppt, parse_txt, parse_html_bs, parse_csv,
+                                                 parse_tsv, parse_excel, get_plain_doc)
+
+
+ def memory_run(self, messages: List[Message], lang: str = 'en', **kwargs) -> Iterator[List[Message]]:
+     """This agent is responsible for processing the input files in the message.
+
+     This method stores the files in the knowledge base, retrieves the relevant parts
+     based on the query, and returns them.
+     The currently supported file types include: .pdf, .docx, .pptx, .txt, .csv, .tsv, .xlsx, .xls and html.
+
+     Args:
+         messages: A list of messages.
+         lang: Language.
+
+     Yields:
+         The message of retrieved documents.
+     """
+     # process files in messages
+     rag_files = self.get_rag_files(messages)
+
+     if not rag_files:
+         yield [Message(role=ASSISTANT, content='', name='memory')]
+     else:
+         query = ''
+         # Only retrieve content according to the last user query, if it exists
+         if messages and messages[-1].role == USER:
+             query = extract_text_from_message(messages[-1], add_upload_info=False)
+
+         content = self.function_map['retrieval'].call(
+             {
+                 'query': query,
+                 'files': rag_files
+             },
+             **kwargs,
+         )
+         if not isinstance(content, str):
+             content = json.dumps(content, ensure_ascii=False, indent=4)
+
+         yield [Message(role=ASSISTANT, content=content, name='memory')]
+
+ Memory._run = memory_run
+
+ common_programming_language_extensions = [
+     "py",  # Python
+     "java",  # Java
+     "cpp",  # C++
+     "c",  # C
+     "h",  # C/C++ header files
+     "cs",  # C#
+     "js",  # JavaScript
+     "ts",  # TypeScript
+     "rb",  # Ruby
+     "php",  # PHP
+     "swift",  # Swift
+     "go",  # Go
+     "rs",  # Rust
+     "kt",  # Kotlin
+     "scala",  # Scala
+     "m",  # Objective-C
+     "css",  # CSS
+     "sql",  # SQL
+     "sh",  # Shell
+     "pl",  # Perl
+     "r",  # R
+     "jl",  # Julia
+     "dart",  # Dart
+     "json",  # JSON
+     "xml",  # XML
+     "yml",  # YAML
+     "toml",  # TOML
+ ]
+
+ def SimpleDocParser_call(self, params: Union[str, dict], **kwargs) -> Union[str, list]:
+     params = self._verify_json_format_args(params)
+     path = params['url']
+     cached_name_ori = f'{hash_sha256(path)}_ori'
+     try:
+         # Directly load the parsed doc
+         parsed_file = self.db.get(cached_name_ori)
+         # [PATCH]: disable json5 for faster processing
+         # try:
+         #     parsed_file = json5.loads(parsed_file)
+         # except ValueError:
+         #     logger.warning(f'Encountered ValueError raised by json5. Fall back to json. File: {cached_name_ori}')
+         parsed_file = json.loads(parsed_file)
+         logger.info(f'Read parsed {path} from cache.')
+     except KeyNotExistsError:
+         logger.info(f'Start parsing {path}...')
+         time1 = time.time()
+
+         f_type = get_file_type(path)
+         if f_type in PARSER_SUPPORTED_FILE_TYPES + common_programming_language_extensions:
+             if path.startswith('https://') or path.startswith('http://') or re.match(
+                     r'^[A-Za-z]:\\', path) or re.match(r'^[A-Za-z]:/', path):
+                 path = path
+             else:
+                 path = sanitize_chrome_file_path(path)
+
+         os.makedirs(self.data_root, exist_ok=True)
+         if is_http_url(path):
+             # download online url
+             tmp_file_root = os.path.join(self.data_root, hash_sha256(path))
+             os.makedirs(tmp_file_root, exist_ok=True)
+             path = save_url_to_local_work_dir(path, tmp_file_root)
+
+         if f_type == 'pdf':
+             parsed_file = parse_pdf(path, self.extract_image)
+         elif f_type == 'docx':
+             parsed_file = parse_word(path, self.extract_image)
+         elif f_type == 'pptx':
+             parsed_file = parse_ppt(path, self.extract_image)
+         elif f_type == 'txt' or f_type in common_programming_language_extensions:
+             parsed_file = parse_txt(path)
+         elif f_type == 'html':
+             parsed_file = parse_html_bs(path, self.extract_image)
+         elif f_type == 'csv':
+             parsed_file = parse_csv(path, self.extract_image)
+         elif f_type == 'tsv':
+             parsed_file = parse_tsv(path, self.extract_image)
+         elif f_type in ['xlsx', 'xls']:
+             parsed_file = parse_excel(path, self.extract_image)
+         else:
+             raise ValueError(
+                 f'Failed: The current parser does not support this file type! Supported types: {"/".join(PARSER_SUPPORTED_FILE_TYPES + common_programming_language_extensions)}'
+             )
+         for page in parsed_file:
+             for para in page['content']:
+                 # Todo: More attribute types
+                 para['token'] = count_tokens(para.get('text', para.get('table')))
+         time2 = time.time()
+         logger.info(f'Finished parsing {path}. Time spent: {time2 - time1} seconds.')
+         # Cache the parsed doc
+         self.db.put(cached_name_ori, json.dumps(parsed_file, ensure_ascii=False, indent=2))
+
+     if not self.structured_doc:
+         return get_plain_doc(parsed_file)
+     else:
+         return parsed_file
+
+ SimpleDocParser.call = SimpleDocParser_call
+
+
+ def _truncate_input_messages_roughly(messages: List[Message], max_tokens: int) -> List[Message]:
+     sys_msg = messages[0]
+     assert sys_msg.role == SYSTEM  # The default system is prepended if none exists
+     if len([m for m in messages if m.role == SYSTEM]) >= 2:
+         raise gr.Error(
+             'The input messages must contain no more than one system message. '
+             'And the system message, if it exists, must be the first message.',
+         )
+
+     turns = []
+     for m in messages[1:]:
+         if m.role == USER:
+             turns.append([m])
+         else:
+             if turns:
+                 turns[-1].append(m)
+             else:
+                 raise gr.Error(
+                     'The input messages (excluding the system message) must start with a user message.',
+                 )
+
+     def _count_tokens(msg: Message) -> int:
+         return tokenizer.count_tokens(extract_text_from_message(msg, add_upload_info=True))
+
+     token_cnt = _count_tokens(sys_msg)
+     truncated = []
+     for i, turn in enumerate(reversed(turns)):
+         cur_turn_msgs = []
+         cur_token_cnt = 0
+         for m in reversed(turn):
+             cur_turn_msgs.append(m)
+             cur_token_cnt += _count_tokens(m)
+         # Check "i == 0" so that at least one user message is included
+         # [PATCH] Do not truncate for this demo
+         # if (i == 0) or (token_cnt + cur_token_cnt <= max_tokens):
+         truncated.extend(cur_turn_msgs)
+         token_cnt += cur_token_cnt
+         # else:
+         #     break
+     # Always include the system message
+     truncated.append(sys_msg)
+     truncated.reverse()
+
+     if len(truncated) < 2:  # one system message + one or more user messages
+         raise gr.Error(
+             'At least one user message should be provided.'
+         )
+     if token_cnt > max_tokens:
+         raise gr.Error(
+             f'The input messages (around {token_cnt} tokens) exceed the maximum context length ({max_tokens} tokens).'
+         )
+     return truncated
+
+ qwen_agent.llm.base._truncate_input_messages_roughly = _truncate_input_messages_roughly
+
+
+ def format_knowledge_to_source_and_content(result: Union[str, List[dict]]) -> List[dict]:
+     knowledge = []
+     if isinstance(result, str):
+         result = f'{result}'.strip()
+         try:
+             # [PATCH]: disable json5 for faster processing
+             docs = json.loads(result)
+         except Exception:
+             print_traceback()
+             knowledge.append({'source': '上传的文档', 'content': result})
+             return knowledge
+     else:
+         docs = result
+     try:
+         _tmp_knowledge = []
+         assert isinstance(docs, list)
+         for doc in docs:
+             url, snippets = doc['url'], doc['text']
+             assert isinstance(snippets, list)
+             _tmp_knowledge.append({
+                 'source': f'[文件]({get_basename_from_url(url)})',
+                 'content': '\n\n...\n\n'.join(snippets)
+             })
+         knowledge.extend(_tmp_knowledge)
+     except Exception:
+         print_traceback()
+         knowledge.append({'source': '上传的文档', 'content': result})
+     return knowledge
+
+ assistant.format_knowledge_to_source_and_content = format_knowledge_to_source_and_content
+
+
+ HINT_PATTERN = "\n<summary>input tokens: {input_tokens}, prefill time: [[<PrefillCost>]]s, output tokens: {output_tokens}, decode speed: [[<DecodeSpeed>]] tokens/s</summary>"
+
+ @staticmethod
+ def _full_stream_output(response):
+     for chunk in response:
+         if chunk.status_code == HTTPStatus.OK:
+             # [PATCH]: add speed statistics
+             yield [Message(ASSISTANT, chunk.output.choices[0].message.content + HINT_PATTERN.format(
+                 input_tokens=chunk.usage.input_tokens,
+                 output_tokens=chunk.usage.output_tokens,)
+             )]
+         else:
+             raise ModelServiceError(code=chunk.code, message=chunk.message)
+
+ QwenChatAtDS._full_stream_output = _full_stream_output
+
+ def assistant_run(self,
+                   messages,
+                   lang="en",
+                   knowledge="",
+                   **kwargs):
+
+     if any([len(message[CONTENT]) > 1 for message in messages]):
+         yield [Message(ASSISTANT, "Uploading and Parsing Files...")]
+     new_messages = self._prepend_knowledge_prompt(messages=messages, lang=lang, knowledge=knowledge, **kwargs)
+     start_prefill_time = time.time()
+
+     yield [Message(ASSISTANT, "Qwen is thinking...")]
+
+     start_decode_time = None
+     for chunk in super(Assistant, self)._run(messages=new_messages, lang=lang, **kwargs):
+
+         if start_decode_time is None:
+             end_prefill_time = time.time()
+             start_decode_time = time.time() - 0.5
+
+         # [PATCH]: compute speed statistics
+         pattern = re.search(HINT_PATTERN.format(input_tokens=r"\d+", output_tokens=r"(\d+)").replace("[", r"\[").replace("]", r"\]"), chunk[0][CONTENT])
+         if pattern:
+             output_tokens = int(pattern.group(1))
+             chunk[0][CONTENT] = chunk[0][CONTENT].replace("[[<PrefillCost>]]", "%.2f" % (end_prefill_time - start_prefill_time)).replace("[[<DecodeSpeed>]]", "%.2f" % (output_tokens / (time.time() - start_decode_time)))
+
+         yield chunk
+
+ Assistant._run = assistant_run
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ qwen-agent==0.0.10
+ qwen-agent[gui]
+ qwen-agent[rag]
web_ui.py ADDED
@@ -0,0 +1,376 @@
+ import os
+ import pprint
+ import re
+ from typing import List, Optional, Union
+
+ from qwen_agent import Agent, MultiAgentHub
+ from qwen_agent.agents.user_agent import PENDING_USER_INPUT
+ from qwen_agent.gui.gradio_utils import format_cover_html
+ from qwen_agent.gui.utils import convert_fncall_to_text, convert_history_to_chatbot, get_avatar_image
+ from qwen_agent.llm.schema import CONTENT, FILE, IMAGE, NAME, ROLE, USER, Message
+ from qwen_agent.log import logger
+ from qwen_agent.utils.utils import print_traceback
+ from patching import common_programming_language_extensions
+
+ class WebUI:
+     """A common chatbot application for agents."""
+
+     def __init__(self, agent: Union[Agent, MultiAgentHub, List[Agent]], chatbot_config: Optional[dict] = None):
+         """
+         Initialize the chatbot.
+
+         Args:
+             agent: The agent or a list of agents;
+                 supports various types of agents such as Assistant, GroupChat, Router, etc.
+             chatbot_config: The chatbot configuration.
+                 Set the configuration as {'user.name': '', 'user.avatar': '', 'agent.avatar': '', 'input.placeholder': '', 'prompt.suggestions': []}.
+         """
+         chatbot_config = chatbot_config or {}
+
+         if isinstance(agent, MultiAgentHub):
+             self.agent_list = [agent for agent in agent.nonuser_agents]
+             self.agent_hub = agent
+         elif isinstance(agent, list):
+             self.agent_list = agent
+             self.agent_hub = None
+         else:
+             self.agent_list = [agent]
+             self.agent_hub = None
+
+         user_name = chatbot_config.get('user.name', 'user')
+         self.user_config = {
+             'name': user_name,
+             'avatar': chatbot_config.get(
+                 'user.avatar',
+                 get_avatar_image(user_name),
+             ),
+         }
+
+         self.agent_config_list = [{
+             'name': agent.name,
+             'avatar': chatbot_config.get(
+                 'agent.avatar',
+                 os.path.join(os.path.dirname(__file__), 'assets/logo.jpeg'),
+             ),
+             'description': agent.description or "I'm a helpful assistant.",
+         } for agent in self.agent_list]
+
+         self.input_placeholder = chatbot_config.get('input.placeholder', '跟我聊聊吧~')
+         self.prompt_suggestions = chatbot_config.get('prompt.suggestions', [])
+         self.verbose = chatbot_config.get('verbose', False)
+
+     def run(self,
+             messages: List[Message] = None,
+             share: bool = False,
+             server_name: str = None,
+             server_port: int = None,
+             concurrency_limit: int = 10,
+             enable_mention: bool = False,
+             **kwargs):
+         """
+         Run the chatbot.
+
+         Args:
+             messages: The chat history.
+         """
+         self.run_kwargs = kwargs
+
+         from qwen_agent.gui.gradio import gr, mgr
+
+         customTheme = gr.themes.Default(
+             primary_hue=gr.themes.utils.colors.blue,
+             radius_size=gr.themes.utils.sizes.radius_none,
+         )
+
+         with gr.Blocks(
+                 css=os.path.join(os.path.dirname(__file__), 'assets/appBot.css'),
+                 theme=customTheme,
+         ) as demo:
+             history = gr.State([])
+
+             with gr.Row(elem_classes='container'):
+                 with gr.Column(scale=4):
+                     chatbot = mgr.Chatbot(
+                         value=convert_history_to_chatbot(messages=messages),
+                         avatar_images=[
+                             self.user_config,
+                             self.agent_config_list,
+                         ],
+                         height=800,
+                         avatar_image_width=80,
+                         flushing=False,
+                         show_copy_button=True,
+                         latex_delimiters=[{
+                             'left': '\\(',
+                             'right': '\\)',
+                             'display': True
+                         }, {
+                             'left': '\\begin{equation}',
+                             'right': '\\end{equation}',
+                             'display': True
+                         }, {
+                             'left': '\\begin{align}',
+                             'right': '\\end{align}',
+                             'display': True
+                         }, {
+                             'left': '\\begin{alignat}',
+                             'right': '\\end{alignat}',
+                             'display': True
+                         }, {
+                             'left': '\\begin{gather}',
+                             'right': '\\end{gather}',
+                             'display': True
+                         }, {
+                             'left': '\\begin{CD}',
+                             'right': '\\end{CD}',
+                             'display': True
+                         }, {
+                             'left': '\\[',
+                             'right': '\\]',
+                             'display': True
+                         }])
+
+                     input = mgr.MultimodalInput(placeholder=self.input_placeholder, upload_button_props=dict(file_types=[".pdf", ".docx", ".pptx", ".txt", ".html", ".csv", ".tsv", ".xlsx", ".xls"] + ["." + file_type for file_type in common_programming_language_extensions]))
+
+                 with gr.Column(scale=1):
+                     if len(self.agent_list) > 1:
+                         agent_selector = gr.Radio(
+                             [(agent.name, i) for i, agent in enumerate(self.agent_list)],
+                             label='Models',
+                             info='Select Your Model Here',
+                             value=0,
+                             interactive=True,
+                         )
+
+                         # gr.Dropdown(
+                         #     [(agent.name, i) for i, agent in enumerate(self.agent_list)],
+                         #     label='Models',
+                         #     info='Select Your Model Here',
+                         #     value=0,
+                         #     interactive=True,
+                         # )
+
+                     agent_info_block = self._create_agent_info_block()
+
+                     # agent_plugins_block = self._create_agent_plugins_block()
+
+                     if self.prompt_suggestions:
+                         gr.Examples(
+                             label='推荐对话',
+                             examples=self.prompt_suggestions,
+                             inputs=[input],
+                         )
+
+                 if len(self.agent_list) > 1:
+                     agent_selector.change(
+                         fn=self.change_agent,
+                         inputs=[agent_selector, chatbot, history],
+                         outputs=[agent_selector, agent_info_block, chatbot, history],  # , agent_plugins_block],
+                         queue=False,
+                     )
+
+                 input_promise = input.submit(
+                     fn=self.add_text,
+                     inputs=[input, chatbot, history],
+                     outputs=[input, chatbot, history, agent_selector],
+                     queue=False,
+                 )
+
+                 if len(self.agent_list) > 1 and enable_mention:
+                     input_promise = input_promise.then(
+                         self.add_mention,
+                         [chatbot, agent_selector],
+                         [chatbot, agent_selector],
+                     ).then(
+                         self.agent_run,
+                         [chatbot, history, agent_selector],
+                         [chatbot, history, agent_selector],
+                     )
+                 else:
+                     input_promise = input_promise.then(
+                         self.agent_run,
+                         [chatbot, history],
+                         [chatbot, history],
+                     )
+
+                 input_promise.then(self.flushed, None, [input, agent_selector])
+
+             demo.load(None)
+
+         demo.queue(default_concurrency_limit=concurrency_limit).launch(
+             share=share,
+             server_name=server_name,
+             server_port=server_port)
+
+     def change_agent(self, agent_selector, _chatbot, _history):
+         _chatbot = []
+         _history.clear()
+         yield agent_selector, self._create_agent_info_block(agent_selector), _chatbot, _history  # , self._create_agent_plugins_block(agent_selector)
+
+     def add_text(self, _input, _chatbot, _history):
+         from qwen_agent.gui.gradio import gr
+         if _input.text == "/clear":
+             _chatbot = []
+             _history.clear()
+             yield gr.update(interactive=False, value=""), _chatbot, _history, gr.update(interactive=False)
+             return
+
+         _history.append({
+             ROLE: USER,
+             CONTENT: [{
+                 'text': _input.text
+             }],
+         })
+
+         if self.user_config[NAME]:
+             _history[-1][NAME] = self.user_config[NAME]
+
+         if _input.files:
+             for file in _input.files:
+                 if file.mime_type.startswith('image/'):
+                     _history[-1][CONTENT].append({IMAGE: 'file://' + file.path})
+                 else:
+                     _history[-1][CONTENT].append({FILE: file.path})
+
+         _chatbot.append([_input, None])
+
+         yield gr.update(interactive=False, value=None), _chatbot, _history, gr.update(interactive=False)
+
+     def add_mention(self, _chatbot, _agent_selector):
+         if len(self.agent_list) == 1:
+             yield _chatbot, _agent_selector
+
+         query = _chatbot[-1][0].text
+         match = re.search(r'@\w+\b', query)
+         if match:
+             _agent_selector = self._get_agent_index_by_name(match.group()[1:])
+
+         agent_name = self.agent_list[_agent_selector].name
+
+         if ('@' + agent_name) not in query and self.agent_hub is None:
+             _chatbot[-1][0].text = '@' + agent_name + ' ' + query
+
+         yield _chatbot, _agent_selector
+
+     def agent_run(self, _chatbot, _history, _agent_selector=None):
+         if not _history:
+             if _agent_selector is not None:
+                 yield _chatbot, _history, _agent_selector
+             else:
+                 yield _chatbot, _history
+             return
+
+         if self.verbose:
+             logger.info('agent_run input:\n' + pprint.pformat(_history, indent=2))
+
+         num_input_bubbles = len(_chatbot) - 1
+         num_output_bubbles = 1
+         _chatbot[-1][1] = [None for _ in range(len(self.agent_list))]
+
+         agent_runner = self.agent_list[_agent_selector or 0]
+         if self.agent_hub:
+             agent_runner = self.agent_hub
+         responses = []
+         for responses in agent_runner.run(_history, **self.run_kwargs):
+             # usage = responses.usage
+             # responses = [Message(ASSISTANT, responses.output.choices[0].message.content)]
+
+             if not responses:
+                 continue
+             if responses[-1][CONTENT] == PENDING_USER_INPUT:
+                 logger.info('Interrupted. Waiting for user input!')
+                 break
+
+             display_responses = convert_fncall_to_text(responses)
+             # display_responses[-1][CONTENT] += "\n<summary>" + repr({"usage": usage}) + "</summary>"
+             if not display_responses:
+                 continue
+             if display_responses[-1][CONTENT] is None:
+                 continue
+
+             while len(display_responses) > num_output_bubbles:
+                 # Create a new chat bubble
+                 _chatbot.append([None, None])
+                 _chatbot[-1][1] = [None for _ in range(len(self.agent_list))]
+                 num_output_bubbles += 1
+
+             assert num_output_bubbles == len(display_responses)
+             assert num_input_bubbles + num_output_bubbles == len(_chatbot)
+
+             for i, rsp in enumerate(display_responses):
+                 agent_index = self._get_agent_index_by_name(rsp[NAME])
+                 _chatbot[num_input_bubbles + i][1][agent_index] = rsp[CONTENT]
+
+             if len(self.agent_list) > 1:
+                 _agent_selector = agent_index
+
+             if _agent_selector is not None:
+                 yield _chatbot, _history, _agent_selector
+             else:
+                 yield _chatbot, _history
+
+         if responses:
+             for res in responses:
+                 res['content'] = re.sub(r"\n<summary>input tokens.*</summary>", "", res['content'])
+             _history.extend([res for res in responses if res[CONTENT] != PENDING_USER_INPUT])
+
+         if _agent_selector is not None:
+             yield _chatbot, _history, _agent_selector
+         else:
+             yield _chatbot, _history
+
+         if self.verbose:
+             logger.info('agent_run response:\n' + pprint.pformat(responses, indent=2))
+
+     def flushed(self):
+         from qwen_agent.gui.gradio import gr
+
+         return gr.update(interactive=True), gr.update(interactive=True)
+
+     def _get_agent_index_by_name(self, agent_name):
+         if agent_name is None:
+             return 0
+
+         try:
+             agent_name = agent_name.strip()
+             for i, agent in enumerate(self.agent_list):
+                 if agent.name == agent_name:
+                     return i
+             return 0
+         except Exception:
+             print_traceback()
+             return 0
+
+     def _create_agent_info_block(self, agent_index=0):
+         from qwen_agent.gui.gradio import gr
+
+         agent_config_interactive = self.agent_config_list[agent_index]
+
+         return gr.HTML(
+             format_cover_html(
+                 bot_name=agent_config_interactive['name'],
+                 bot_description=agent_config_interactive['description'],
+                 bot_avatar=agent_config_interactive['avatar'],
+             ))
+
+     def _create_agent_plugins_block(self, agent_index=0):
+         from qwen_agent.gui.gradio import gr
+
+         agent_interactive = self.agent_list[agent_index]
+
+         if agent_interactive.function_map:
+             capabilities = [key for key in agent_interactive.function_map.keys()]
+             return gr.CheckboxGroup(
+                 label='插件',
+                 value=capabilities,
+                 choices=capabilities,
+                 interactive=False,
+             )
+
+         else:
+             return gr.CheckboxGroup(
+                 label='插件',
+                 value=[],
+                 choices=[],
+                 interactive=False,
+             )