99i committed
Commit 14f8b73 · verified · 1 Parent(s): 5c473f8

Update config.json

Files changed (1)
  1. config.json +7 -214
config.json CHANGED
@@ -1,218 +1,11 @@
  {
- "systemEnv": {
- "openapiPrefix": "fastgpt",
- "vectorMaxProcess": 10,
- "qaMaxProcess": 10,
- "pgHNSWEfSearch": 80
  },
- "llmModels": [
- {
- "model": "Qwen/Qwen2.5-72B-Instruct-128K",
- "name": "Qwen2.5-72B-Instruct",
- "provider": "Qwen",
- "maxContext": 128000,
- "maxResponse": 8192,
- "quoteMaxToken": 8192,
- "maxTemperature": 1.0,
- "inputPrice": 0,
- "outputPrice": 0,
- "censor": false,
- "vision": false,
- "datasetProcess": true,
- "usedInClassify": true,
- "usedInExtractFields": true,
- "usedInToolCall": true,
- "usedInQueryExtension": true,
- "toolChoice": true,
- "functionCall": true,
- "customCQPrompt": "",
- "customExtractPrompt": "",
- "defaultSystemChatPrompt": "",
- "defaultConfig": {}
- },
- {
- "model": "deepseek-ai/DeepSeek-V3",
- "name": "deepseek-ai/DeepSeek-V3",
- "provider": "DeepSeek",
- "maxContext": 64000,
- "maxResponse": 8192,
- "quoteMaxToken": 4096,
- "maxTemperature": 1.0,
- "inputPrice": 0,
- "outputPrice": 0,
- "censor": false,
- "vision": false,
- "datasetProcess": true,
- "usedInClassify": true,
- "usedInExtractFields": true,
- "usedInToolCall": true,
- "usedInQueryExtension": true,
- "toolChoice": true,
- "functionCall": true,
- "customCQPrompt": "",
- "customExtractPrompt": "",
- "defaultSystemChatPrompt": "",
- "defaultConfig": {}
- },
- {
- "model": "Qwen/Qwen2.5-Coder-32B-Instruct",
- "name": "Qwen/Qwen2.5-Coder-32B-Instruct",
- "provider": "Qwen",
- "maxContext": 32000,
- "maxResponse": 8192,
- "quoteMaxToken": 128000,
- "maxTemperature": 1.0,
- "inputPrice": 0,
- "outputPrice": 0,
- "censor": false,
- "vision": true,
- "datasetProcess": true,
- "usedInClassify": true,
- "usedInExtractFields": true,
- "usedInToolCall": true,
- "usedInQueryExtension": true,
- "toolChoice": true,
- "functionCall": true,
- "customCQPrompt": "",
- "customExtractPrompt": "",
- "defaultSystemChatPrompt": "",
- "defaultConfig": {}
- },
- {
- "model": "Qwen/Qwen2.5-32B-Instruct",
- "name": "Qwen2.5-32B-Instruct",
- "provider": "Qwen",
- "maxContext": 32000,
- "maxResponse": 4096,
- "quoteMaxToken": 32000,
- "maxTemperature": 1.0,
- "inputPrice": 0,
- "outputPrice": 0,
- "censor": false,
- "vision": false,
- "datasetProcess": true,
- "usedInClassify": true,
- "usedInExtractFields": true,
- "usedInToolCall": true,
- "usedInQueryExtension": true,
- "toolChoice": true,
- "functionCall": true,
- "customCQPrompt": "",
- "customExtractPrompt": "",
- "defaultSystemChatPrompt": "",
- "defaultConfig": {}
- },
- {
- "model": "deepseek-ai/deepseek-vl2",
- "name": "deepseek-vl2",
- "provider": "DeepSeek",
- "maxContext": 4096,
- "maxResponse": 2048,
- "quoteMaxToken": 4096,
- "maxTemperature": 1.0,
- "inputPrice": 0,
- "outputPrice": 0,
- "censor": false,
- "vision": true,
- "datasetProcess": true,
- "usedInClassify": true,
- "usedInExtractFields": true,
- "usedInToolCall": true,
- "usedInQueryExtension": true,
- "toolChoice": false,
- "functionCall": false,
- "customCQPrompt": "",
- "customExtractPrompt": "",
- "defaultSystemChatPrompt": "",
- "defaultConfig": {}
- }
- ],
- "vectorModels": [
- {
- "model": "BAAI/bge-m3",
- "name": "bge-m3",
- "avatar": "/imgs/model/openai.svg",
- "charsPointsPrice": 0,
- "defaultToken": 4096,
- "maxToken": 8192,
- "weight": 100,
- "defaultConfig": {
- "dimensions": 1024
- }
- },
- {
- "model": "BAAI/bge-large-zh-v1.5",
- "name": "bge-large-zh-v1.5",
- "avatar": "/imgs/model/openai.svg",
- "charsPointsPrice": 0,
- "defaultToken": 512,
- "maxToken": 512,
- "weight": 100,
- "defaultConfig": {
- "dimensions": 1024
- }
- },
- {
- "model": "netease-youdao/bce-embedding-base_v1",
- "name": "bce-embedding-base_v1",
- "avatar": "/imgs/model/openai.svg",
- "charsPointsPrice": 0,
- "defaultToken": 512,
- "maxToken": 512,
- "weight": 100,
- "defaultConfig": {
- "dimensions": 768
- }
- }
- ],
- "reRankModels": [{
- "model": "BAAI/bge-reranker-v2-m3",
- "name": "BAAI/bge-reranker-v2-m3",
- "requestUrl": "https://api.siliconflow.cn/v1/rerank",
- "requestAuth": "sk-yqiafonlffzbtihnqycptjfuijxshqgwhapelfyxadgqnoou"
- }],
- "audioSpeechModels": [
- {
- "model": "FunAudioLLM/CosyVoice2-0.5B",
- "name": "CosyVoice2-0.5B",
- "charsPointsPrice": 0,
- "voices": [
- {
- "label": "anna",
- "value": "FunAudioLLM/CosyVoice2-0.5B:anna",
- "bufferId": "openai-Alloy"
- },
- {
- "label": "bella",
- "value": "FunAudioLLM/CosyVoice2-0.5B:bella",
- "bufferId": "openai-Echo"
- },
- {
- "label": "benjamin",
- "value": "FunAudioLLM/CosyVoice2-0.5B:benjamin",
- "bufferId": "openai-Fable"
- },
- {
- "label": "charles",
- "value": "FunAudioLLM/CosyVoice2-0.5B:charles",
- "bufferId": "openai-Onyx"
- },
- {
- "label": "claire",
- "value": "FunAudioLLM/CosyVoice2-0.5B:claire",
- "bufferId": "openai-Nova"
- },
- {
- "label": "david",
- "value": "FunAudioLLM/CosyVoice2-0.5B:david",
- "bufferId": "openai-Shimmer"
- }
- ]
- }
- ],
- "whisperModel": {
- "model": "FunAudioLLM/SenseVoiceSmall",
- "name": "FunAudioLLM/SenseVoiceSmall",
- "charsPointsPrice": 0
  }
  }
 
  {
+ "feConfigs": {
+ "lafEnv": "https://laf.dev" // laf environment: https://laf.run (Hangzhou, Alibaba Cloud), or a self-hosted laf deployment. The latest version of laf is required to use the Laf openapi feature.
  },
+ "systemEnv": {
+ "vectorMaxProcess": 15, // number of vectorization worker threads
+ "qaMaxProcess": 15, // number of QA-splitting worker threads
+ "tokenWorkers": 50, // number of persistent token-counting workers; they occupy memory continuously, so do not set this too high
+ "pgHNSWEfSearch": 100 // vector-search parameter: larger values give more accurate but slower search; 100 yields 99%+ accuracy
  }
  }
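
Note: the new config keeps inline // comments, which plain JSON.parse rejects; FastGPT strips them when loading the file. For a quick local sanity check of an edited config.json, a JSON5 parser tolerates the comments. The snippet below is a minimal sketch and not part of this commit; the json5 package and the local file path are assumptions for illustration only.

// check-config.ts — illustrative sanity check for the edited config.json (assumption: file sits in the current directory)
import { readFileSync } from "fs";
import JSON5 from "json5"; // JSON5 accepts the // comments used above

const raw = readFileSync("config.json", "utf-8");
const config = JSON5.parse(raw);

// Spot-check the values introduced by this commit.
console.log(config.feConfigs.lafEnv);         // expected: "https://laf.dev"
console.log(config.systemEnv.pgHNSWEfSearch); // expected: 100
console.log(config.systemEnv.tokenWorkers);   // expected: 50

The sketch can be run next to the config file with, for example, npx tsx check-config.ts.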