QscQ committed on
Commit
4402802
·
0 Parent(s):

# This is a combination of 8 commits.

Browse files

author qscqesze <[email protected]> 1748943522 +0000
committer qingjun <[email protected]> 1749004700 +0800

initial

This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +40 -0
  2. LICENSE-CODE +15 -0
  3. LICENSE-MODEL +43 -0
  4. MiniMax-Text-01_Function_Call_Guide.md +335 -0
  5. MiniMax-Text-01_Function_Call_Guide_CN.md +335 -0
  6. README.md +269 -0
  7. config.json +126 -0
  8. configuration_minimax_text_01.py +152 -0
  9. figures/MiniMaxLogo.png +3 -0
  10. figures/TextBench.png +3 -0
  11. figures/VisionBench.png +3 -0
  12. figures/hailuo.svg +1 -0
  13. figures/image.jpg +3 -0
  14. figures/minimax.svg +1 -0
  15. figures/niah.png +3 -0
  16. figures/wechat-qrcode.jpeg +3 -0
  17. main.py +100 -0
  18. merges.txt +0 -0
  19. model-00000-of-00413.safetensors +3 -0
  20. model-00001-of-00413.safetensors +3 -0
  21. model-00002-of-00413.safetensors +3 -0
  22. model-00003-of-00413.safetensors +3 -0
  23. model-00004-of-00413.safetensors +3 -0
  24. model-00005-of-00413.safetensors +3 -0
  25. model-00006-of-00413.safetensors +3 -0
  26. model-00007-of-00413.safetensors +3 -0
  27. model-00008-of-00413.safetensors +3 -0
  28. model-00009-of-00413.safetensors +3 -0
  29. model-00010-of-00413.safetensors +3 -0
  30. model-00011-of-00413.safetensors +3 -0
  31. model-00012-of-00413.safetensors +3 -0
  32. model-00013-of-00413.safetensors +3 -0
  33. model-00014-of-00413.safetensors +3 -0
  34. model-00015-of-00413.safetensors +3 -0
  35. model-00016-of-00413.safetensors +3 -0
  36. model-00017-of-00413.safetensors +3 -0
  37. model-00018-of-00413.safetensors +3 -0
  38. model-00019-of-00413.safetensors +3 -0
  39. model-00020-of-00413.safetensors +3 -0
  40. model-00021-of-00413.safetensors +3 -0
  41. model-00022-of-00413.safetensors +3 -0
  42. model-00023-of-00413.safetensors +3 -0
  43. model-00024-of-00413.safetensors +3 -0
  44. model-00025-of-00413.safetensors +3 -0
  45. model-00026-of-00413.safetensors +3 -0
  46. model-00027-of-00413.safetensors +3 -0
  47. model-00028-of-00413.safetensors +3 -0
  48. model-00029-of-00413.safetensors +3 -0
  49. model-00030-of-00413.safetensors +3 -0
  50. model-00031-of-00413.safetensors +3 -0
.gitattributes ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
37
+ *.pdf filter=lfs diff=lfs merge=lfs -text
38
+ *.png filter=lfs diff=lfs merge=lfs -text
39
+ *.jpg filter=lfs diff=lfs merge=lfs -text
40
+ *.gif filter=lfs diff=lfs merge=lfs -text
LICENSE-CODE ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ MIT License
3
+
4
+ Copyright 2025 MiniMax AI
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
6
+ documentation files (the “Software”), to deal in the Software without restriction, including without limitation
7
+ the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
8
+ and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
9
+ The above copyright notice and this permission notice shall be included in all copies or substantial portions
10
+ of the Software.
11
+ THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
12
+ TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
13
+ THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
14
+ CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
15
+ IN THE SOFTWARE.
LICENSE-MODEL ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ MINIMAX MODEL LICENSE
3
+
4
+ Model Release Date: 15 January 2025
5
+
6
+ 1. Definitions
7
+ "Agreement" means the terms and conditions for use, reproduction, distribution and modification of the Model Materials set forth herein.
8
+ "Licensee" or "you" means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity’s behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf.
9
+ "Model" means the foundational large language models and software and algorithms, including machine-learning model code, trained model weights, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing distributed by MiniMax at https://huggingface.co/MiniMaxAI and https://github.com/MiniMax-AI.
10
+ "Model Materials" means, collectively, the Model and any source code, scripts, specifications, manuals and documentation accompanying the Model (and any portion thereof) made available under this Agreement.
11
+ "MiniMax" or "we" means MiniMax AI.
12
+
13
+ 2. License Rights and Redistribution
14
+ a. Grant of Rights. You are granted a non-exclusive, worldwide and royalty-free limited license under MiniMax’s intellectual property or other rights owned by MiniMax embodied in the Model Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the Model Materials.
15
+ b. Redistribution and Use.
16
+ i. If you distribute or make available the Model Materials (or any derivative works thereof), or a product or service that uses any of them, including another AI model, you shall provide a copy of this Agreement with any such the Model Materials or derivative works and cause any modified files to carry prominent notices stating that you changed the files. You may add your own copyright statement to your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of your modifications, or for any such derivative works as a whole, provided your use, reproduction, and distribution of the work otherwise complies with the terms and conditions of this Agreement.
17
+ ii. You must retain in all copies of the Model Materials that you distribute the following attribution notice within a “Notice” text file distributed as a part of such copies: “MiniMax AI model is licensed under the MiniMax Model License, Copyright © MiniMax. All Rights Reserved.”
18
+ iii. Your use of the Model Materials must comply with applicable laws and regulations (including trade compliance laws and regulations) and adhere to the Prohibited Uses Policy for the Model Materials, which is hereby incorporated by reference into this Agreement.
19
+
20
+ 3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE MODEL MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN “AS IS” BASIS, WITHOUT WARRANTIES OF ANY KIND, AND MINIMAX DISCLAIMS ALL WARRANTIES OF ANY KIND, BOTH EXPRESS AND IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE MODEL MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE MODEL MATERIALS AND ANY OUTPUT AND RESULTS.
21
+
22
+ 4. Limitation of Liability. IN NO EVENT WILL MINIMAX OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF MINIMAX OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING.
23
+
24
+ 5. Intellectual Property.
25
+ a. No trademark license is granted to use the trade names, trademarks, service marks, or product names of MiniMax, except as required to fulfill notice requirements under this Agreement.
26
+ b. Subject to MiniMax’s ownership of the Model Materials and derivatives made by or for MiniMax, with respect to any derivative works and modifications of the Model Materials that are made by you, as between you and MiniMax, you are and will be the owner of such derivative works and modifications.
27
+ c. If you institute litigation or other proceedings against MiniMax or any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Model Materials or outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless MiniMax from and against any claim by any third party arising out of or related to your use or distribution of the Model Materials.
28
+
29
+ 6. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the Model Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein. MiniMax may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of the Model Materials. Sections 3, 4 and 7 shall survive the termination of this Agreement.
30
+
31
+ 7. Governing Law and Jurisdiction. This Agreement will be governed and construed under the laws of Singapore without regard to choice of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement. Any dispute arising out of or in connection with this Agreement, including any question regarding its existence, validity or termination, shall be referred to and finally resolved by arbitration administered by the Singapore International Arbitration Centre (“SIAC”) in accordance with the Arbitration Rules of the Singapore International Arbitration Centre (“SIAC Rules”) for the time being in force, which rules are deemed to be incorporated by reference in this clause.
32
+
33
+ Prohibited Uses Policy
34
+ You agree you will not use, or allow others to use, the Models or any derivatives of the Models to:
35
+ 1. Violate any applicable federal, state, local, or international law or regulation, or infringe upon the lawful rights or interests of any third party.
36
+ 2. Assist with, engage in or otherwise support any military purpose.
37
+ 3. Exploit, harm, or attempt to exploit or harm minors in any way.
38
+ 4. Generate or disseminate false or misleading information with the intent to cause harm.
39
+ 5. Generate or disseminate content prohibited by applicable laws or regulations.
40
+ 6. Generate or disseminate personally identifiable information without proper authorization or for unlawful or unreasonable purposes.
41
+ 7. Defame, disparage, harass, or cause harm to any individual or entity.
42
+ 8. Conduct fully automated decision-making that adversely affects an individual’s legal rights or creates or modifies a binding, enforceable obligation.
43
+ 9. Promote discrimination, hate speech, or harmful behavior against individuals or groups based on race or ethnic origin, religion, disability, age, nationality and national origin, veteran status, sexual orientation, gender or gender identity, caste, immigration status, or any other characteristic that is associated with systemic discrimination or marginalization.
MiniMax-Text-01_Function_Call_Guide.md ADDED
@@ -0,0 +1,335 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # MiniMax-Text-01 Function Call Guide
2
+
3
+ ## 📖 Introduction
4
+
5
+ MiniMax-Text-01 model supports function calling capability, allowing the model to identify when an external function needs to be called and output function call parameters in a structured format. This document provides detailed instructions on how to use the function calling feature of MiniMax-Text-01.
6
+
7
+ ## 🛠️ Defining Function Calls
8
+
9
+ ### Function Structure
10
+
11
+ Function calls need to be defined in the `tools` field of the request body. Each function consists of:
12
+
13
+ ```json
14
+ {
15
+ "tools": [
16
+ {
17
+ "type": "function",
18
+ "function": {
19
+ "name": "function_name", // Function name, required
20
+ "description": "function_description", // Brief description of the function's purpose
21
+ "parameters": { // Parameter definition in JSON Schema format
22
+ "type": "object", // Overall type, fixed as "object"
23
+ "properties": { // Parameter property object
24
+ "param_name": { // Parameter name
25
+ "description": "Parameter description", // Description
26
+ "type": "string|number|boolean|array|object" // Type
27
+ }
28
+ },
29
+ "required": ["param1", "param2"] // List of required parameters
30
+ }
31
+ }
32
+ }
33
+ ]
34
+ }
35
+ ```
36
+
37
+ ### Example
38
+
39
+ Below is a simple example of a weather query function definition:
40
+
41
+ ```json
42
+ "tools": [
43
+ {
44
+ "type": "function",
45
+ "function": {
46
+ "name": "get_current_weather",
47
+ "description": "Get the latest weather for a location",
48
+ "parameters": {
49
+ "type": "object",
50
+ "properties": {
51
+ "location": {
52
+ "type": "string",
53
+ "description": "A certain city, such as Beijing, Shanghai"
54
+ }
55
+ },
56
+ "required": ["location"]
57
+ }
58
+ }
59
+ }
60
+ ]
61
+ ```
62
+
63
+ ### Complete Request Example
64
+
65
+ Below is a complete Python code example that includes function definitions:
66
+
67
+ ```python
68
+ payload = json.dumps({
69
+ "model": "MiniMax-Text-01",
70
+ "messages": [
71
+ {
72
+ "role": "system",
73
+ "content": "MM Intelligent Assistant is a large-scale language model developed by MiniMax and has no interfaces to call other products. MiniMax is a China technology company that has been committed to conducting research related to large models."
74
+ },
75
+ {
76
+ "role": "user",
77
+ "content": "What's the weather like in Shanghai today?"
78
+ }
79
+ ],
80
+ "tools": [
81
+ {
82
+ "type": "function",
83
+ "function": {
84
+ "name": "get_current_weather",
85
+ "description": "Get the latest weather for a location",
86
+ "parameters": {
87
+ "type": "object",
88
+ "properties": {
89
+ "location": {
90
+ "type": "string",
91
+ "description": "A certain city, such as Beijing, Shanghai"
92
+ }
93
+ },
94
+ "required": ["location"]
95
+ }
96
+ }
97
+ }
98
+ ],
99
+ "tool_choice": "auto",
100
+ "stream": True,
101
+ "max_tokens": 10000,
102
+ "temperature": 0.9,
103
+ "top_p": 1
104
+ })
105
+ ```
106
+
107
+ ## 🔄 Function Call Input Format
108
+
109
+ When processed internally by the model, function definitions are converted to a special format and concatenated to the input text:
110
+
111
+ ```
112
+ <beginning_of_sentence>system function_setting=functions
113
+ {"name": "get_current_weather", "description": "Get the latest weather for a location", "parameters": {"type": "object", "properties": {"location": {"type": "string", "description": "A certain city, such as Beijing, Shanghai"}}, "required": ["location"]}}<end_of_sentence>
114
+ ```
115
+
116
+ Important notes:
117
+ 1. Function definitions are placed after the system settings and before the conversation data
118
+ 2. Function definitions are marked with `function_setting=functions`
119
+ 3. Each function is defined as a JSON string
120
+ 4. The area ends with `<end_of_sentence>`
121
+
122
+ ## 📤 Model Function Call Output
123
+
124
+ When the model decides to call a function, it outputs the function call information in a special format:
125
+
126
+ ````
127
+ <function_call>```typescript
128
+ functions.get_current_weather({"location": "Shanghai"})
129
+ ```
130
+ ````
131
+
132
+ "<function_call>" is a special token, followed by "functions.function_name(parameter json structure)". The parameters need to be string-matched and executed externally.
133
+
134
+ ## 📥 Handling Function Results
135
+
136
+ After a function is successfully executed, the model will return output in the following format:
137
+
138
+ ````typescript
139
+ ```typescript
140
+ functions.get_current_weather({"location": "Shanghai"})
141
+ ```
142
+ ````
143
+
144
+ You can use the following regular expression method to extract the function name and parameters for subsequent processing:
145
+
146
+ ````python
147
+ def parse_function_calls(content: str):
148
+ """
149
+ Parse the function call content returned by the model, extract function name and parameters
150
+
151
+ Parameters:
152
+ content: The original content string returned by the model
153
+
154
+ Returns:
155
+ A dictionary of parsed function call information, including function name and parameters
156
+ """
157
+ # Match typescript code block
158
+ pattern = r"```typescript\n(.+?)?\n```"
159
+ matches = re.finditer(pattern, content, re.DOTALL)
160
+
161
+ for match in matches:
162
+ function_code = match.group(1)
163
+ # Extract function name and parameters
164
+ function_match = re.search(r'functions\.(\w+)\((.+)\)', function_code)
165
+
166
+ if not function_match:
167
+ continue
168
+
169
+ function_name = function_match.group(1)
170
+ arguments_str = function_match.group(2)
171
+
172
+ try:
173
+ # Parse parameter JSON
174
+ arguments = json.loads(arguments_str)
175
+ print(f"Function call: {function_name}, Parameters: {arguments}")
176
+
177
+ # Example: Handle weather query function
178
+ if function_name == "get_current_weather":
179
+ location = arguments.get("location", "Unknown location")
180
+ # Build function execution result
181
+ return {
182
+ "role": "function",
183
+ "name": function_name,
184
+ "text": json.dumps({
185
+ "location": location,
186
+ "temperature": "25",
187
+ "unit": "celsius",
188
+ "weather": "Sunny"
189
+ }, ensure_ascii=False)
190
+ }
191
+ except json.JSONDecodeError as e:
192
+ print(f"Parameter parsing failed: {arguments_str}, Error: {e}")
193
+
194
+ return {}
195
+ ````
196
+
197
+ After successfully parsing the function call, you should add the function execution result to the conversation history so that the model can access and utilize this information in subsequent interactions.
198
+
199
+ ## 💻 Function Call Example with Transformers Library
200
+
201
+ The official MiniMax-Text-01 repository provides a complete example of function calling using the Transformers library. You can view the source code in the [MiniMaxAI/MiniMax-Text-01 huggingface repository](https://huggingface.co/MiniMaxAI/MiniMax-Text-01/blob/main/main.py).
202
+
203
+ The following is the key part of implementing function calls using the Transformers library:
204
+
205
+ ```python
206
+ def get_default_tools():
207
+ return [
208
+ {
209
+ "type": "function",
210
+ "function": {
211
+ "name": "get_current_weather",
212
+ "description": "Get the latest weather for a location",
213
+ "parameters": {
214
+ "type": "object",
215
+ "properties": {
216
+ "location": {
217
+ "type": "string",
218
+ "description": "A certain city, such as Beijing, Shanghai"
219
+ }
220
+ },
221
+ "required": ["location"]
222
+ }
223
+ }
224
+ }
225
+ ]
226
+
227
+ # Load model and tokenizer
228
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
229
+ prompt = "What's the weather like in Shanghai today?"
230
+ messages = [
231
+ {"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant created by Minimax based on MiniMax-Text-01 model."}]},
232
+ {"role": "user", "content": [{"type": "text", "text": prompt}]},
233
+ ]
234
+
235
+ # Enable function call tools
236
+ tools = get_default_tools()
237
+
238
+ # Apply chat template and add tool definitions
239
+ text = tokenizer.apply_chat_template(
240
+ messages,
241
+ tokenize=False,
242
+ add_generation_prompt=True,
243
+ tools=tools
244
+ )
245
+
246
+ # Generate response
247
+ model_inputs = tokenizer(text, return_tensors="pt").to("cuda")
248
+ quantized_model = AutoModelForCausalLM.from_pretrained(
249
+ model_id,
250
+ torch_dtype="bfloat16",
251
+ device_map=device_map,
252
+ quantization_config=quantization_config,
253
+ trust_remote_code=True,
254
+ offload_buffers=True,
255
+ )
256
+ generation_config = GenerationConfig(
257
+ max_new_tokens=20,
258
+ eos_token_id=200020,
259
+ use_cache=True,
260
+ )
261
+
262
+ # Execute generation
263
+ generated_ids = quantized_model.generate(**model_inputs, generation_config=generation_config)
264
+ response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
265
+ ```
266
+
267
+ ### Running the Example
268
+
269
+ You can run the example code using the following command:
270
+
271
+ ```bash
272
+ export SAFETENSORS_FAST_GPU=1
273
+ python main.py --quant_type int8 --world_size 8 --model_id <model_path> --enable_tools
274
+ ```
275
+
276
+ Parameter description:
277
+ - `--quant_type`: Quantization type, options are "default" or "int8"
278
+ - `--world_size`: Number of GPUs, int8 quantization requires at least 8 GPUs
279
+ - `--model_id`: Model path
280
+ - `--enable_tools`: Enable function call feature
281
+
282
+ ### Result Processing
283
+ As expected, you will get the following output:
284
+
285
 + ````bash
286
+ ```typescript
287
+ functions.get_current_weather({"location": "Shanghai"})
288
+ ```
289
+ ````
290
+
291
+ You can use regular expressions to extract the function to call and its corresponding parameters:
292
+
293
+ ````python
294
+ def try_parse_tool_calls(content: str):
295
+ pattern = r"```typescript\n(.+?)?\n```"
296
+ matches = re.finditer(pattern, content, re.DOTALL)
297
+
298
+ for match in matches:
299
+ function_code = match.group(1)
300
+ function_match = re.search(r'functions\.(\w+)\((.+)\)', function_code)
301
+
302
+ if not function_match:
303
+ continue
304
+
305
+ function_name = function_match.group(1)
306
+ arguments_str = function_match.group(2)
307
+
308
+ try:
309
+ arguments = json.loads(arguments_str)
310
+ print(f"tool_calls: [{{'type': 'function', 'function': {{'name': '{function_name}', 'arguments': {arguments}}}}}]")
311
+
312
+ if function_name == "get_current_weather":
313
+ location = arguments.get("location", "Unknown")
314
+ return {"role": "function", "name": function_name, "text": f'{{"location": "{location}", "temperature": "25", "unit": "celsius", "weather": "Sun"}}'}
315
+ except json.JSONDecodeError as e:
316
+ print(f"Failed parse tools: {arguments_str}, Error: {e}")
317
+
318
+ return {}
319
+ ````
320
+
321
+ ### Chat Template
322
+
323
+ MiniMax-Text-01 uses a specific chat template format to process function calls. The chat template is defined in `tokenizer_config.json`:
324
+
325
+ ```json
326
+ "{% for message in messages %}{% if message['role'] == 'system' %}{{ '<beginning_of_sentence>system ai_setting=assistant\\n' + message['content'][0]['text'] + '<end_of_sentence>\\n'}}{% elif message['role'] == 'user' %}{{ '<beginning_of_sentence>user name=user\\n' + message['content'][0]['text'] + '<end_of_sentence>\\n'}}{% elif message['role'] == 'assistant' %}{{ '<beginning_of_sentence>ai name=assistant\\n' }}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{% generation %}{{ content['text'] }}{% endgeneration %}{% endfor %}{{ '<end_of_sentence>\\n' }}{% elif message['role'] == 'function' %}{{ '<beginning_of_sentence>system function_response=functions\\n' + '{\"name\": \"' + message['name'] + '\", \"response\": ' + message['content'][0]['text'] + '}' + '<end_of_sentence>\\n'}}{% endif %}{% endfor %}{% if tools %}{% for function in tools %}{{ '<beginning_of_sentence>system function_setting=functions\\n' + function | tojson + '<end_of_sentence>\\n'}}{% endfor %}{% endif %}{% if add_generation_prompt %}{{ '<beginning_of_sentence>ai name=assistant\\n' }}{% endif %}"
327
+ ```
328
+
329
+ ## 📝 Important Notes
330
+
331
+ 1. Function names should follow programming language naming conventions and avoid special characters
332
+ 2. Parameter descriptions should be concise and help the model understand the parameter's purpose and constraints
333
+ 3. The model does not guarantee that it will call a function; this depends on the user's input and the model's judgment
334
+ 4. Function results should be returned in a structured format for easy processing by the model
335
+ 5. The model might not call a function even if one is provided, depending on whether it determines a function call is appropriate for the given user query
MiniMax-Text-01_Function_Call_Guide_CN.md ADDED
@@ -0,0 +1,335 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # MiniMax-Text-01 函数调用(Function Call)功能指南
2
+
3
+ ## 📖 简介
4
+
5
+ MiniMax-Text-01 模型支持函数调用功能,使模型能够识别何时需要调用外部函数,并以结构化格式输出函数调用参数。本文档详细介绍了如何使用 MiniMax-Text-01 的函数调用功能。
6
+
7
+ ## 🛠️ 函数调用的定义
8
+
9
+ ### 函数结构体
10
+
11
+ 函数调用需要在请求体中定义 `tools` 字段,每个函数由以下部分组成:
12
+
13
+ ```json
14
+ {
15
+ "tools": [
16
+ {
17
+ "type": "function",
18
+ "function": {
19
+ "name": "function_name", // 函数名称,必填
20
+ "description": "function_description", // 函数描述,应简明扼要说明函数功能
21
+ "parameters": { // 函数参数定义,符合 JSON Schema 格式
22
+ "type": "object", // 参数整体类型,固定为object
23
+ "properties": { // 参数属性对象
24
+ "param_name": { // 参数名称
25
+ "description": "参数描述", // 参数说明
26
+ "type": "string|number|boolean|array|object" // 参数类型
27
+ }
28
+ },
29
+ "required": ["param1", "param2"] // 必填参数列表
30
+ }
31
+ }
32
+ }
33
+ ]
34
+ }
35
+ ```
36
+
37
+ ### 示例
38
+
39
+ 以下是一个简单的天气查询函数定义示例:
40
+
41
+ ```json
42
+ "tools": [
43
+ {
44
+ "type": "function",
45
+ "function": {
46
+ "name": "get_current_weather",
47
+ "description": "Get the latest weather for a location",
48
+ "parameters": {
49
+ "type": "object",
50
+ "properties": {
51
+ "location": {
52
+ "type": "string",
53
+ "description": "A certain city, such as Beijing, Shanghai"
54
+ }
55
+ },
56
+ "required": ["location"]
57
+ }
58
+ }
59
+ }
60
+ ]
61
+ ```
62
+
63
+ ### 完整请求示例
64
+
65
+ 下面是一个包含函数定义的完整Python代码示例:
66
+
67
+ ```python
68
+ payload = json.dumps({
69
+ "model": "MiniMax-Text-01",
70
+ "messages": [
71
+ {
72
+ "role": "system",
73
+ "content": "MM Intelligent Assistant is a large-scale language model developed by MiniMax and has no interfaces to call other products. MiniMax is a China technology company that has been committed to conducting research related to large models."
74
+ },
75
+ {
76
+ "role": "user",
77
+ "content": "上海今天天气怎么样?"
78
+ }
79
+ ],
80
+ "tools": [
81
+ {
82
+ "type": "function",
83
+ "function": {
84
+ "name": "get_current_weather",
85
+ "description": "Get the latest weather for a location",
86
+ "parameters": {
87
+ "type": "object",
88
+ "properties": {
89
+ "location": {
90
+ "type": "string",
91
+ "description": "A certain city, such as Beijing, Shanghai"
92
+ }
93
+ },
94
+ "required": ["location"]
95
+ }
96
+ }
97
+ }
98
+ ],
99
+ "tool_choice": "auto",
100
+ "stream": True,
101
+ "max_tokens": 10000,
102
+ "temperature": 0.9,
103
+ "top_p": 1
104
+ })
105
+ ```
106
+
107
+ ## 🔄 函数调用的输入格式
108
+
109
+ 在模型内部处理时,函数定义会被转换为特殊格式并拼接到输入文本中:
110
+
111
+ ```
112
+ <beginning_of_sentence>system function_setting=functions
113
+ {"name": "get_current_weather", "description": "Get the latest weather for a location", "parameters": {"type": "object", "properties": {"location": {"type": "string", "description": "A certain city, such as Beijing, Shanghai"}}, "required": ["location"]}}<end_of_sentence>
114
+ ```
115
+
116
+ 注意事项:
117
+ 1. 函数定义位于系统设置之后、对话数据之前
118
+ 2. 使用 `function_setting=functions` 标记函数定义区域
119
+ 3. 每个函数定义使用JSON字符串表示
120
+ 4. 区域以 `<end_of_sentence>` 结束
121
+
122
+ ## 📤 模型的函数调用输出
123
+
124
+ 当模型决定调用函数时,它会在响应中使用特殊格式输出函数调用信息:
125
+
126
+ ````
127
+ <function_call>```typescript
128
+ functions.get_current_weather({"location": "上海"})
129
+ ```
130
+ ````
131
+
132
+ "<function_call>" 是 special token, 后面的 "functions.函数名(参数 json 结构体)", 需要字符串匹配出参数, 交外部执行.
133
+
134
+ ## 📥 函数执行结果的处理
135
+
136
+ 当函数调用成功执行后,模型将返回以下格式的输出:
137
+
138
+ ````typescript
139
+ ```typescript
140
+ functions.get_current_weather({"location": "Shanghai"})
141
+ ```
142
+ ````
143
+
144
+ 您可以使用以下正则表达式方法提取函数名称和参数,便于后续处理:
145
+
146
+ ````python
147
+ def parse_function_calls(content: str):
148
+ """
149
+ 解析模型返回的函数调用内容,提取函数名和参数
150
+
151
+ 参数:
152
+ content: 模型返回的原始内容字符串
153
+
154
+ 返回:
155
+ 解析后的函数调用信息字典,包含函数名和参数
156
+ """
157
+ # 匹配 typescript 代码块
158
+ pattern = r"```typescript\n(.+?)?\n```"
159
+ matches = re.finditer(pattern, content, re.DOTALL)
160
+
161
+ for match in matches:
162
+ function_code = match.group(1)
163
+ # 提取函数名和参数
164
+ function_match = re.search(r'functions\.(\w+)\((.+)\)', function_code)
165
+
166
+ if not function_match:
167
+ continue
168
+
169
+ function_name = function_match.group(1)
170
+ arguments_str = function_match.group(2)
171
+
172
+ try:
173
+ # 解析参数JSON
174
+ arguments = json.loads(arguments_str)
175
+ print(f"调用函数: {function_name}, 参数: {arguments}")
176
+
177
+ # 示例: 处理天气查询函数
178
+ if function_name == "get_current_weather":
179
+ location = arguments.get("location", "未知位置")
180
+ # 构建函数执行结果
181
+ return {
182
+ "role": "function",
183
+ "name": function_name,
184
+ "text": json.dumps({
185
+ "location": location,
186
+ "temperature": "25",
187
+ "unit": "celsius",
188
+ "weather": "晴朗"
189
+ }, ensure_ascii=False)
190
+ }
191
+ except json.JSONDecodeError as e:
192
+ print(f"参数解析失败: {arguments_str}, 错误: {e}")
193
+
194
+ return {}
195
+ ````
196
+
197
+ 成功解析函数调用后,您应将函数执行结果添加到对话历史中,以便模型在后续交互中能够访问和利用这些信息。
198
+
199
+ ## 💻 使用 Transformers 库的函数调用示例
200
+
201
+ MiniMax-Text-01 官方仓库提供了使用 Transformers 库进行函数调用的完整示例。您可以在 [MiniMaxAI/MiniMax-Text-01 huggingface 仓库](https://huggingface.co/MiniMaxAI/MiniMax-Text-01/blob/main/main.py) 中查看源代码。
202
+
203
+ 以下是使用 Transformers 库实现函数调用的关键部分:
204
+
205
+ ```python
206
+ def get_default_tools():
207
+ return [
208
+ {
209
+ "type": "function",
210
+ "function": {
211
+ "name": "get_current_weather",
212
+ "description": "Get the latest weather for a location",
213
+ "parameters": {
214
+ "type": "object",
215
+ "properties": {
216
+ "location": {
217
+ "type": "string",
218
+ "description": "A certain city, such as Beijing, Shanghai"
219
+ }
220
+ },
221
+ "required": ["location"]
222
+ }
223
+ }
224
+ }
225
+ ]
226
+
227
+ # 加载模型和分词器
228
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
229
+ prompt = "What's the weather like in Shanghai today?"
230
+ messages = [
231
+ {"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant created by MiniMax based on MiniMax-Text-01 model."}]},
232
+ {"role": "user", "content": [{"type": "text", "text": prompt}]},
233
+ ]
234
+
235
+ # 启用函数调用工具
236
+ tools = get_default_tools()
237
+
238
+ # 应用聊天模板,并加入工具定义
239
+ text = tokenizer.apply_chat_template(
240
+ messages,
241
+ tokenize=False,
242
+ add_generation_prompt=True,
243
+ tools=tools
244
+ )
245
+
246
+ # 生成回复
247
+ model_inputs = tokenizer(text, return_tensors="pt").to("cuda")
248
+ quantized_model = AutoModelForCausalLM.from_pretrained(
249
+ model_id,
250
+ torch_dtype="bfloat16",
251
+ device_map=device_map,
252
+ quantization_config=quantization_config,
253
+ trust_remote_code=True,
254
+ offload_buffers=True,
255
+ )
256
+ generation_config = GenerationConfig(
257
+ max_new_tokens=20,
258
+ eos_token_id=200020,
259
+ use_cache=True,
260
+ )
261
+
262
+ # 执行生成
263
+ generated_ids = quantized_model.generate(**model_inputs, generation_config=generation_config)
264
+ response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
265
+ ```
266
+
267
+ ### 运行方式
268
+
269
+ 您可以通过以下命令运行示例代码:
270
+
271
+ ```bash
272
+ export SAFETENSORS_FAST_GPU=1
273
+ python main.py --quant_type int8 --world_size 8 --model_id <model_path> --enable_tools
274
+ ```
275
+
276
+ 参数说明:
277
+ - `--quant_type`: 量化类型,可选 "default" 或 "int8"
278
+ - `--world_size`: GPU 数量,int8 量化至少需要 8 个 GPU
279
+ - `--model_id`: 模型路径
280
+ - `--enable_tools`: 启用函数调用功能
281
+
282
+ ### 结果处理
283
+ 符合预期的情况下,你将得到以下输出
284
+
285
+ ````bash
286
+ ```typescript
287
+ functions.get_current_weather({"location": "Shanghai"})
288
+ ```
289
+ ````
290
+
291
+ 你可以使用正则表达式提取出需要调用的 function 和 对应的参数
292
+
293
+ ````python
294
+ def try_parse_tool_calls(content: str):
295
+ pattern = r"```typescript\n(.+?)?\n```"
296
+ matches = re.finditer(pattern, content, re.DOTALL)
297
+
298
+ for match in matches:
299
+ function_code = match.group(1)
300
+ function_match = re.search(r'functions\.(\w+)\((.+)\)', function_code)
301
+
302
+ if not function_match:
303
+ continue
304
+
305
+ function_name = function_match.group(1)
306
+ arguments_str = function_match.group(2)
307
+
308
+ try:
309
+ arguments = json.loads(arguments_str)
310
+ print(f"tool_calls: [{{'type': 'function', 'function': {{'name': '{function_name}', 'arguments': {arguments}}}}}]")
311
+
312
+ if function_name == "get_current_weather":
313
+ location = arguments.get("location", "Unknown")
314
+ return {"role": "function", "name": function_name, "text": f'{{"location": "{location}", "temperature": "25", "unit": "celsius", "weather": "Sun"}}'}
315
+ except json.JSONDecodeError as e:
316
+ print(f"Failed parse tools: {arguments_str}, Error: {e}")
317
+
318
+ return {}
319
+ ````
320
+
321
+ ### 聊天模板
322
+
323
+ MiniMax-Text-01 使用特定的聊天模板格式处理函数调用。聊天模板定义在 `tokenizer_config.json` 中:
324
+
325
+ ```json
326
+ "{% for message in messages %}{% if message['role'] == 'system' %}{{ '<beginning_of_sentence>system ai_setting=assistant\\n' + message['content'][0]['text'] + '<end_of_sentence>\\n'}}{% elif message['role'] == 'user' %}{{ '<beginning_of_sentence>user name=user\\n' + message['content'][0]['text'] + '<end_of_sentence>\\n'}}{% elif message['role'] == 'assistant' %}{{ '<beginning_of_sentence>ai name=assistant\\n' }}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{% generation %}{{ content['text'] }}{% endgeneration %}{% endfor %}{{ '<end_of_sentence>\\n' }}{% elif message['role'] == 'function' %}{{ '<beginning_of_sentence>system function_response=functions\\n' + '{\"name\": \"' + message['name'] + '\", \"response\": ' + message['content'][0]['text'] + '}' + '<end_of_sentence>\\n'}}{% endif %}{% endfor %}{% if tools %}{% for function in tools %}{{ '<beginning_of_sentence>system function_setting=functions\\n' + function | tojson + '<end_of_sentence>\\n'}}{% endfor %}{% endif %}{% if add_generation_prompt %}{{ '<beginning_of_sentence>ai name=assistant\\n' }}{% endif %}"
327
+
328
+ ```
329
+
330
+ ## 📝 注意事项
331
+
332
+ 1. 函数名称应当遵循编程语言的命名规范,避免使用特殊字符
333
+ 2. 参数描述应当简洁明了,帮助模型理解参数的用途和约束
334
+ 3. 模型并不保证每次都会调用函数,这取决于用户的输入和模型的判断
335
+ 4. 函数调用结果应当以结构化方式返回,便于模型理解和处理
README.md ADDED
@@ -0,0 +1,269 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ pipeline_tag: text-generation
3
+ ---
4
+ <div align="center">
5
+
6
+ <svg width="60%" height="auto" viewBox="0 0 144 48" fill="none" xmlns="http://www.w3.org/2000/svg">
7
+ <path d="M26.6782 7.96523C26.6782 7.02436 25.913 6.26087 24.9739 6.26087C24.0348 6.26087 23.2695 7.0261 23.2695 7.96523V36.2139C23.2695 38.4 21.4904 40.1791 19.3043 40.1791C17.1183 40.1791 15.3391 38.4 15.3391 36.2139V18.0904C15.3391 17.1496 14.5739 16.3861 13.6348 16.3861C12.6956 16.3861 11.9304 17.1513 11.9304 18.0904V25.7722C11.9304 27.9583 10.1513 29.7374 7.96518 29.7374C5.7791 29.7374 4 27.9583 4 25.7722V22.9878C4 22.3635 4.50609 21.8574 5.13043 21.8574C5.75478 21.8574 6.26087 22.3635 6.26087 22.9878V25.7722C6.26087 26.713 7.02605 27.4765 7.96518 27.4765C8.90431 27.4765 9.66954 26.7113 9.66954 25.7722V18.0904C9.66954 15.9044 11.4487 14.1252 13.6348 14.1252C15.8209 14.1252 17.6 15.9044 17.6 18.0904V36.2139C17.6 37.1548 18.3652 37.9183 19.3043 37.9183C20.2435 37.9183 21.0087 37.153 21.0087 36.2139V25.1322V7.96523C21.0087 5.77914 22.7878 4 24.9739 4C27.16 4 28.9391 5.77914 28.9391 7.96523V31.3565C28.9391 31.9809 28.433 32.487 27.8087 32.487C27.1843 32.487 26.6782 31.9809 26.6782 31.3565V7.96523ZM47.6539 14.1252C45.4678 14.1252 43.6887 15.9044 43.6887 18.0904V33.2296C43.6887 34.1704 42.9235 34.9339 41.9843 34.9339C41.0452 34.9339 40.28 34.1687 40.28 33.2296V7.96523C40.28 5.77914 38.5008 4 36.3148 4C34.1287 4 32.3496 5.77914 32.3496 7.96523V40.0348C32.3496 40.9756 31.5843 41.7391 30.6452 41.7391C29.7061 41.7391 28.9409 40.9739 28.9409 40.0348V36.0643C28.9409 35.44 28.4348 34.9339 27.8104 34.9339C27.1861 34.9339 26.68 35.44 26.68 36.0643V40.0348C26.68 42.2209 28.4591 44 30.6452 44C32.8313 44 34.6104 42.2209 34.6104 40.0348V7.96523C34.6104 7.02436 35.3756 6.26087 36.3148 6.26087C37.2539 6.26087 38.0191 7.0261 38.0191 7.96523V33.2296C38.0191 35.4156 39.7982 37.1948 41.9843 37.1948C44.1704 37.1948 45.9496 35.4156 45.9496 33.2296V18.0904C45.9496 17.1496 46.7148 16.3861 47.6539 16.3861C48.593 16.3861 49.3582 17.1513 49.3582 18.0904V31.3565C49.3582 31.9809 49.8643 32.487 50.4887 32.487C51.113 32.487 51.6191 31.9809 51.6191 31.3565V18.0904C51.6191 15.9044 49.84 14.1252 
47.6539 14.1252Z" fill="url(#paint0_linear_17_483)"/>
8
+ <path d="M68.7671 16.5615H71.2541C71.3254 16.5615 71.3845 16.5859 71.435 16.6363C71.4836 16.6868 71.5097 16.7459 71.5097 16.8172V31.1824C71.5097 31.2537 71.4854 31.3128 71.435 31.3633C71.3845 31.4137 71.3254 31.4381 71.2541 31.4381H68.7671C68.6958 31.4381 68.6367 31.4137 68.5862 31.3633C68.5358 31.3146 68.5115 31.2537 68.5115 31.1824V21.812C68.5115 21.7563 68.4976 21.7268 68.4697 21.7268C68.4419 21.7268 68.4123 21.7476 68.3845 21.7911L66.1323 25.318C66.061 25.4311 65.9619 25.4885 65.8349 25.4885H64.581C64.4541 25.4885 64.3549 25.4328 64.2836 25.318L62.0315 21.7911C62.0036 21.7494 61.9741 21.7302 61.9462 21.7372C61.9184 21.7441 61.9045 21.7772 61.9045 21.8328V31.1824C61.9045 31.2537 61.8802 31.3128 61.8297 31.3633C61.7793 31.4137 61.7202 31.4381 61.6489 31.4381H59.1619C59.0906 31.4381 59.0315 31.4137 58.981 31.3633C58.9306 31.3146 58.9062 31.2537 58.9062 31.1824V16.8172C58.9062 16.7459 58.9306 16.6868 58.981 16.6363C59.0315 16.5859 59.0906 16.5615 59.1619 16.5615H61.6489C61.7758 16.5615 61.8749 16.6189 61.9462 16.732L65.1341 21.6833C65.1758 21.7685 65.2193 21.7685 65.261 21.6833L68.4697 16.732C68.541 16.6189 68.6402 16.5615 68.7671 16.5615Z" fill="currentColor"/>
9
+ <path d="M74.1764 31.3633C74.1259 31.3146 74.1016 31.2537 74.1016 31.1824V16.8172C74.1016 16.7459 74.1259 16.6868 74.1764 16.6363C74.2268 16.5859 74.2859 16.5615 74.3572 16.5615H76.8442C76.9155 16.5615 76.9746 16.5859 77.0251 16.6363C77.0737 16.6868 77.0998 16.7459 77.0998 16.8172V31.1824C77.0998 31.2537 77.0755 31.3128 77.0251 31.3633C76.9746 31.4137 76.9155 31.4381 76.8442 31.4381H74.3572C74.2859 31.4381 74.2268 31.4137 74.1764 31.3633Z" fill="currentColor"/>
10
+ <path d="M88.3066 16.6361C88.3553 16.5874 88.4162 16.5613 88.4875 16.5613H90.9744C91.0457 16.5613 91.1049 16.5857 91.1553 16.6361C91.204 16.6865 91.2301 16.7457 91.2301 16.817V31.1822C91.2301 31.2535 91.2057 31.3126 91.1553 31.363C91.1049 31.4135 91.0457 31.4378 90.9744 31.4378H88.5727C88.4301 31.4378 88.331 31.3822 88.2753 31.2674L82.771 22.1717C82.7431 22.13 82.7136 22.1109 82.6858 22.1178C82.6579 22.1248 82.644 22.1578 82.644 22.2135L82.6858 31.1805C82.6858 31.2518 82.6614 31.3109 82.611 31.3613C82.5606 31.4117 82.5014 31.4361 82.4301 31.4361H79.9431C79.8718 31.4361 79.8127 31.4117 79.7623 31.3613C79.7118 31.3126 79.6875 31.2518 79.6875 31.1805V16.8152C79.6875 16.7439 79.7118 16.6848 79.7623 16.6344C79.8127 16.5839 79.8718 16.5596 79.9431 16.5596H82.3449C82.4858 16.5596 82.5849 16.617 82.6423 16.73L88.124 25.7822C88.1518 25.8239 88.1797 25.8431 88.2092 25.8361C88.2371 25.8292 88.251 25.7978 88.251 25.7404L88.2301 16.8152C88.2301 16.7439 88.2545 16.6848 88.3049 16.6344L88.3066 16.6361Z" fill="currentColor"/>
11
+ <path d="M93.8951 31.3633C93.8446 31.3146 93.8203 31.2537 93.8203 31.1824V16.8172C93.8203 16.7459 93.8446 16.6868 93.8951 16.6363C93.9455 16.5859 94.0047 16.5615 94.076 16.5615H96.5629C96.6342 16.5615 96.6934 16.5859 96.7438 16.6363C96.7925 16.6868 96.8186 16.7459 96.8186 16.8172V31.1824C96.8186 31.2537 96.7942 31.3128 96.7438 31.3633C96.6934 31.4137 96.6342 31.4381 96.5629 31.4381H94.076C94.0047 31.4381 93.9455 31.4137 93.8951 31.3633Z" fill="currentColor"/>
12
+ <path d="M109.267 16.5615H111.754C111.825 16.5615 111.885 16.5859 111.935 16.6363C111.984 16.6868 112.01 16.7459 112.01 16.8172V31.1824C112.01 31.2537 111.985 31.3128 111.935 31.3633C111.885 31.4137 111.825 31.4381 111.754 31.4381H109.267C109.196 31.4381 109.137 31.4137 109.086 31.3633C109.036 31.3146 109.011 31.2537 109.011 31.1824V21.812C109.011 21.7563 108.998 21.7268 108.97 21.7268C108.942 21.7268 108.912 21.7476 108.885 21.7911L106.632 25.318C106.561 25.4311 106.462 25.4885 106.335 25.4885H105.081C104.954 25.4885 104.855 25.4328 104.784 25.318L102.531 21.7911C102.504 21.7494 102.474 21.7302 102.446 21.7372C102.418 21.7441 102.405 21.7772 102.405 21.8328V31.1824C102.405 31.2537 102.38 31.3128 102.33 31.3633C102.279 31.4137 102.22 31.4381 102.149 31.4381H99.6619C99.5906 31.4381 99.5315 31.4137 99.481 31.3633C99.4306 31.3146 99.4062 31.2537 99.4062 31.1824V16.8172C99.4062 16.7459 99.4306 16.6868 99.481 16.6363C99.5315 16.5859 99.5906 16.5615 99.6619 16.5615H102.149C102.276 16.5615 102.375 16.6189 102.446 16.732L105.634 21.6833C105.676 21.7685 105.719 21.7685 105.761 21.6833L108.97 16.732C109.041 16.6189 109.14 16.5615 109.267 16.5615Z" fill="currentColor"/>
13
+ <path d="M123.782 31.2241L123.144 29.1424C123.116 29.0867 123.079 29.0572 123.038 29.0572H117.81C117.768 29.0572 117.732 29.085 117.704 29.1424L117.088 31.2241C117.046 31.3668 116.954 31.4363 116.812 31.4363H114.112C114.027 31.4363 113.963 31.412 113.921 31.3615C113.879 31.3128 113.871 31.2381 113.9 31.1389L118.49 16.7737C118.532 16.6328 118.624 16.5615 118.766 16.5615H122.102C122.243 16.5615 122.335 16.6328 122.379 16.7737L126.968 31.1389C126.982 31.1668 126.989 31.2033 126.989 31.245C126.989 31.372 126.911 31.4363 126.756 31.4363H124.057C123.916 31.4363 123.824 31.365 123.78 31.2241H123.782ZM118.554 26.7407H122.295C122.38 26.7407 122.408 26.6989 122.38 26.6137L120.467 20.3024C120.453 20.2467 120.432 20.2207 120.403 20.2276C120.375 20.2346 120.352 20.2589 120.339 20.3024L118.469 26.6137C118.455 26.6989 118.483 26.7407 118.554 26.7407Z" fill="currentColor"/>
14
+ <path d="M128.222 31.353C128.18 31.2974 128.187 31.2261 128.243 31.1409L132.365 24.0643C132.393 24.0226 132.393 23.9791 132.365 23.9374L128.243 16.8609L128.201 16.7339C128.201 16.6209 128.28 16.5635 128.434 16.5635H131.133C131.274 16.5635 131.38 16.6209 131.452 16.7339L134.213 21.6C134.255 21.6852 134.299 21.6852 134.34 21.6L137.102 16.7339C137.173 16.6209 137.28 16.5635 137.42 16.5635H140.099C140.198 16.5635 140.269 16.5913 140.311 16.6487C140.353 16.7061 140.346 16.7756 140.29 16.8609L136.168 23.9374C136.154 23.9791 136.154 24.0226 136.168 24.0643L140.29 31.1409L140.332 31.2678C140.332 31.3809 140.253 31.4383 140.099 31.4383H137.42C137.278 31.4383 137.172 31.3826 137.102 31.2678L134.34 26.4226C134.299 26.3374 134.255 26.3374 134.213 26.4226L131.429 31.2678C131.358 31.3809 131.252 31.4383 131.111 31.4383H128.433C128.333 31.4383 128.262 31.4104 128.22 31.353H128.222Z" fill="currentColor"/>
15
+ <defs>
16
+ <linearGradient id="paint0_linear_17_483" x1="3.99826" y1="24" x2="51.6208" y2="24" gradientUnits="userSpaceOnUse">
17
+ <stop stop-color="#E21680"/>
18
+ <stop offset="1" stop-color="#FF633A"/>
19
+ </linearGradient>
20
+ </defs>
21
+ </svg>
22
+
23
+ </div>
24
+ <hr>
25
+
26
+ <div align="center" style="line-height: 1;">
27
+ <a href="https://www.minimax.io" target="_blank" style="margin: 2px;">
28
+ <img alt="Homepage" src="https://img.shields.io/badge/_Homepage-MiniMax-FF4040?style=flat-square&labelColor=2C3E50&logo=data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHhtbG5zOnhsaW5rPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5L3hsaW5rIiB2aWV3Qm94PSIwIDAgNDkwLjE2IDQxMS43Ij48ZGVmcz48c3R5bGU+LmNscy0xe2ZpbGw6I2ZmZjt9PC9zdHlsZT48L2RlZnM+PHBhdGggY2xhc3M9ImNscy0xIiBkPSJNMjMzLjQ1LDQwLjgxYTE3LjU1LDE3LjU1LDAsMSwwLTM1LjEsMFYzMzEuNTZhNDAuODIsNDAuODIsMCwwLDEtODEuNjMsMFYxNDVhMTcuNTUsMTcuNTUsMCwxLDAtMzUuMDksMHY3OS4wNmE0MC44Miw0MC44MiwwLDAsMS04MS42MywwVjE5NS40MmExMS42MywxMS42MywwLDAsMSwyMy4yNiwwdjI4LjY2YTE3LjU1LDE3LjU1LDAsMCwwLDM1LjEsMFYxNDVBNDAuODIsNDAuODIsMCwwLDEsMTQwLDE0NVYzMzEuNTZhMTcuNTUsMTcuNTUsMCwwLDAsMzUuMSwwVjIxNy41aDBWNDAuODFhNDAuODEsNDAuODEsMCwxLDEsODEuNjIsMFYyODEuNTZhMTEuNjMsMTEuNjMsMCwxLDEtMjMuMjYsMFptMjE1LjksNjMuNEE0MC44Niw0MC44NiwwLDAsMCw0MDguNTMsMTQ1VjMwMC44NWExNy41NSwxNy41NSwwLDAsMS0zNS4wOSwwdi0yNjBhNDAuODIsNDAuODIsMCwwLDAtODEuNjMsMFYzNzAuODlhMTcuNTUsMTcuNTUsMCwwLDEtMzUuMSwwVjMzMGExMS42MywxMS42MywwLDEsMC0yMy4yNiwwdjQwLjg2YTQwLjgxLDQwLjgxLDAsMCwwLDgxLjYyLDBWNDAuODFhMTcuNTUsMTcuNTUsMCwwLDEsMzUuMSwwdjI2MGE0MC44Miw0MC44MiwwLDAsMCw4MS42MywwVjE0NWExNy41NSwxNy41NSwwLDEsMSwzNS4xLDBWMjgxLjU2YTExLjYzLDExLjYzLDAsMCwwLDIzLjI2LDBWMTQ1QTQwLjg1LDQwLjg1LDAsMCwwLDQ0OS4zNSwxMDQuMjFaIi8+PC9zdmc+&logoWidth=20" style="display: inline-block; vertical-align: middle;"/>
29
+ </a>
30
+ <a href="https://arxiv.org/abs/2501.08313" target="_blank" style="margin: 2px;">
31
+ <img alt="Paper" src="https://img.shields.io/badge/📖_Paper-MiniMax--01-FF4040?style=flat-square&labelColor=2C3E50" style="display: inline-block; vertical-align: middle;"/>
32
+ </a>
33
+ <a href="https://chat.minimax.io/" target="_blank" style="margin: 2px;">
34
+ <img alt="Chat" src="https://img.shields.io/badge/_MiniMax_Chat-FF4040?style=flat-square&labelColor=2C3E50&logo=data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHhtbG5zOnhsaW5rPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5L3hsaW5rIiB2aWV3Qm94PSIwIDAgNDkwLjE2IDQxMS43Ij48ZGVmcz48c3R5bGU+LmNscy0xe2ZpbGw6I2ZmZjt9PC9zdHlsZT48L2RlZnM+PHBhdGggY2xhc3M9ImNscy0xIiBkPSJNMjMzLjQ1LDQwLjgxYTE3LjU1LDE3LjU1LDAsMSwwLTM1LjEsMFYzMzEuNTZhNDAuODIsNDAuODIsMCwwLDEtODEuNjMsMFYxNDVhMTcuNTUsMTcuNTUsMCwxLDAtMzUuMDksMHY3OS4wNmE0MC44Miw0MC44MiwwLDAsMS04MS42MywwVjE5NS40MmExMS42MywxMS42MywwLDAsMSwyMy4yNiwwdjI4LjY2YTE3LjU1LDE3LjU1LDAsMCwwLDM1LjEsMFYxNDVBNDAuODIsNDAuODIsMCwwLDEsMTQwLDE0NVYzMzEuNTZhMTcuNTUsMTcuNTUsMCwwLDAsMzUuMSwwVjIxNy41aDBWNDAuODFhNDAuODEsNDAuODEsMCwxLDEsODEuNjIsMFYyODEuNTZhMTEuNjMsMTEuNjMsMCwxLDEtMjMuMjYsMFptMjE1LjksNjMuNEE0MC44Niw0MC44NiwwLDAsMCw0MDguNTMsMTQ1VjMwMC44NWExNy41NSwxNy41NSwwLDAsMS0zNS4wOSwwdi0yNjBhNDAuODIsNDAuODIsMCwwLDAtODEuNjMsMFYzNzAuODlhMTcuNTUsMTcuNTUsMCwwLDEtMzUuMSwwVjMzMGExMS42MywxMS42MywwLDEsMC0yMy4yNiwwdjQwLjg2YTQwLjgxLDQwLjgxLDAsMCwwLDgxLjYyLDBWNDAuODFhMTcuNTUsMTcuNTUsMCwwLDEsMzUuMSwwdjI2MGE0MC44Miw0MC44MiwwLDAsMCw4MS42MywwVjE0NWExNy41NSwxNy41NSwwLDEsMSwzNS4xLDBWMjgxLjU2YTExLjYzLDExLjYzLDAsMCwwLDIzLjI2LDBWMTQ1QTQwLjg1LDQwLjg1LDAsMCwwLDQ0OS4zNSwxMDQuMjFaIi8+PC9zdmc+&logoWidth=20" style="display: inline-block; vertical-align: middle;"/>
35
+ </a>
36
+ <a href="https://www.minimax.io/platform" style="margin: 2px;">
37
+ <img alt="API" src="https://img.shields.io/badge/⚡_API-Platform-FF4040?style=flat-square&labelColor=2C3E50" style="display: inline-block; vertical-align: middle;"/>
38
+ </a>
39
+ <a href="https://github.com/MiniMax-AI/MiniMax-MCP" style="margin: 2px;">
40
+ <img alt="MCP" src="https://img.shields.io/badge/🚀_MCP-MiniMax_MCP-FF4040?style=flat-square&labelColor=2C3E50" style="display: inline-block; vertical-align: middle;"/>
41
+ </a>
42
+ </div>
43
+ <div align="center" style="line-height: 1;">
44
+ <a href="https://github.com/MiniMax-AI/MiniMax-01" target="_blank" style="margin: 2px;">
45
+ <img alt="GitHub" src="https://img.shields.io/badge/_GitHub-MiniMax-FF4040?style=flat-square&labelColor=2C3E50" style="display: inline-block; vertical-align: middle;"/>
46
+ </a>
47
+ <a href="https://huggingface.co/MiniMaxAI/MiniMax-Text-01/blob/main/LICENSE-MODEL" style="margin: 2px;">
48
+ <img alt="Model License" src="https://img.shields.io/badge/_Model_License-Model_Agreement-FF4040?style=flat-square&labelColor=2C3E50" style="display: inline-block; vertical-align: middle;"/>
49
+ </a>
50
+ <a href="https://huggingface.co/MiniMaxAI/MiniMax-Text-01/blob/main/LICENSE-CODE" style="margin: 2px;">
51
+ <img alt="Code License" src="https://img.shields.io/badge/_Code_License-MIT-FF4040?style=flat-square&labelColor=2C3E50" style="display: inline-block; vertical-align: middle;"/>
52
+ </a>
53
+ </div>
54
+ <div align="center" style="line-height: 1;">
55
+ <a href="https://huggingface.co/MiniMaxAI/MiniMax-Text-01/blob/main/figures/wechat-qrcode.jpeg" target="_blank" style="margin: 2px;">
56
+ WeChat
57
+ </a>
58
+ </div>
59
+
60
+
61
+ # MiniMax-Text-01
62
+
63
+ ## 1. Introduction
64
+
65
+ MiniMax-Text-01 is a powerful language model with 456 billion total parameters, of which 45.9 billion are activated per token. To better unlock the long context capabilities of the model, MiniMax-Text-01 adopts a hybrid architecture that combines Lightning Attention, Softmax Attention and Mixture-of-Experts (MoE). Leveraging advanced parallel strategies and innovative compute-communication overlap methods—such as Linear Attention Sequence Parallelism Plus (LASP+), varlen ring attention, Expert Tensor Parallel (ETP), etc., MiniMax-Text-01's training context length is extended to 1 million tokens, and it can handle a context of up to 4 million tokens during the inference. On various academic benchmarks, MiniMax-Text-01 also demonstrates the performance of a top-tier model.
66
+
67
+ <p align="center">
68
+ <img width="100%" src="figures/TextBench.png">
69
+ </p>
70
+
71
+ ## 2. Model Architecture
72
+
73
+ The architecture of MiniMax-Text-01 is briefly described as follows:
74
+ - Total Parameters: 456B
75
+ - Activated Parameters per Token: 45.9B
76
+ - Number of Layers: 80
77
+ - Hybrid Attention: a softmax attention layer is positioned after every 7 lightning attention layers.
78
+ - Number of attention heads: 64
79
+ - Attention head dimension: 128
80
+ - Mixture of Experts:
81
+ - Number of experts: 32
82
+ - Expert hidden dimension: 9216
83
+ - Top-2 routing strategy
84
+ - Positional Encoding: Rotary Position Embedding (RoPE) applied to half of the attention head dimension with a base frequency of 10,000,000
85
+ - Hidden Size: 6144
86
+ - Vocab Size: 200,064
87
+
88
+ ## 3. Evaluation
89
+
90
+ ### Core Academic Benchmarks
91
+
92
+ | **Tasks** | **GPT-4o (11-20)** | **Claude-3.5-Sonnet (10-22)** | **Gemini-1.5-Pro (002)** | **Gemini-2.0-Flash (exp)** | **Qwen2.5-72B-Inst.** | **DeepSeek-V3** | **Llama-3.1-405B-Inst.** | **MiniMax-Text-01** |
93
+ |-------------------------------|--------------------|-------------------------------|--------------------------|----------------------------|-----------------------|-----------------|--------------------------|---------------------|
94
+ | **General** | | | | | | | | |
95
+ | MMLU<sup>*</sup> | 85.7 | 88.3 | 86.8 | 86.5 | 86.1 | 88.5 | **88.6** | 88.5 |
96
+ | MMLU-Pro<sup>*</sup> | 74.4 | **78.0** | 75.8 | 76.4 | 71.1 | 75.9 | 73.3 | 75.7 |
97
+ | SimpleQA | **39.0** | 28.1 | 23.4 | 26.6 | 10.3 | 24.9 | 23.2 | 23.7 |
98
+ | C-SimpleQA | 64.6 | 56.8 | 59.4 | 63.3 | 52.2 | 64.8 | 54.7 | **67.4** |
99
+ | IFEval _(avg)_ | 84.1 | **90.1** | 89.4 | 88.4 | 87.2 | 87.3 | 86.4 | 89.1 |
100
+ | Arena-Hard | **92.4** | 87.6 | 85.3 | 72.7 | 81.2 | 91.4 | 63.5 | 89.1 |
101
+ | **Reasoning** | | | | | | | | |
102
+ | GPQA<sup>*</sup> _(diamond)_ | 46.0 | **65.0** | 59.1 | 62.1 | 49.0 | 59.1 | 50.7 | 54.4 |
103
+ | DROP<sup>*</sup> _(F1)_ | 89.2 | 88.8 | 89.2 | 89.3 | 85.0 | 91.0 | **92.5** | 87.8 |
104
+ | **Mathematics** | | | | | | | | |
105
+ | GSM8k<sup>*</sup> | 95.6 | **96.9** | 95.2 | 95.4 | 95.8 | 96.7 | 96.7 | 94.8 |
106
+ | MATH<sup>*</sup> | 76.6 | 74.1 | **84.6** | 83.9 | 81.8 | **84.6** | 73.8 | 77.4 |
107
+ | **Coding** | | | | | | | | |
108
+ | MBPP + | 76.2 | 75.1 | 75.4 | 75.9 | 77.0 | **78.8** | 73.0 | 71.7 |
109
+ | HumanEval | 90.2 | **93.7** | 86.6 | 89.6 | 86.6 | 92.1 | 89.0 | 86.9 |
110
+
111
+ <sup>*</sup> Evaluated following a _0-shot CoT_ setting.
112
+
113
+ ### Long Benchmarks
114
+ #### 4M Needle In A Haystack Test
115
+ <p align="center">
116
+ <img width="90%" src="figures/niah.png">
117
+ </p>
118
+
119
+ #### Ruler
120
+ | Model | 4k | 8k | 16k | 32k | 64k | 128k | 256k | 512k | 1M |
121
+ |-------|----|----|-----|-----|-----|------|------|------|----|
122
+ | **GPT-4o (11-20)** | **0.970** | 0.921 | 0.890 | 0.888 | 0.884 | - | - | - | - |
123
+ | **Claude-3.5-Sonnet (10-22)** | 0.965 | 0.960 | 0.957 | 0.950 | **0.952** | 0.938 | - | - | - |
124
+ | **Gemini-1.5-Pro (002)** | 0.962 | 0.960 | **0.960** | **0.958** | 0.938 | 0.917 | 0.916 | 0.861 | 0.850 |
125
+ | **Gemini-2.0-Flash (exp)** | 0.960 | 0.960 | 0.951 | 0.957 | 0.937 | 0.860 | 0.797 | 0.709 | - |
126
+ | **MiniMax-Text-01** | 0.963 | **0.961** | 0.953 | 0.954 | 0.943 | **0.947** | **0.945** | **0.928** | **0.910** |
127
+
128
+ #### LongBench v2
129
+ | **Model** | **overall** | **easy** | **hard** | **short** | **medium** | **long** |
130
+ |----------------------------|-------------|----------|----------|------------|------------|----------|
131
+ | Human | 53.7 | 100.0 | 25.1 | 47.2 | 59.1 | 53.7 |
132
+ | **w/ CoT** | | | | | | |
133
+ | GPT-4o (11-20) | 51.4 | 54.2 | 49.7 | 59.6 | 48.6 | 43.5 |
134
+ | Claude-3.5-Sonnet (10-22) | 46.7 | 55.2 | 41.5 | 53.9 | 41.9 | 44.4 |
135
+ | Deepseek-V3 | - | - | - | - | - | - |
136
+ | Qwen2.5-72B-Inst. | 43.5 | 47.9 | 40.8 | 48.9 | 40.9 | 39.8 |
137
+ | **MiniMax-Text-01** | **56.5** | **66.1** | **50.5** | **61.7** | **56.7** | **47.2** |
138
+ | **w/o CoT** | | | | | | |
139
+ | GPT-4o (11-20) | 50.1 | 57.4 | 45.6 | 53.3 | 52.4 | 40.2 |
140
+ | Claude-3.5-Sonnet (10-22) | 41.0 | 46.9 | 37.3 | 46.1 | 38.6 | 37.0 |
141
+ | Deepseek-V3 | 48.7 | - | - | - | - | - |
142
+ | Qwen2.5-72B-Inst. | 42.1 | 42.7 | 41.8 | 45.6 | 38.1 | **44.4** |
143
+ | **MiniMax-Text-01** | **52.9** | **60.9** | **47.9** | **58.9** | **52.6** | 43.5 |
144
+
145
+ #### MTOB
146
+ | **Context Type** | **no context** | **half book** | **full book** | **Δ half book** | **Δ full book** |
147
+ |------------------|----------------|---------------|---------------|------------------|-----------------|
148
+ | **eng → kalam (ChrF)** | | | | | |
149
+ | GPT-4o (11-20) | 9.90 | **54.30** | - | 44.40 | - |
150
+ | Claude-3.5-Sonnet (10-22) | 20.22 | 53.62 | 55.65 | 33.39 | 35.42 |
151
+ | Gemini-1.5-Pro (002) | 16.79 | 53.68 | **57.90** | 36.89 | 41.11 |
152
+ | Gemini-2.0-Flash (exp) | 12.20 | 49.50 | 53.30 | 37.30 | 41.10 |
153
+ | Qwen-Long | 16.55 | 48.48 | 45.94 | 31.92 | 29.39 |
154
+ | **MiniMax-Text-01** | 6.0 | 51.74 | 51.60 | **45.7** | **45.6** |
155
+ | **kalam → eng (BLEURT)** | | | | | |
156
+ | GPT-4o (11-20) | 33.20 | 58.30 | - | 25.10 | - |
157
+ | Claude-3.5-Sonnet (10-22) | 31.42 | 59.70 | 62.30 | 28.28 | 30.88 |
158
+ | Gemini-1.5-Pro (002) | 32.02 | **61.52** | **63.09** | **29.50** | **31.07** |
159
+ | Gemini-2.0-Flash (exp) | 33.80 | 57.50 | 57.00 | 23.70 | 23.20 |
160
+ | Qwen-Long | 30.13 | 53.14 | 32.15 | 23.01 | 2.02 |
161
+ | **MiniMax-Text-01** | 33.65 | 57.10 | 58.00 | 23.45 | 24.35 |
162
+
163
+
164
+ ## 4. Quickstart
165
+ Here we provide a simple example of loading the tokenizer and model to generate content.
166
+ ```python
167
+ from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig, QuantoConfig, GenerationConfig
168
+
169
+ # load hf config
170
+ hf_config = AutoConfig.from_pretrained("MiniMaxAI/MiniMax-Text-01", trust_remote_code=True)
171
+
172
+ # quantization config, int8 is recommended
173
+ quantization_config = QuantoConfig(
174
+ weights="int8",
175
+ modules_to_not_convert=[
176
+ "lm_head",
177
+ "embed_tokens",
178
+ ] + [f"model.layers.{i}.coefficient" for i in range(hf_config.num_hidden_layers)]
179
+ + [f"model.layers.{i}.block_sparse_moe.gate" for i in range(hf_config.num_hidden_layers)]
180
+ )
181
+
182
+ # assume 8 GPUs
183
+ world_size = 8
184
+ layers_per_device = hf_config.num_hidden_layers // world_size
185
+ # set device map
186
+ device_map = {
187
+ 'model.embed_tokens': 'cuda:0',
188
+ 'model.norm': f'cuda:{world_size - 1}',
189
+ 'lm_head': f'cuda:{world_size - 1}'
190
+ }
191
+ for i in range(world_size):
192
+ for j in range(layers_per_device):
193
+ device_map[f'model.layers.{i * layers_per_device + j}'] = f'cuda:{i}'
194
+
195
+ # load tokenizer
196
+ tokenizer = AutoTokenizer.from_pretrained("MiniMaxAI/MiniMax-Text-01")
197
+ prompt = "Hello!"
198
+ messages = [
199
+ {"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant created by MiniMax based on MiniMax-Text-01 model."}]},
200
+ {"role": "user", "content": [{"type": "text", "text": prompt}]},
201
+ ]
202
+ text = tokenizer.apply_chat_template(
203
+ messages,
204
+ tokenize=False,
205
+ add_generation_prompt=True
206
+ )
207
+ # tokenize and move to device
208
+ model_inputs = tokenizer(text, return_tensors="pt").to("cuda")
209
+
210
+ # load bfloat16 model, move to device, and apply quantization
211
+ quantized_model = AutoModelForCausalLM.from_pretrained(
212
+ "MiniMaxAI/MiniMax-Text-01",
213
+ torch_dtype="bfloat16",
214
+ device_map=device_map,
215
+ quantization_config=quantization_config,
216
+ trust_remote_code=True,
217
+ offload_buffers=True,
218
+ )
219
+
220
+ # generate response
221
+ generation_config = GenerationConfig(
222
+ max_new_tokens=20,
223
+ eos_token_id=200020,
224
+ use_cache=True,
225
+ )
226
+ generated_ids = quantized_model.generate(**model_inputs, generation_config=generation_config)
227
+ print(f"generated_ids: {generated_ids}")
228
+ generated_ids = [
229
+ output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
230
+ ]
231
+ response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
232
+ ```
233
+
234
+ ## 5. Deployment Guide
235
+ For production deployment, we recommend using [vLLM](https://docs.vllm.ai/en/latest/) to serve MiniMax-Text-01. vLLM provides excellent performance for serving large language models with the following features:
236
+
237
+ 🔥 Outstanding service throughput performance
238
+ ⚡ Efficient and intelligent memory management
239
+ 📦 Powerful batch request processing capability
240
+ ⚙️ Deeply optimized underlying performance
241
+
242
+ For detailed deployment instructions, please refer to our [vLLM Deployment Guide](https://github.com/MiniMax-AI/MiniMax-01/blob/main/docs/vllm_deployment_guide.md).
243
+
244
+ ## 6. Function Calling
245
+ MiniMax-Text-01 supports Function Calling capability, enabling the model to intelligently identify when external functions need to be called and output parameters in structured JSON format. With Function Calling, you can:
246
+ - Let the model recognize implicit function call needs in user requests
247
+ - Receive structured parameter outputs for seamless application integration
248
+ - Support various complex parameter types, including nested objects and arrays
249
+ Function Calling supports standard OpenAI-compatible format definitions and integrates seamlessly with the Transformers library. For detailed usage instructions, please refer to our [Function Call Guide](./MiniMax-Text-01_Function_Call_Guide.md) or [Chinese Guide](./MiniMax-Text-01_Function_Call_Guide_CN.md).
250
+
251
+ ## 7. Citation
252
+
253
+ ```
254
+ @misc{minimax2025minimax01scalingfoundationmodels,
255
+ title={MiniMax-01: Scaling Foundation Models with Lightning Attention},
256
+ author={MiniMax and Aonian Li and Bangwei Gong and Bo Yang and Boji Shan and Chang Liu and Cheng Zhu and Chunhao Zhang and Congchao Guo and Da Chen and Dong Li and Enwei Jiao and Gengxin Li and Guojun Zhang and Haohai Sun and Houze Dong and Jiadai Zhu and Jiaqi Zhuang and Jiayuan Song and Jin Zhu and Jingtao Han and Jingyang Li and Junbin Xie and Junhao Xu and Junjie Yan and Kaishun Zhang and Kecheng Xiao and Kexi Kang and Le Han and Leyang Wang and Lianfei Yu and Liheng Feng and Lin Zheng and Linbo Chai and Long Xing and Meizhi Ju and Mingyuan Chi and Mozhi Zhang and Peikai Huang and Pengcheng Niu and Pengfei Li and Pengyu Zhao and Qi Yang and Qidi Xu and Qiexiang Wang and Qin Wang and Qiuhui Li and Ruitao Leng and Shengmin Shi and Shuqi Yu and Sichen Li and Songquan Zhu and Tao Huang and Tianrun Liang and Weigao Sun and Weixuan Sun and Weiyu Cheng and Wenkai Li and Xiangjun Song and Xiao Su and Xiaodong Han and Xinjie Zhang and Xinzhu Hou and Xu Min and Xun Zou and Xuyang Shen and Yan Gong and Yingjie Zhu and Yipeng Zhou and Yiran Zhong and Yongyi Hu and Yuanxiang Fan and Yue Yu and Yufeng Yang and Yuhao Li and Yunan Huang and Yunji Li and Yunpeng Huang and Yunzhi Xu and Yuxin Mao and Zehan Li and Zekang Li and Zewei Tao and Zewen Ying and Zhaoyang Cong and Zhen Qin and Zhenhua Fan and Zhihang Yu and Zhuo Jiang and Zijia Wu},
257
+ year={2025},
258
+ eprint={2501.08313},
259
+ archivePrefix={arXiv},
260
+ primaryClass={cs.CL},
261
+ url={https://arxiv.org/abs/2501.08313},
262
+ }
263
+ ```
264
+
265
+ ## 8. Chatbot & API
266
+ For general use and evaluation, we provide a [Chatbot](https://chat.minimax.io/) with online search capabilities and the [online API](https://www.minimax.io/platform) for developers. For general use and evaluation, we provide the [MiniMax MCP Server](https://github.com/MiniMax-AI/MiniMax-MCP) with video generation, image generation, speech synthesis, and voice cloning for developers.
267
+
268
+ ## 9. Contact Us
269
+ Contact us at [[email protected]](mailto:[email protected]).
config.json ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "MiniMaxText01ForCausalLM"
4
+ ],
5
+ "attention_dropout": 0.0,
6
+ "layer_types": [
7
+ 0,
8
+ 0,
9
+ 0,
10
+ 0,
11
+ 0,
12
+ 0,
13
+ 0,
14
+ 1,
15
+ 0,
16
+ 0,
17
+ 0,
18
+ 0,
19
+ 0,
20
+ 0,
21
+ 0,
22
+ 1,
23
+ 0,
24
+ 0,
25
+ 0,
26
+ 0,
27
+ 0,
28
+ 0,
29
+ 0,
30
+ 1,
31
+ 0,
32
+ 0,
33
+ 0,
34
+ 0,
35
+ 0,
36
+ 0,
37
+ 0,
38
+ 1,
39
+ 0,
40
+ 0,
41
+ 0,
42
+ 0,
43
+ 0,
44
+ 0,
45
+ 0,
46
+ 1,
47
+ 0,
48
+ 0,
49
+ 0,
50
+ 0,
51
+ 0,
52
+ 0,
53
+ 0,
54
+ 1,
55
+ 0,
56
+ 0,
57
+ 0,
58
+ 0,
59
+ 0,
60
+ 0,
61
+ 0,
62
+ 1,
63
+ 0,
64
+ 0,
65
+ 0,
66
+ 0,
67
+ 0,
68
+ 0,
69
+ 0,
70
+ 1,
71
+ 0,
72
+ 0,
73
+ 0,
74
+ 0,
75
+ 0,
76
+ 0,
77
+ 0,
78
+ 1,
79
+ 0,
80
+ 0,
81
+ 0,
82
+ 0,
83
+ 0,
84
+ 0,
85
+ 0,
86
+ 1
87
+ ],
88
+ "auto_map": {
89
+ "AutoConfig": "configuration_minimax_text_01.MiniMaxText01Config",
90
+ "AutoModelForCausalLM": "modeling_minimax_text_01.MiniMaxText01ForCausalLM"
91
+ },
92
+ "bos_token_id": null,
93
+ "eos_token_id": null,
94
+ "head_dim": 128,
95
+ "hidden_act": "silu",
96
+ "hidden_size": 6144,
97
+ "initializer_range": 0.02,
98
+ "intermediate_size": 9216,
99
+ "layernorm_full_attention_alpha": 3.5565588200778455,
100
+ "layernorm_full_attention_beta": 1.0,
101
+ "layernorm_linear_attention_alpha": 3.5565588200778455,
102
+ "layernorm_linear_attention_beta": 1.0,
103
+ "layernorm_mlp_alpha": 3.5565588200778455,
104
+ "layernorm_mlp_beta": 1.0,
105
+ "max_position_embeddings": 10240000,
106
+ "model_type": "minimax_text_01",
107
+ "num_attention_heads": 64,
108
+ "num_experts_per_tok": 2,
109
+ "num_hidden_layers": 80,
110
+ "num_key_value_heads": 8,
111
+ "num_local_experts": 32,
112
+ "output_router_logits": false,
113
+ "postnorm": true,
114
+ "rms_norm_eps": 1e-05,
115
+ "rope_theta": 10000000,
116
+ "rotary_dim": 64,
117
+ "router_aux_loss_coef": 0.001,
118
+ "router_jitter_noise": 0.0,
119
+ "shared_intermediate_size": 0,
120
+ "shared_moe_mode": "sigmoid",
121
+ "sliding_window": null,
122
+ "tie_word_embeddings": false,
123
+ "transformers_version": "4.45.2",
124
+ "use_cache": true,
125
+ "vocab_size": 200064
126
+ }
configuration_minimax_text_01.py ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ MiniMaxText01 model configuration"""
2
+
3
+ from transformers.configuration_utils import PretrainedConfig
4
+ from transformers.utils import logging
5
+
6
+
7
+ logger = logging.get_logger(__name__)
8
+
9
+
10
class MiniMaxText01Config(PretrainedConfig):
    r"""
    Configuration class for a [`MiniMaxText01Model`].

    Stores every hyper-parameter needed to instantiate a MiniMaxText01 model;
    instantiating with no arguments yields the defaults listed below.
    Inherits from [`PretrainedConfig`]; see its documentation for the
    behaviour it controls (serialization, token ids, etc.).

    Args:
        vocab_size (`int`, *optional*, defaults to 32000):
            Number of distinct tokens representable by the `inputs_ids` passed
            to [`MiniMaxText01Model`].
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimensionality of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 14336):
            Dimensionality of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads per attention layer.
        num_key_value_heads (`int`, *optional*, defaults to 8):
            Number of key/value heads used for Grouped Query Attention. Equal
            to `num_attention_heads` gives Multi Head Attention (MHA), `1`
            gives Multi Query Attention (MQA), and anything in between is GQA.
            When converting a multi-head checkpoint to a GQA checkpoint, each
            group's key and value head should be built by mean-pooling the
            original heads in that group; for details see
            [this paper](https://arxiv.org/pdf/2305.13245.pdf). `None` falls
            back to `num_attention_heads`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            Non-linear activation function used in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to `4096*32`):
            Maximum sequence length this model might ever be used with.
            MiniMaxText01's sliding window attention allows sequences of up to
            4096*32 tokens.
        initializer_range (`float`, *optional*, defaults to 0.02):
            Standard deviation of the truncated-normal initializer for all
            weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            Epsilon used by the RMS normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether the model should return the last key/value attentions (not
            used by all models). Only relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*):
            Id of the padding token.
        bos_token_id (`int`, *optional*):
            Id of the "beginning-of-sequence" token.
        eos_token_id (`int`, *optional*):
            Id of the "end-of-sequence" token.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether the input and output word embeddings should be tied.
        rope_theta (`float`, *optional*, defaults to 1000000.0):
            Base period of the RoPE embeddings.
        sliding_window (`int`, *optional*):
            Sliding window attention window size; left as `None` when unset.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            Dropout ratio for the attention probabilities.
        num_experts_per_tok (`int`, *optional*, defaults to 2):
            Number of experts routed per token (the `top-k` routing
            parameter).
        num_local_experts (`int`, *optional*, defaults to 8):
            Number of experts per Sparse MLP layer.
        output_router_logits (`bool`, *optional*, defaults to `False`):
            Whether router logits should be returned by the model; enabling
            this also allows the model to output the auxiliary loss.
        router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
            Aux-loss factor for the total loss.
        router_jitter_noise (`float`, *optional*, defaults to 0.0):
            Amount of noise to add to the router.

    ```python
    >>> from transformers import MiniMaxText01Model, MiniMaxText01Config

    >>> # Initializing a MiniMaxText01 style configuration
    >>> configuration = MiniMaxText01Config()

    >>> # Initializing a model from the MiniMaxText01 style configuration
    >>> model = MiniMaxText01Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "MiniMaxText01"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=14336,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=8,
        hidden_act="silu",
        max_position_embeddings=4096 * 32,
        initializer_range=0.02,
        rms_norm_eps=1e-5,
        use_cache=True,
        pad_token_id=None,
        bos_token_id=None,
        eos_token_id=None,
        tie_word_embeddings=False,
        rope_theta=1e6,
        sliding_window=None,
        attention_dropout=0.0,
        num_experts_per_tok=2,
        num_local_experts=8,
        output_router_logits=False,
        router_aux_loss_coef=0.001,
        router_jitter_noise=0.0,
        **kwargs,
    ):
        # Core transformer geometry.
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.sliding_window = sliding_window

        # Backward compatibility: older configs omitted num_key_value_heads,
        # which implied plain multi-head attention.
        self.num_key_value_heads = (
            num_attention_heads if num_key_value_heads is None else num_key_value_heads
        )

        # Activation, initialization, and runtime knobs.
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.attention_dropout = attention_dropout

        # Mixture-of-experts routing parameters.
        self.num_experts_per_tok = num_experts_per_tok
        self.num_local_experts = num_local_experts
        self.output_router_logits = output_router_logits
        self.router_aux_loss_coef = router_aux_loss_coef
        self.router_jitter_noise = router_jitter_noise

        # Token ids and embedding tying are handled by the base class.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
figures/MiniMaxLogo.png ADDED

Git LFS Details

  • SHA256: 8b31758e26d66556e0ac721865584ba70653cd585e9b8529f0a835a2b5849fa7
  • Pointer size: 130 Bytes
  • Size of remote file: 45.3 kB
figures/TextBench.png ADDED

Git LFS Details

  • SHA256: 2c448f4616f7fe6c266005d5ef03cb323e565d2ca1dfb2288be86fe7875aa1ae
  • Pointer size: 131 Bytes
  • Size of remote file: 420 kB
figures/VisionBench.png ADDED

Git LFS Details

  • SHA256: 4a3ed4b165e398e31c1d26708c11571283d87400c1b423d402f22a3933d52be9
  • Pointer size: 131 Bytes
  • Size of remote file: 440 kB
figures/hailuo.svg ADDED
figures/image.jpg ADDED

Git LFS Details

  • SHA256: b07e9e9df6a993a802382409d70732e07bff1ffa85fb9decce4c5ae540681014
  • Pointer size: 131 Bytes
  • Size of remote file: 401 kB
figures/minimax.svg ADDED
figures/niah.png ADDED

Git LFS Details

  • SHA256: 73fbd47b590198dad0ea6be7c45c35ce738a2978deb893c842721f0f0cf02eb8
  • Pointer size: 132 Bytes
  • Size of remote file: 1.47 MB
figures/wechat-qrcode.jpeg ADDED

Git LFS Details

  • SHA256: 491e70d0c024055a1c8899ec8de9ce083b1a3ffb4f50ff82d9ca7b6c691d167e
  • Pointer size: 130 Bytes
  • Size of remote file: 90.7 kB
main.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig, QuantoConfig, GenerationConfig
2
+ import torch
3
+ import argparse
4
+
5
+ """
6
+ usage:
7
+ export SAFETENSORS_FAST_GPU=1
8
+ python main.py --quant_type int8 --world_size 8 --model_id <model_path>
9
+ """
10
+
11
def generate_quanto_config(hf_config: "AutoConfig", quant_type: str):
    """Build the Quanto quantization config for the requested weight dtype.

    Args:
        hf_config: loaded model config; only ``num_hidden_layers`` is read,
            to enumerate the per-layer modules that must stay unquantized.
        quant_type: ``"default"`` (no quantization) or ``"int8"``
            (int8 weight-only quantization).

    Returns:
        A ``QuantoConfig`` for ``"int8"``, or ``None`` for ``"default"``.

    Raises:
        ValueError: if ``quant_type`` is not recognized (the original raised a
            bare ``KeyError``).
    """
    if quant_type == "default":
        return None
    if quant_type == "int8":
        # lm_head, the embeddings, and the per-layer routing modules
        # (coefficient / MoE gate) are excluded from quantization.
        skip = ["lm_head", "embed_tokens"]
        skip += [f"model.layers.{i}.coefficient" for i in range(hf_config.num_hidden_layers)]
        skip += [f"model.layers.{i}.block_sparse_moe.gate" for i in range(hf_config.num_hidden_layers)]
        # Constructed lazily so the "default" path never touches QuantoConfig.
        return QuantoConfig(
            weights="int8",
            modules_to_not_convert=skip,
        )
    raise ValueError(f"unsupported quant_type: {quant_type!r} (expected 'default' or 'int8')")
24
+
25
+
26
def parse_args():
    """Parse command-line options for the quantized-inference demo."""
    ap = argparse.ArgumentParser()
    ap.add_argument(
        "--quant_type",
        type=str,
        default="default",
        choices=["default", "int8"],
    )
    ap.add_argument("--model_id", type=str, required=True)
    ap.add_argument("--world_size", type=int, required=True)
    return ap.parse_args()
32
+
33
+
34
def check_params(args, hf_config: "AutoConfig"):
    """Validate CLI arguments against the model configuration.

    Args:
        args: parsed namespace with ``quant_type`` and ``world_size``.
        hf_config: loaded model config; only ``num_hidden_layers`` is read.

    Raises:
        ValueError: if the GPU count is insufficient for int8 quantization,
            or the layer count cannot be split evenly across GPUs.
    """
    # Was an `assert`, which is silently stripped under `python -O`;
    # raise explicitly so validation always runs.
    if args.quant_type == "int8" and args.world_size < 8:
        raise ValueError("int8 weight-only quantization requires at least 8 GPUs")

    # main() assigns a contiguous slice of num_hidden_layers // world_size
    # layers per GPU, so the split must be exact.
    if hf_config.num_hidden_layers % args.world_size != 0:
        raise ValueError(
            f"num_hidden_layers({hf_config.num_hidden_layers}) must be divisible "
            f"by world_size({args.world_size})"
        )
39
+
40
+
41
@torch.no_grad()
def main():
    """Load the (optionally int8-quantized) model sharded across GPUs and run a short chat generation."""
    args = parse_args()
    print("\n=============== Argument ===============")
    for key in vars(args):
        print(f"{key}: {vars(args)[key]}")
    print("========================================")

    model_id = args.model_id

    # trust_remote_code is required: the config/model classes live in this repo.
    hf_config = AutoConfig.from_pretrained(model_id, trust_remote_code=True)
    check_params(args, hf_config)
    quantization_config = generate_quanto_config(hf_config, args.quant_type)

    # Manual pipeline-style placement: embeddings on the first GPU, final norm
    # and lm_head on the last, and an equal contiguous slice of decoder layers
    # on each device (check_params guarantees the split is exact).
    device_map = {
        'model.embed_tokens': 'cuda:0',
        'model.norm': f'cuda:{args.world_size - 1}',
        'lm_head': f'cuda:{args.world_size - 1}'
    }
    layers_per_device = hf_config.num_hidden_layers // args.world_size
    for i in range(args.world_size):
        for j in range(layers_per_device):
            device_map[f'model.layers.{i * layers_per_device + j}'] = f'cuda:{i}'

    tokenizer = AutoTokenizer.from_pretrained(model_id)
    prompt = "Hello!"
    messages = [
        {"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant created by Minimax based on MiniMax-Text-01 model."}]},
        {"role": "user", "content": [{"type": "text", "text": prompt}]},
    ]
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )
    # Inputs go to cuda:0, where the embedding layer was placed above.
    model_inputs = tokenizer(text, return_tensors="pt").to("cuda")
    quantized_model = AutoModelForCausalLM.from_pretrained(
        model_id,
        torch_dtype="bfloat16",
        device_map=device_map,
        quantization_config=quantization_config,
        trust_remote_code=True,
        offload_buffers=True,
    )
    generation_config = GenerationConfig(
        max_new_tokens=20,
        eos_token_id=200020,  # NOTE(review): presumably the end-of-turn token id — confirm against the tokenizer config
        use_cache=True,
    )
    generated_ids = quantized_model.generate(**model_inputs, generation_config=generation_config)
    print(f"generated_ids: {generated_ids}")
    # Strip the prompt tokens so only the newly generated continuation is decoded.
    generated_ids = [
        output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]
    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
    print(response)

if __name__ == "__main__":
    main()
100
+
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00000-of-00413.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:45b85926994c2ee092c2319158471ed9262d146bd5973c442e135dae9e21624d
3
+ size 4916773000
model-00001-of-00413.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2cdaea944f170b60d206e41a80accbfcf5b9c74744f014a819c30f45cb9a9130
3
+ size 2191113152
model-00002-of-00413.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0f15b83f1afd5c5da8853b7d9bd2c9814dbcb2de7a2f1a24765e16aa7a310d82
3
+ size 2330307784
model-00003-of-00413.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:83f66465aca90c07b247e218950c1d03594dc08c7170ad2e1a8aaa82b26612fe
3
+ size 2254810656
model-00004-of-00413.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f7ddaa7380c6dc2cc109b209bfb8450f56bd88f03bcba19f9f0735d24d65a1ed
3
+ size 2116402376
model-00005-of-00413.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:99060d0311ad70c53858396b8fbf2a24e2476b965e809335aac99eb3a536c685
3
+ size 2103016184
model-00006-of-00413.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4785d81a7df999ea5ffd7688ddc2228c607c6b01173e6e53cbe718fa2da6f8b2
3
+ size 2254810688
model-00007-of-00413.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1306d2e054fd20b6900b6d50965ee12257116f5967747a6cfe6dcfd5d8d4f8cd
3
+ size 2116402392
model-00008-of-00413.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:afb8c7602baf0a815e8ad02cbecfb80a9ae5d29f9ce10a578762544a5de1c0cb
3
+ size 2202839784
model-00009-of-00413.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a1d8a6c70f49e8adaf8dafd15c8235be9331ffb975f2dce5ffe986cfca770598
3
+ size 2151680440
model-00010-of-00413.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f94ec16a2f79adf7fa7eb9ba583fe5441146a8ee9405eb3a9e4ba62ce58b0016
3
+ size 2264926800
model-00011-of-00413.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8f4bd5579bb8314355e60ab945da0d43332914854a6877e1c7e29c0796bdcc45
3
+ size 2151680448
model-00012-of-00413.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9ad1363efef21cc1eff543eb1e18cdc35b706925468b31746589dd5520b27c44
3
+ size 2264926776
model-00013-of-00413.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d4ed9d125a753491a1a52257e4c3313c81e4b23ceec1ea92060f21fda6506d33
3
+ size 2151680456
model-00014-of-00413.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3874e30152a24167cffb45a368d644e848b39191a18250c8baa372badf1404fa
3
+ size 2264926792
model-00015-of-00413.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:33fa7b68a1ca03cd36dc34b43098a4342cb90b6ab29a4ed81bbc5fd22ff3206b
3
+ size 2151680440
model-00016-of-00413.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:52a2cdc9917920ae2e678dd8c8f5118bd269310dc24e09a9795def0bfc6db289
3
+ size 2264926792
model-00017-of-00413.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:966bb346a62cb5bcaf692cae407fb53e3ba58a6a7798a5012e7979110d44debb
3
+ size 2151680456
model-00018-of-00413.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6acadaecce036887bbf3d9e80bd9765f3f63ed147b0318c47dc151c484ee5dea
3
+ size 2264926776
model-00019-of-00413.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d45b4bfc0d98ffea2bbfa70672199845c424645139170705d1a615cb4b32bedf
3
+ size 2151680456
model-00020-of-00413.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b4d89e6f890817b4eaacbb61d447ea9741a1ae1750bb6b555f49f97eb53eed3a
3
+ size 2264926792
model-00021-of-00413.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:989d3448acab544b95096da7cb3fa2530c658c54eb9a067ef0f9735d0fed98e9
3
+ size 2151680440
model-00022-of-00413.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a4079b9211347c16ab94cf92b3be67b2506f7fd22924feeb886c511878ada34a
3
+ size 2264926792
model-00023-of-00413.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fa5667b97d73be5c951a84fd5141d7e5098854e386b08de3ad2d4a26619121d0
3
+ size 2151680456
model-00024-of-00413.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d1b4cfc292b3496550591242fa27d8853f48f26952a68a62264559e2a8a7026c
3
+ size 2264926776
model-00025-of-00413.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0084a9a0f19e13b06b7fd6c2ab77212ecabc1237991715a4cc0a8b3760bab059
3
+ size 2151680456
model-00026-of-00413.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ec99d07905384e93ff617910268fd614403273bb72a0e9a66e23477609404d39
3
+ size 2264926800
model-00027-of-00413.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cf1ae16fe8da6e760ccdcbff16b81047460783c12a97e35572db64b705832b36
3
+ size 2151680440
model-00028-of-00413.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ce157751c2a5278886267e41e2ad71a8fdc7bdd5f6fa798c939234992dd11ab2
3
+ size 2264926792
model-00029-of-00413.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7683f74793f627034ee64af8bac8fdd15ebc0e50127cc9779668091ca3410398
3
+ size 2151680456
model-00030-of-00413.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0a713332d8255b32f06f8ca0ff73b6a847c2e788f3086d26e921aa927b1630b2
3
+ size 2264926776
model-00031-of-00413.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4ddc6b163c1970bce84c2c78a34e426873dadda5b5ef1d574e044f914ffae47d
3
+ size 2151680448