Image-Text-to-Text
Transformers
Safetensors
internvl_chat
feature-extraction
conversational
custom_code
lkdhy committed · verified
Commit e2b664b · 1 Parent(s): 4239fff

Upload 29 files

added_tokens.json ADDED
@@ -0,0 +1,11 @@
+ {
+ "</box>": 92552,
+ "</img>": 92545,
+ "</quad>": 92548,
+ "</ref>": 92550,
+ "<IMG_CONTEXT>": 92546,
+ "<box>": 92551,
+ "<img>": 92544,
+ "<quad>": 92547,
+ "<ref>": 92549
+ }
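
added_tokens.json registers the InternVL special markers (image, grounding, and reference tags) at fixed vocabulary ids. A minimal sketch of checking those ids after downloading the checkpoint; the local path is a placeholder and loading assumes trust_remote_code, as with the upstream InternVL2_5 repos:

from transformers import AutoTokenizer

# placeholder path; point this at the downloaded checkpoint directory
tokenizer = AutoTokenizer.from_pretrained('/path/to/this/checkpoint', trust_remote_code=True, use_fast=False)
for token in ('<img>', '</img>', '<IMG_CONTEXT>', '<quad>', '</quad>', '<ref>', '</ref>', '<box>', '</box>'):
    print(token, tokenizer.convert_tokens_to_ids(token))  # e.g. <IMG_CONTEXT> -> 92546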
args.json ADDED
@@ -0,0 +1,438 @@
+ {
+ "model": "/root/model/InternVL2_5-8B",
+ "model_type": "internvl2_5",
+ "model_revision": null,
+ "task_type": "causal_lm",
+ "torch_dtype": "bfloat16",
+ "attn_impl": null,
+ "num_labels": null,
+ "problem_type": null,
+ "rope_scaling": null,
+ "device_map": null,
+ "max_memory": {},
+ "local_repo_path": null,
+ "template": "internvl2_5",
+ "system": "Please carefully observe the image, thoroughly understand the conditions provided in the question, use logical reasoning to arrive at the result, and reflect on and verify the reasoning process to ensure the accuracy of the answer. Finally, provide the correct answer.",
+ "max_length": 2048,
+ "truncation_strategy": "left",
+ "max_pixels": null,
+ "tools_prompt": "react_en",
+ "norm_bbox": null,
+ "response_prefix": null,
+ "padding_side": "right",
+ "loss_scale": "last_round",
+ "sequence_parallel_size": 1,
+ "use_chat_template": true,
+ "template_backend": "swift",
+ "dataset": [
+ "/mnt/data/user/zhao_jun/tangjixin/sample_data/games_new_v20_5k.json"
+ ],
+ "val_dataset": [],
+ "split_dataset_ratio": 0.01,
+ "data_seed": 42,
+ "dataset_num_proc": 4,
+ "streaming": false,
+ "enable_cache": false,
+ "download_mode": "reuse_dataset_if_exists",
+ "columns": {},
+ "strict": false,
+ "remove_unused_columns": false,
+ "model_name": [
+ null,
+ null
+ ],
+ "model_author": [
+ null,
+ null
+ ],
+ "custom_dataset_info": [],
+ "quant_method": null,
+ "quant_bits": null,
+ "hqq_axis": null,
+ "bnb_4bit_compute_dtype": "bfloat16",
+ "bnb_4bit_quant_type": "nf4",
+ "bnb_4bit_use_double_quant": true,
+ "bnb_4bit_quant_storage": null,
+ "max_new_tokens": 64,
+ "temperature": 1.0,
+ "top_k": 50,
+ "top_p": 0.85,
+ "repetition_penalty": 1.0,
+ "num_beams": 1,
+ "stream": false,
+ "stop_words": [],
+ "logprobs": false,
+ "top_logprobs": null,
+ "ckpt_dir": null,
+ "load_dataset_config": null,
+ "lora_modules": [],
+ "tuner_backend": "peft",
+ "train_type": "full",
+ "adapters": [],
+ "external_plugins": [
+ "/mnt/data/user/zhao_jun/tangjixin/ms-swift/examples/train/grpo/plugin/plugin.py"
+ ],
+ "seed": 42,
+ "model_kwargs": {},
+ "load_args": false,
+ "load_data_args": false,
+ "use_hf": false,
+ "hub_token": null,
+ "custom_register_path": [],
+ "ignore_args_error": false,
+ "use_swift_lora": false,
+ "output_dir": "/mnt/data/user/zhao_jun/tangjixin/output/model/intern2.5vl-7b-grpo_v2/v8-20250328-093218",
+ "overwrite_output_dir": false,
+ "do_train": false,
+ "do_eval": false,
+ "do_predict": false,
+ "eval_strategy": "steps",
+ "prediction_loss_only": false,
+ "per_device_train_batch_size": 3,
+ "per_device_eval_batch_size": 3,
+ "per_gpu_train_batch_size": null,
+ "per_gpu_eval_batch_size": null,
+ "gradient_accumulation_steps": 2,
+ "eval_accumulation_steps": null,
+ "eval_delay": 0,
+ "torch_empty_cache_steps": null,
+ "learning_rate": 2e-07,
+ "weight_decay": 0.1,
+ "adam_beta1": 0.9,
+ "adam_beta2": 0.95,
+ "adam_epsilon": 1e-08,
+ "max_grad_norm": 1.0,
+ "num_train_epochs": 1.0,
+ "max_steps": -1,
+ "lr_scheduler_type": "constant_with_warmup",
+ "lr_scheduler_kwargs": null,
+ "warmup_ratio": 0.05,
+ "warmup_steps": 0,
+ "log_level": "passive",
+ "log_level_replica": "warning",
+ "log_on_each_node": true,
+ "logging_dir": "/mnt/data/user/zhao_jun/tangjixin/output/model/intern2.5vl-7b-grpo_v2/v8-20250328-093218/runs",
+ "logging_strategy": "steps",
+ "logging_first_step": true,
+ "logging_steps": 5,
+ "logging_nan_inf_filter": true,
+ "save_strategy": "steps",
+ "save_steps": 250.0,
+ "save_total_limit": -1,
+ "save_safetensors": true,
+ "save_on_each_node": false,
+ "save_only_model": false,
+ "restore_callback_states_from_checkpoint": false,
+ "no_cuda": false,
+ "use_cpu": false,
+ "use_mps_device": false,
+ "jit_mode_eval": false,
+ "use_ipex": false,
+ "bf16": true,
+ "fp16": false,
+ "fp16_opt_level": "O1",
+ "half_precision_backend": "auto",
+ "bf16_full_eval": false,
+ "fp16_full_eval": false,
+ "tf32": null,
+ "local_rank": 0,
+ "ddp_backend": null,
+ "tpu_num_cores": null,
+ "tpu_metrics_debug": false,
+ "debug": null,
+ "dataloader_drop_last": false,
+ "eval_steps": 250.0,
+ "dataloader_num_workers": 4,
+ "dataloader_prefetch_factor": null,
+ "past_index": -1,
+ "run_name": null,
+ "disable_tqdm": null,
+ "label_names": null,
+ "load_best_model_at_end": false,
+ "metric_for_best_model": "reward",
+ "greater_is_better": true,
+ "ignore_data_skip": false,
+ "fsdp": "",
+ "fsdp_min_num_params": 0,
+ "fsdp_config": null,
+ "fsdp_transformer_layer_cls_to_wrap": null,
+ "accelerator_config": {
+ "dispatch_batches": false
+ },
+ "deepspeed": {
+ "fp16": {
+ "enabled": "auto",
+ "loss_scale": 0,
+ "loss_scale_window": 1000,
+ "initial_scale_power": 16,
+ "hysteresis": 2,
+ "min_loss_scale": 1
+ },
+ "bf16": {
+ "enabled": "auto"
+ },
+ "zero_optimization": {
+ "stage": 3,
+ "offload_optimizer": {
+ "device": "none",
+ "pin_memory": true
+ },
+ "offload_param": {
+ "device": "none",
+ "pin_memory": true
+ },
+ "overlap_comm": false,
+ "contiguous_gradients": true,
+ "sub_group_size": 1000000000.0,
+ "reduce_bucket_size": "auto",
+ "zero_quantized_weights": false,
+ "zero_quantized_gradients": false,
+ "stage3_prefetch_bucket_size": 0,
+ "stage3_param_persistence_threshold": "auto",
+ "stage3_max_live_parameters": 1000000000.0,
+ "stage3_max_reuse_distance": 1000000000.0,
+ "stage3_gather_16bit_weights_on_model_save": true
+ },
+ "gradient_accumulation_steps": "auto",
+ "gradient_clipping": "auto",
+ "steps_per_print": 2000,
+ "train_batch_size": "auto",
+ "train_micro_batch_size_per_gpu": "auto",
+ "wall_clock_breakdown": false
+ },
+ "label_smoothing_factor": 0.0,
+ "optim": "adamw_torch",
+ "optim_args": null,
+ "adafactor": false,
+ "group_by_length": false,
+ "length_column_name": "length",
+ "report_to": [
+ "wandb"
+ ],
+ "ddp_find_unused_parameters": null,
+ "ddp_bucket_cap_mb": null,
+ "ddp_broadcast_buffers": null,
+ "dataloader_pin_memory": true,
+ "dataloader_persistent_workers": false,
+ "skip_memory_metrics": true,
+ "use_legacy_prediction_loop": false,
+ "push_to_hub": false,
+ "resume_from_checkpoint": null,
+ "hub_model_id": null,
+ "hub_strategy": "every_save",
+ "hub_private_repo": null,
+ "hub_always_push": false,
+ "gradient_checkpointing": true,
+ "gradient_checkpointing_kwargs": null,
+ "include_inputs_for_metrics": false,
+ "include_for_metrics": [],
+ "eval_do_concat_batches": true,
+ "fp16_backend": "auto",
+ "evaluation_strategy": "steps",
+ "push_to_hub_model_id": null,
+ "push_to_hub_organization": null,
+ "push_to_hub_token": null,
+ "mp_parameters": "",
+ "auto_find_batch_size": false,
+ "full_determinism": false,
+ "torchdynamo": null,
+ "ray_scope": "last",
+ "ddp_timeout": 1800,
+ "torch_compile": false,
+ "torch_compile_backend": null,
+ "torch_compile_mode": null,
+ "dispatch_batches": null,
+ "split_batches": null,
+ "include_tokens_per_second": false,
+ "include_num_input_tokens_seen": false,
+ "neftune_noise_alpha": null,
+ "optim_target_modules": null,
+ "batch_eval_metrics": false,
+ "eval_on_start": false,
+ "use_liger_kernel": false,
+ "eval_use_gather_object": false,
+ "average_tokens_across_devices": false,
+ "sortish_sampler": false,
+ "predict_with_generate": false,
+ "generation_max_length": null,
+ "generation_num_beams": null,
+ "generation_config": null,
+ "check_model": true,
+ "acc_strategy": "token",
+ "train_sampler_random": true,
+ "metric_warmup_step": 0,
+ "fsdp_num": 1,
+ "acc_steps": 1,
+ "eval_use_evalscope": false,
+ "eval_datasets": [],
+ "eval_limit": null,
+ "eval_datasets_args": null,
+ "eval_generation_config": null,
+ "freeze_parameters": [
+ "vision_model",
+ "mlp1"
+ ],
+ "freeze_parameters_ratio": 0.0,
+ "trainable_parameters": [],
+ "freeze_llm": false,
+ "freeze_vit": true,
+ "freeze_aligner": true,
+ "target_modules": [
+ "all-linear"
+ ],
+ "target_regex": null,
+ "modules_to_save": [],
+ "lora_rank": 8,
+ "lora_alpha": 32,
+ "lora_dropout": 0.05,
+ "lora_bias": "none",
+ "lora_dtype": null,
+ "lorap_lr_ratio": null,
+ "use_rslora": false,
+ "use_dora": false,
+ "lora_ga_batch_size": 2,
+ "lora_ga_iters": 2,
+ "lora_ga_max_length": 1024,
+ "lora_ga_direction": "ArB2r",
+ "lora_ga_scale": "stable",
+ "lora_ga_stable_gamma": 16,
+ "init_weights": true,
+ "fourier_n_frequency": 2000,
+ "fourier_scaling": 300.0,
+ "boft_block_size": 4,
+ "boft_block_num": 0,
+ "boft_n_butterfly_factor": 1,
+ "boft_dropout": 0.0,
+ "vera_rank": 256,
+ "vera_projection_prng_key": 0,
+ "vera_dropout": 0.0,
+ "vera_d_initial": 0.1,
+ "adapter_act": "gelu",
+ "adapter_length": 128,
+ "use_galore": false,
+ "galore_target_modules": null,
+ "galore_rank": 128,
+ "galore_update_proj_gap": 50,
+ "galore_scale": 1.0,
+ "galore_proj_type": "std",
+ "galore_optim_per_parameter": false,
+ "galore_with_embedding": false,
+ "galore_quantization": false,
+ "galore_proj_quant": false,
+ "galore_proj_bits": 4,
+ "galore_proj_group_size": 256,
+ "galore_cos_threshold": 0.4,
+ "galore_gamma_proj": 2,
+ "galore_queue_size": 5,
+ "adalora_target_r": 8,
+ "adalora_init_r": 12,
+ "adalora_tinit": 0,
+ "adalora_tfinal": 0,
+ "adalora_deltaT": 1,
+ "adalora_beta1": 0.85,
+ "adalora_beta2": 0.85,
+ "adalora_orth_reg_weight": 0.5,
+ "llamapro_num_new_blocks": 4,
+ "llamapro_num_groups": null,
+ "lisa_activated_layers": 0,
+ "lisa_step_interval": 20,
+ "reft_layer_key": null,
+ "reft_layers": null,
+ "reft_rank": 4,
+ "reft_intervention_type": "LoreftIntervention",
+ "reft_args": null,
+ "use_liger": false,
+ "swanlab_token": null,
+ "swanlab_project": null,
+ "swanlab_workspace": null,
+ "swanlab_exp_name": null,
+ "swanlab_mode": "cloud",
+ "add_version": true,
+ "resume_only_model": false,
+ "create_checkpoint_symlink": false,
+ "packing": false,
+ "lazy_tokenize": true,
+ "loss_type": null,
+ "optimizer": null,
+ "metric": null,
+ "zero_hpz_partition_size": null,
+ "reward_model": null,
+ "reward_adapters": [],
+ "reward_model_type": null,
+ "reward_model_revision": null,
+ "num_ppo_epochs": 4,
+ "whiten_rewards": false,
+ "kl_coef": 0.05,
+ "cliprange": 0.2,
+ "vf_coef": 0.1,
+ "cliprange_value": 0.2,
+ "gamma": 1.0,
+ "lam": 0.95,
+ "num_mini_batches": 1,
+ "local_rollout_forward_batch_size": 64,
+ "num_sample_generations": 10,
+ "response_length": 512,
+ "missing_eos_penalty": null,
+ "epsilon": 0.2,
+ "epsilon_high": null,
+ "num_infer_workers": 1,
+ "vllm_max_num_seqs": 256,
+ "vllm_enforce_eager": false,
+ "vllm_limit_mm_per_prompt": null,
+ "vllm_enable_prefix_caching": true,
+ "cosine_min_len_value_wrong": -0.5,
+ "cosine_max_len_value_wrong": 0.0,
+ "cosine_min_len_value_correct": 1.0,
+ "cosine_max_len_value_correct": 0.5,
+ "cosine_max_len": null,
+ "repetition_n_grams": 3,
+ "repetition_max_penalty": -1.0,
+ "use_lmdeploy": true,
+ "lmdeploy_device": "auto",
+ "lmdeploy_session_len": null,
+ "lmdeploy_cache_max_entry_count": 0.8,
+ "async_generate": true,
+ "tensor_parallel_size": 1,
+ "sleep_level": 0,
+ "move_model_batches": null,
+ "offload_optimizer": false,
+ "offload_model": false,
+ "gc_collect_after_offload": false,
+ "multi_turn_func": null,
+ "mini_batch_size": null,
+ "num_generations": 12,
+ "max_completion_length": 2048,
+ "ds3_gather_for_generation": true,
+ "reward_funcs": [
+ "external_r1v_acc"
+ ],
+ "reward_weights": null,
+ "log_completions": true,
+ "use_vllm": false,
+ "vllm_device": [
+ "auto"
+ ],
+ "vllm_gpu_memory_utilization": 0.9,
+ "vllm_max_model_len": null,
+ "num_iterations": 1,
+ "rlhf_type": "grpo",
+ "ref_model": "/root/model/InternVL2_5-8B",
+ "ref_model_type": "internvl2_5",
+ "ref_model_revision": null,
+ "beta": 0.04,
+ "label_smoothing": 0,
+ "rpo_alpha": 1.0,
+ "cpo_alpha": 1.0,
+ "simpo_gamma": 1,
+ "desirable_weight": 1.0,
+ "undesirable_weight": 1.0,
+ "rank": 0,
+ "global_world_size": 4,
+ "local_world_size": 4,
+ "model_suffix": "InternVL2_5-8B",
+ "model_info": "ModelInfo(model_type='internvl2_5', model_dir='/root/model/InternVL2_5-8B', torch_dtype=torch.bfloat16, max_model_len=32768, quant_method=None, quant_bits=None, rope_scaling={'factor': 2.0, 'type': 'dynamic'}, config=None, task_type='causal_lm', num_labels=None)",
+ "model_meta": "ModelMeta(model_type='internvl2_5', model_groups=[ModelGroup(models=[Model(ms_model_id='OpenGVLab/InternVL2_5-1B', hf_model_id='OpenGVLab/InternVL2_5-1B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL2_5-2B', hf_model_id='OpenGVLab/InternVL2_5-2B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL2_5-4B', hf_model_id='OpenGVLab/InternVL2_5-4B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL2_5-8B', hf_model_id='OpenGVLab/InternVL2_5-8B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL2_5-26B', hf_model_id='OpenGVLab/InternVL2_5-26B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL2_5-38B', hf_model_id='OpenGVLab/InternVL2_5-38B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL2_5-78B', hf_model_id='OpenGVLab/InternVL2_5-78B', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[]), ModelGroup(models=[Model(ms_model_id='OpenGVLab/InternVL2_5-4B-AWQ', hf_model_id='OpenGVLab/InternVL2_5-4B-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL2_5-8B-AWQ', hf_model_id='OpenGVLab/InternVL2_5-8B-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL2_5-26B-AWQ', hf_model_id='OpenGVLab/InternVL2_5-26B-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL2_5-38B-AWQ', hf_model_id='OpenGVLab/InternVL2_5-38B-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL2_5-78B-AWQ', hf_model_id='OpenGVLab/InternVL2_5-78B-AWQ', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[]), ModelGroup(models=[Model(ms_model_id='OpenGVLab/InternVL2_5-1B-MPO', hf_model_id='OpenGVLab/InternVL2_5-1B-MPO', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL2_5-2B-MPO', hf_model_id='OpenGVLab/InternVL2_5-2B-MPO', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL2_5-4B-MPO', hf_model_id='OpenGVLab/InternVL2_5-4B-MPO', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL2_5-8B-MPO', hf_model_id='OpenGVLab/InternVL2_5-8B-MPO', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL2_5-26B-MPO', hf_model_id='OpenGVLab/InternVL2_5-26B-MPO', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL2_5-38B-MPO', hf_model_id='OpenGVLab/InternVL2_5-38B-MPO', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='OpenGVLab/InternVL2_5-78B-MPO', hf_model_id='OpenGVLab/InternVL2_5-78B-MPO', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[])], template='internvl2_5', get_function=<function get_model_tokenizer_internvl at 0x7f62d105a830>, model_arch='internvl', architectures=['InternVLChatModel'], additional_saved_files=[], torch_dtype=None, is_multimodal=True, is_reward=False, task_type=None, ignore_patterns=['*.zip', '*.gguf', '*.pth', '*.pt', 'consolidated*', 'onnx/*', '*.safetensors.md', '*.msgpack', '*.onnx', '*.ot', '*.h5', '*.bin', '*.safetensors'], requires=['transformers>=4.36', 'timm'], tags=[])",
+ "model_dir": "/root/model/InternVL2_5-8B",
+ "hub": "<class 'swift.hub.hub.MSHub'>",
+ "training_args": "GRPOConfig(output_dir='/mnt/data/user/zhao_jun/tangjixin/output/model/intern2.5vl-7b-grpo_v2/v8-20250328-093218', overwrite_output_dir=False, do_train=False, do_eval=True, do_predict=False, eval_strategy=<IntervalStrategy.STEPS: 'steps'>, prediction_loss_only=False, per_device_train_batch_size=3, per_device_eval_batch_size=3, per_gpu_train_batch_size=None, per_gpu_eval_batch_size=None, gradient_accumulation_steps=2, eval_accumulation_steps=None, eval_delay=0, torch_empty_cache_steps=None, learning_rate=2e-07, weight_decay=0.1, adam_beta1=0.9, adam_beta2=0.95, adam_epsilon=1e-08, max_grad_norm=1.0, num_train_epochs=1.0, max_steps=-1, lr_scheduler_type=<SchedulerType.CONSTANT_WITH_WARMUP: 'constant_with_warmup'>, lr_scheduler_kwargs=None, warmup_ratio=0.05, warmup_steps=0, log_level='passive', log_level_replica='warning', log_on_each_node=True, logging_dir='/mnt/data/user/zhao_jun/tangjixin/output/model/intern2.5vl-7b-grpo_v2/v8-20250328-093218/runs', logging_strategy=<IntervalStrategy.STEPS: 'steps'>, logging_first_step=True, logging_steps=5, logging_nan_inf_filter=True, save_strategy=<SaveStrategy.STEPS: 'steps'>, save_steps=250, save_total_limit=-1, save_safetensors=True, save_on_each_node=False, save_only_model=False, restore_callback_states_from_checkpoint=False, no_cuda=False, use_cpu=False, use_mps_device=False, seed=42, data_seed=42, jit_mode_eval=False, use_ipex=False, bf16=True, fp16=False, fp16_opt_level='O1', half_precision_backend='auto', bf16_full_eval=False, fp16_full_eval=False, tf32=None, local_rank=0, ddp_backend=None, tpu_num_cores=None, tpu_metrics_debug=False, debug=[], dataloader_drop_last=False, eval_steps=250, dataloader_num_workers=4, dataloader_prefetch_factor=None, past_index=-1, run_name='/mnt/data/user/zhao_jun/tangjixin/output/model/intern2.5vl-7b-grpo_v2/v8-20250328-093218', disable_tqdm=False, remove_unused_columns=False, label_names=None, load_best_model_at_end=False, metric_for_best_model='reward', greater_is_better=True, ignore_data_skip=False, fsdp=[], fsdp_min_num_params=0, fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, fsdp_transformer_layer_cls_to_wrap=None, accelerator_config=AcceleratorConfig(split_batches=False, dispatch_batches=False, even_batches=True, use_seedable_sampler=True, non_blocking=False, gradient_accumulation_kwargs=None, use_configured_state=False), deepspeed={'fp16': {'enabled': 'auto', 'loss_scale': 0, 'loss_scale_window': 1000, 'initial_scale_power': 16, 'hysteresis': 2, 'min_loss_scale': 1}, 'bf16': {'enabled': 'auto'}, 'zero_optimization': {'stage': 3, 'offload_optimizer': {'device': 'none', 'pin_memory': True}, 'offload_param': {'device': 'none', 'pin_memory': True}, 'overlap_comm': False, 'contiguous_gradients': True, 'sub_group_size': 1000000000.0, 'reduce_bucket_size': 'auto', 'zero_quantized_weights': False, 'zero_quantized_gradients': False, 'stage3_prefetch_bucket_size': 'auto', 'stage3_param_persistence_threshold': 'auto', 'stage3_max_live_parameters': 1000000000.0, 'stage3_max_reuse_distance': 1000000000.0, 'stage3_gather_16bit_weights_on_model_save': True}, 'gradient_accumulation_steps': 'auto', 'gradient_clipping': 'auto', 'steps_per_print': 2000, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'wall_clock_breakdown': False}, label_smoothing_factor=0.0, optim=<OptimizerNames.ADAMW_TORCH: 'adamw_torch'>, optim_args=None, adafactor=False, group_by_length=False, length_column_name='length', report_to=['wandb'], 
ddp_find_unused_parameters=None, ddp_bucket_cap_mb=None, ddp_broadcast_buffers=None, dataloader_pin_memory=True, dataloader_persistent_workers=False, skip_memory_metrics=True, use_legacy_prediction_loop=False, push_to_hub=False, resume_from_checkpoint=None, hub_model_id=None, hub_strategy=<HubStrategy.EVERY_SAVE: 'every_save'>, hub_token=None, hub_private_repo=None, hub_always_push=False, gradient_checkpointing=True, gradient_checkpointing_kwargs=None, include_inputs_for_metrics=False, include_for_metrics=[], eval_do_concat_batches=True, fp16_backend='auto', evaluation_strategy='steps', push_to_hub_model_id=None, push_to_hub_organization=None, push_to_hub_token=None, mp_parameters='', auto_find_batch_size=False, full_determinism=False, torchdynamo=None, ray_scope='last', ddp_timeout=1800, torch_compile=False, torch_compile_backend=None, torch_compile_mode=None, dispatch_batches=None, split_batches=None, include_tokens_per_second=None, include_num_input_tokens_seen=None, neftune_noise_alpha=None, optim_target_modules=None, batch_eval_metrics=False, eval_on_start=False, use_liger_kernel=False, eval_use_gather_object=False, average_tokens_across_devices=None, model_init_kwargs=None, max_prompt_length=512, num_generations=12, temperature=1.0, max_completion_length=2048, ds3_gather_for_generation=True, use_vllm=False, vllm_device=['auto'], vllm_gpu_memory_utilization=0.9, vllm_dtype='auto', vllm_max_model_len=None, vllm_enable_prefix_caching=True, vllm_guided_decoding_regex=None, beta=0.04, num_iterations=1, epsilon=0.2, reward_weights=None, sync_ref_model=False, ref_model_mixup_alpha=0.6, ref_model_sync_steps=512, log_completions=True, check_model=True, acc_strategy='token', train_sampler_random=True, metric_warmup_step=0, fsdp_num=1, acc_steps=1, eval_use_evalscope=False, eval_datasets=[], eval_limit=None, eval_datasets_args=None, eval_generation_config=None, train_type='full', optimizer=None, local_repo_path=None, galore_config=None, epsilon_high=None, top_k=50, top_p=0.85, repetition_penalty=1.0, num_infer_workers=1, vllm_max_num_seqs=256, vllm_enforce_eager=False, vllm_limit_mm_per_prompt={}, cosine_min_len_value_wrong=-0.5, cosine_max_len_value_wrong=0.0, cosine_min_len_value_correct=1.0, cosine_max_len_value_correct=0.5, cosine_max_len=2048, repetition_n_grams=3, repetition_max_penalty=-1.0, use_lmdeploy=True, lmdeploy_device='auto', lmdeploy_session_len=None, lmdeploy_cache_max_entry_count=0.8, async_generate=True, tensor_parallel_size=1, sleep_level=0, move_model_batches=None, offload_optimizer=False, offload_model=False, gc_collect_after_offload=False, multi_turn_func=None, mini_batch_size=None, stop_words=[])"
+ }
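
args.json records the ms-swift GRPO training arguments for this run (full-parameter training of the language model with the vision encoder and the mlp1 projector frozen, DeepSpeed ZeRO-3, LMDeploy rollouts). A back-of-the-envelope reading of the batch-related fields above; how the GRPO trainer groups prompts against the 12 completions per prompt is up to ms-swift, so this is only the usual Transformers-style effective batch:

# values copied from args.json above
per_device_train_batch_size = 3
gradient_accumulation_steps = 2
global_world_size = 4
effective_batch = per_device_train_batch_size * gradient_accumulation_steps * global_world_size
print(effective_batch)  # 24 samples per optimizer step across the 4 ranks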
config.json ADDED
@@ -0,0 +1,208 @@
+ {
+ "_commit_hash": null,
+ "_name_or_path": "/root/model/InternVL2_5-8B",
+ "architectures": [
+ "InternVLChatModel"
+ ],
+ "auto_map": {
+ "AutoConfig": "configuration_internvl_chat.InternVLChatConfig",
+ "AutoModel": "modeling_internvl_chat.InternVLChatModel",
+ "AutoModelForCausalLM": "modeling_internvl_chat.InternVLChatModel"
+ },
+ "downsample_ratio": 0.5,
+ "dynamic_image_size": true,
+ "force_image_size": 448,
+ "hidden_size": 4096,
+ "keys_to_ignore_at_inference": [
+ "past_key_values"
+ ],
+ "llm_config": {
+ "_attn_implementation_autoset": true,
+ "_name_or_path": "internlm/internlm2_5-7b-chat",
+ "add_cross_attention": false,
+ "architectures": [
+ "InternLM2ForCausalLM"
+ ],
+ "attn_implementation": "flash_attention_2",
+ "auto_map": {
+ "AutoConfig": "configuration_internlm2.InternLM2Config",
+ "AutoModel": "modeling_internlm2.InternLM2ForCausalLM",
+ "AutoModelForCausalLM": "modeling_internlm2.InternLM2ForCausalLM",
+ "AutoModelForSequenceClassification": "modeling_internlm2.InternLM2ForSequenceClassification"
+ },
+ "bad_words_ids": null,
+ "begin_suppress_tokens": null,
+ "bias": false,
+ "bos_token_id": 1,
+ "chunk_size_feed_forward": 0,
+ "cross_attention_hidden_size": null,
+ "decoder_start_token_id": null,
+ "diversity_penalty": 0.0,
+ "do_sample": false,
+ "early_stopping": false,
+ "encoder_no_repeat_ngram_size": 0,
+ "eos_token_id": 2,
+ "exponential_decay_length_penalty": null,
+ "finetuning_task": null,
+ "forced_bos_token_id": null,
+ "forced_eos_token_id": null,
+ "hidden_act": "silu",
+ "hidden_size": 4096,
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1"
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": 14336,
+ "is_decoder": false,
+ "is_encoder_decoder": false,
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1
+ },
+ "length_penalty": 1.0,
+ "max_length": 20,
+ "max_position_embeddings": 32768,
+ "min_length": 0,
+ "model_type": "internlm2",
+ "no_repeat_ngram_size": 0,
+ "num_attention_heads": 32,
+ "num_beam_groups": 1,
+ "num_beams": 1,
+ "num_hidden_layers": 32,
+ "num_key_value_heads": 8,
+ "num_return_sequences": 1,
+ "output_attentions": false,
+ "output_hidden_states": false,
+ "output_scores": false,
+ "pad_token_id": 2,
+ "prefix": null,
+ "pretraining_tp": 1,
+ "problem_type": null,
+ "pruned_heads": {},
+ "remove_invalid_values": false,
+ "repetition_penalty": 1.0,
+ "return_dict": true,
+ "return_dict_in_generate": false,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": {
+ "factor": 2.0,
+ "type": "dynamic"
+ },
+ "rope_theta": 1000000,
+ "sep_token_id": null,
+ "suppress_tokens": null,
+ "task_specific_params": null,
+ "temperature": 1.0,
+ "tf_legacy_loss": false,
+ "tie_encoder_decoder": false,
+ "tie_word_embeddings": false,
+ "tokenizer_class": null,
+ "top_k": 50,
+ "top_p": 1.0,
+ "torch_dtype": "bfloat16",
+ "torchscript": false,
+ "transformers_version": "4.48.2",
+ "typical_p": 1.0,
+ "use_bfloat16": true,
+ "use_cache": false,
+ "vocab_size": 92553
+ },
+ "max_dynamic_patch": 12,
+ "min_dynamic_patch": 1,
+ "model_type": "internvl_chat",
+ "pad_token_id": 2,
+ "ps_version": "v2",
+ "select_layer": -1,
+ "template": "internvl2_5",
+ "torch_dtype": "bfloat16",
+ "transformers_version": null,
+ "use_backbone_lora": 0,
+ "use_llm_lora": 0,
+ "use_thumbnail": true,
+ "vision_config": {
+ "_attn_implementation_autoset": true,
+ "_name_or_path": "",
+ "add_cross_attention": false,
+ "architectures": [
+ "InternVisionModel"
+ ],
+ "attention_dropout": 0.0,
+ "bad_words_ids": null,
+ "begin_suppress_tokens": null,
+ "bos_token_id": null,
+ "chunk_size_feed_forward": 0,
+ "cross_attention_hidden_size": null,
+ "decoder_start_token_id": null,
+ "diversity_penalty": 0.0,
+ "do_sample": false,
+ "drop_path_rate": 0.0,
+ "dropout": 0.0,
+ "early_stopping": false,
+ "encoder_no_repeat_ngram_size": 0,
+ "eos_token_id": null,
+ "exponential_decay_length_penalty": null,
+ "finetuning_task": null,
+ "forced_bos_token_id": null,
+ "forced_eos_token_id": null,
+ "hidden_act": "gelu",
+ "hidden_size": 1024,
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1"
+ },
+ "image_size": 448,
+ "initializer_factor": 1.0,
+ "initializer_range": 0.02,
+ "intermediate_size": 4096,
+ "is_decoder": false,
+ "is_encoder_decoder": false,
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1
+ },
+ "layer_norm_eps": 1e-06,
+ "length_penalty": 1.0,
+ "max_length": 20,
+ "min_length": 0,
+ "model_type": "intern_vit_6b",
+ "no_repeat_ngram_size": 0,
+ "norm_type": "layer_norm",
+ "num_attention_heads": 16,
+ "num_beam_groups": 1,
+ "num_beams": 1,
+ "num_channels": 3,
+ "num_hidden_layers": 24,
+ "num_return_sequences": 1,
+ "output_attentions": false,
+ "output_hidden_states": false,
+ "output_scores": false,
+ "pad_token_id": 2,
+ "patch_size": 14,
+ "prefix": null,
+ "problem_type": null,
+ "pruned_heads": {},
+ "qk_normalization": false,
+ "qkv_bias": true,
+ "remove_invalid_values": false,
+ "repetition_penalty": 1.0,
+ "return_dict": true,
+ "return_dict_in_generate": false,
+ "sep_token_id": null,
+ "suppress_tokens": null,
+ "task_specific_params": null,
+ "temperature": 1.0,
+ "tf_legacy_loss": false,
+ "tie_encoder_decoder": false,
+ "tie_word_embeddings": true,
+ "tokenizer_class": null,
+ "top_k": 50,
+ "top_p": 1.0,
+ "torch_dtype": "bfloat16",
+ "torchscript": false,
+ "transformers_version": "4.48.2",
+ "typical_p": 1.0,
+ "use_bfloat16": true,
+ "use_flash_attn": true
+ }
+ }
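
config.json wires AutoConfig/AutoModel to the custom code files in this repo via auto_map, so loading requires trust_remote_code. A hedged loading sketch (the path is a placeholder; the dtype mirrors "torch_dtype": "bfloat16" above):

import torch
from transformers import AutoModel, AutoTokenizer

path = '/path/to/this/checkpoint'  # placeholder
tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=False)
model = AutoModel.from_pretrained(path, torch_dtype=torch.bfloat16, trust_remote_code=True).eval()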
configuration_intern_vit.py ADDED
@@ -0,0 +1,120 @@
+ # --------------------------------------------------------
+ # InternVL
+ # Copyright (c) 2024 OpenGVLab
+ # Licensed under The MIT License [see LICENSE for details]
+ # --------------------------------------------------------
+
+ import os
+ from typing import Union
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+ logger = logging.get_logger(__name__)
+
+
+ class InternVisionConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`InternVisionModel`]. It is used to
+     instantiate a vision encoder according to the specified arguments, defining the model architecture.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+     Args:
+         num_channels (`int`, *optional*, defaults to 3):
+             Number of color channels in the input images (e.g., 3 for RGB).
+         patch_size (`int`, *optional*, defaults to 14):
+             The size (resolution) of each patch.
+         image_size (`int`, *optional*, defaults to 224):
+             The size (resolution) of each image.
+         qkv_bias (`bool`, *optional*, defaults to `False`):
+             Whether to add a bias to the queries and values in the self-attention layers.
+         hidden_size (`int`, *optional*, defaults to 3200):
+             Dimensionality of the encoder layers and the pooler layer.
+         num_attention_heads (`int`, *optional*, defaults to 25):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         intermediate_size (`int`, *optional*, defaults to 12800):
+             Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+         qk_normalization (`bool`, *optional*, defaults to `True`):
+             Whether to normalize the queries and keys in the self-attention layers.
+         num_hidden_layers (`int`, *optional*, defaults to 48):
+             Number of hidden layers in the Transformer encoder.
+         use_flash_attn (`bool`, *optional*, defaults to `True`):
+             Whether to use flash attention mechanism.
+         hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+             The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+             `"relu"`, `"selu"` and `"gelu_new"` ``"gelu"` are supported.
+         layer_norm_eps (`float`, *optional*, defaults to 1e-6):
+             The epsilon used by the layer normalization layers.
+         dropout (`float`, *optional*, defaults to 0.0):
+             The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+         drop_path_rate (`float`, *optional*, defaults to 0.0):
+             Dropout rate for stochastic depth.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         initializer_factor (`float`, *optional*, defaults to 0.1):
+             A factor for layer scale.
+     """
+
+     model_type = 'intern_vit_6b'
+
+     def __init__(
+             self,
+             num_channels=3,
+             patch_size=14,
+             image_size=224,
+             qkv_bias=False,
+             hidden_size=3200,
+             num_attention_heads=25,
+             intermediate_size=12800,
+             qk_normalization=True,
+             num_hidden_layers=48,
+             use_flash_attn=True,
+             hidden_act='gelu',
+             norm_type='rms_norm',
+             layer_norm_eps=1e-6,
+             dropout=0.0,
+             drop_path_rate=0.0,
+             attention_dropout=0.0,
+             initializer_range=0.02,
+             initializer_factor=0.1,
+             **kwargs,
+     ):
+         super().__init__(**kwargs)
+
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.dropout = dropout
+         self.drop_path_rate = drop_path_rate
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.num_channels = num_channels
+         self.patch_size = patch_size
+         self.image_size = image_size
+         self.initializer_range = initializer_range
+         self.initializer_factor = initializer_factor
+         self.attention_dropout = attention_dropout
+         self.layer_norm_eps = layer_norm_eps
+         self.hidden_act = hidden_act
+         self.norm_type = norm_type
+         self.qkv_bias = qkv_bias
+         self.qk_normalization = qk_normalization
+         self.use_flash_attn = use_flash_attn
+
+     @classmethod
+     def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig':
+         config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
+
+         if 'vision_config' in config_dict:
+             config_dict = config_dict['vision_config']
+
+         if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type:
+             logger.warning(
+                 f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
+                 f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
+             )
+
+         return cls.from_dict(config_dict, **kwargs)
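
A minimal usage sketch for the class above, assuming it is run from the checkpoint directory so the module is importable; the constructor values mirror the vision_config block in config.json rather than the class defaults:

from configuration_intern_vit import InternVisionConfig

vision_cfg = InternVisionConfig(hidden_size=1024, num_attention_heads=16, num_hidden_layers=24,
                                image_size=448, patch_size=14, qkv_bias=True, norm_type='layer_norm')
print(vision_cfg.model_type)  # intern_vit_6b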
configuration_internlm2.py ADDED
@@ -0,0 +1,150 @@
+ # Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
+ #
+ # This code is based on transformers/src/transformers/models/llama/configuration_llama.py
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ InternLM2 model configuration"""
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+ logger = logging.get_logger(__name__)
+
+ INTERNLM2_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
+
+
+ # Modified from transformers.model.llama.configuration_llama.LlamaConfig
+ class InternLM2Config(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`InternLM2Model`]. It is used to instantiate
+     an InternLM2 model according to the specified arguments, defining the model architecture. Instantiating a
+     configuration with the defaults will yield a similar configuration to that of the InternLM2-7B.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 32000):
+             Vocabulary size of the InternLM2 model. Defines the number of different tokens that can be represented by the
+             `inputs_ids` passed when calling [`InternLM2Model`]
+         hidden_size (`int`, *optional*, defaults to 4096):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 11008):
+             Dimension of the MLP representations.
+         num_hidden_layers (`int`, *optional*, defaults to 32):
+             Number of hidden layers in the Transformer encoder.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         num_key_value_heads (`int`, *optional*):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+             `num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+             by meanpooling all the original heads within that group. For more details checkout [this
+             paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
+             `num_attention_heads`.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         max_position_embeddings (`int`, *optional*, defaults to 2048):
+             The maximum sequence length that this model might ever be used with. Typically set this to something large
+             just in case (e.g., 512 or 1024 or 2048).
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-12):
+             The epsilon used by the rms normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         tie_word_embeddings(`bool`, *optional*, defaults to `False`):
+             Whether to tie weight embeddings
+         Example:
+
+     """
+     model_type = 'internlm2'
+     _auto_class = 'AutoConfig'
+
+     def __init__(  # pylint: disable=W0102
+         self,
+         vocab_size=103168,
+         hidden_size=4096,
+         intermediate_size=11008,
+         num_hidden_layers=32,
+         num_attention_heads=32,
+         num_key_value_heads=None,
+         hidden_act='silu',
+         max_position_embeddings=2048,
+         initializer_range=0.02,
+         rms_norm_eps=1e-6,
+         use_cache=True,
+         pad_token_id=0,
+         bos_token_id=1,
+         eos_token_id=2,
+         tie_word_embeddings=False,
+         bias=True,
+         rope_theta=10000,
+         rope_scaling=None,
+         attn_implementation='eager',
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.bias = bias
+
+         if num_key_value_heads is None:
+             num_key_value_heads = num_attention_heads
+         self.num_key_value_heads = num_key_value_heads
+
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.rope_scaling = rope_scaling
+         self._rope_scaling_validation()
+
+         self.attn_implementation = attn_implementation
+         if self.attn_implementation is None:
+             self.attn_implementation = 'eager'
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
+
+     def _rope_scaling_validation(self):
+         """
+         Validate the `rope_scaling` configuration.
+         """
+         if self.rope_scaling is None:
+             return
+
+         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
+             raise ValueError(
+                 '`rope_scaling` must be a dictionary with with two fields, `type` and `factor`, '
+                 f'got {self.rope_scaling}'
+             )
+         rope_scaling_type = self.rope_scaling.get('type', None)
+         rope_scaling_factor = self.rope_scaling.get('factor', None)
+         if rope_scaling_type is None or rope_scaling_type not in ['linear', 'dynamic']:
+             raise ValueError(
+                 f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
+             )
+         if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor < 1.0:
+             raise ValueError(f"`rope_scaling`'s factor field must be a float >= 1, got {rope_scaling_factor}")
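
A short sketch of the rope_scaling validation path above, again assuming the checkpoint directory is the working directory so the module can be imported; the first call mirrors the dynamic/2.0 scaling recorded in config.json:

from configuration_internlm2 import InternLM2Config

cfg = InternLM2Config(rope_scaling={'type': 'dynamic', 'factor': 2.0})  # passes _rope_scaling_validation
try:
    InternLM2Config(rope_scaling={'type': 'yarn', 'factor': 2.0})  # unsupported type
except ValueError as err:
    print(err)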
configuration_internvl_chat.py ADDED
@@ -0,0 +1,96 @@
+ # --------------------------------------------------------
+ # InternVL
+ # Copyright (c) 2024 OpenGVLab
+ # Licensed under The MIT License [see LICENSE for details]
+ # --------------------------------------------------------
+
+ import copy
+
+ from transformers import AutoConfig, LlamaConfig
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+ from .configuration_intern_vit import InternVisionConfig
+ from .configuration_internlm2 import InternLM2Config
+
+ logger = logging.get_logger(__name__)
+
+
+ class InternVLChatConfig(PretrainedConfig):
+     model_type = 'internvl_chat'
+     is_composition = True
+
+     def __init__(
+             self,
+             vision_config=None,
+             llm_config=None,
+             use_backbone_lora=0,
+             use_llm_lora=0,
+             select_layer=-1,
+             force_image_size=None,
+             downsample_ratio=0.5,
+             template=None,
+             dynamic_image_size=False,
+             use_thumbnail=False,
+             ps_version='v1',
+             min_dynamic_patch=1,
+             max_dynamic_patch=6,
+             **kwargs):
+         super().__init__(**kwargs)
+
+         if vision_config is None:
+             vision_config = {'architectures': ['InternVisionModel']}
+             logger.info('vision_config is None. Initializing the InternVisionConfig with default values.')
+
+         if llm_config is None:
+             llm_config = {'architectures': ['InternLM2ForCausalLM']}
+             logger.info('llm_config is None. Initializing the LlamaConfig config with default values (`LlamaConfig`).')
+
+         self.vision_config = InternVisionConfig(**vision_config)
+         if llm_config.get('architectures')[0] == 'LlamaForCausalLM':
+             self.llm_config = LlamaConfig(**llm_config)
+         elif llm_config.get('architectures')[0] == 'InternLM2ForCausalLM':
+             self.llm_config = InternLM2Config(**llm_config)
+         else:
+             raise ValueError('Unsupported architecture: {}'.format(llm_config.get('architectures')[0]))
+         self.use_backbone_lora = use_backbone_lora
+         self.use_llm_lora = use_llm_lora
+         self.select_layer = select_layer
+         self.force_image_size = force_image_size
+         self.downsample_ratio = downsample_ratio
+         self.template = template
+         self.dynamic_image_size = dynamic_image_size
+         self.use_thumbnail = use_thumbnail
+         self.ps_version = ps_version # pixel shuffle version
+         self.min_dynamic_patch = min_dynamic_patch
+         self.max_dynamic_patch = max_dynamic_patch
+
+         logger.info(f'vision_select_layer: {self.select_layer}')
+         logger.info(f'ps_version: {self.ps_version}')
+         logger.info(f'min_dynamic_patch: {self.min_dynamic_patch}')
+         logger.info(f'max_dynamic_patch: {self.max_dynamic_patch}')
+
+     def to_dict(self):
+         """
+         Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].
+
+         Returns:
+             `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,
+         """
+         output = copy.deepcopy(self.__dict__)
+         output['vision_config'] = self.vision_config.to_dict()
+         output['llm_config'] = self.llm_config.to_dict()
+         output['model_type'] = self.__class__.model_type
+         output['use_backbone_lora'] = self.use_backbone_lora
+         output['use_llm_lora'] = self.use_llm_lora
+         output['select_layer'] = self.select_layer
+         output['force_image_size'] = self.force_image_size
+         output['downsample_ratio'] = self.downsample_ratio
+         output['template'] = self.template
+         output['dynamic_image_size'] = self.dynamic_image_size
+         output['use_thumbnail'] = self.use_thumbnail
+         output['ps_version'] = self.ps_version
+         output['min_dynamic_patch'] = self.min_dynamic_patch
+         output['max_dynamic_patch'] = self.max_dynamic_patch
+
+         return output
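
Because this class uses relative imports, the simplest way to exercise it is through the auto_map in config.json rather than importing the module directly. A hedged sketch (placeholder path):

from transformers import AutoConfig

chat_cfg = AutoConfig.from_pretrained('/path/to/this/checkpoint', trust_remote_code=True)  # resolves to InternVLChatConfig
print(type(chat_cfg).__name__, chat_cfg.max_dynamic_patch, chat_cfg.llm_config.model_type)  # InternVLChatConfig 12 internlm2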
conversation.py ADDED
@@ -0,0 +1,391 @@
+ """
+ Conversation prompt templates.
+
+ We kindly request that you import fastchat instead of copying this file if you wish to use it.
+ If you have changes in mind, please contribute back so the community can benefit collectively and continue to maintain these valuable templates.
+
+ Modified from https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py
+ """
+
+ import dataclasses
+ from enum import IntEnum, auto
+ from typing import Dict, List, Tuple, Union
+
+
+ class SeparatorStyle(IntEnum):
+     """Separator styles."""
+
+     ADD_COLON_SINGLE = auto()
+     ADD_COLON_TWO = auto()
+     ADD_COLON_SPACE_SINGLE = auto()
+     NO_COLON_SINGLE = auto()
+     NO_COLON_TWO = auto()
+     ADD_NEW_LINE_SINGLE = auto()
+     LLAMA2 = auto()
+     CHATGLM = auto()
+     CHATML = auto()
+     CHATINTERN = auto()
+     DOLLY = auto()
+     RWKV = auto()
+     PHOENIX = auto()
+     ROBIN = auto()
+     FALCON_CHAT = auto()
+     CHATGLM3 = auto()
+     INTERNVL_ZH = auto()
+     MPT = auto()
+
+
+ @dataclasses.dataclass
+ class Conversation:
+     """A class that manages prompt templates and keeps all conversation history."""
+
+     # The name of this template
+     name: str
+     # The template of the system prompt
+     system_template: str = '{system_message}'
+     # The system message
+     system_message: str = ''
+     # The names of two roles
+     roles: Tuple[str] = ('USER', 'ASSISTANT')
+     # All messages. Each item is (role, message).
+     messages: List[List[str]] = ()
+     # The number of few shot examples
+     offset: int = 0
+     # The separator style and configurations
+     sep_style: SeparatorStyle = SeparatorStyle.ADD_COLON_SINGLE
+     sep: str = '\n'
+     sep2: str = None
+     # Stop criteria (the default one is EOS token)
+     stop_str: Union[str, List[str]] = None
+     # Stops generation if meeting any token in this list
+     stop_token_ids: List[int] = None
+
+     def get_prompt(self) -> str:
+         """Get the prompt for generation."""
+         system_prompt = self.system_template.format(system_message=self.system_message)
+         if self.sep_style == SeparatorStyle.ADD_COLON_SINGLE:
+             ret = system_prompt + self.sep
+             for role, message in self.messages:
+                 if message:
+                     ret += role + ': ' + message + self.sep
+                 else:
+                     ret += role + ':'
+             return ret
+         elif self.sep_style == SeparatorStyle.ADD_COLON_TWO:
+             seps = [self.sep, self.sep2]
+             ret = system_prompt + seps[0]
+             for i, (role, message) in enumerate(self.messages):
+                 if message:
+                     ret += role + ': ' + message + seps[i % 2]
+                 else:
+                     ret += role + ':'
+             return ret
+         elif self.sep_style == SeparatorStyle.ADD_COLON_SPACE_SINGLE:
+             ret = system_prompt + self.sep
+             for role, message in self.messages:
+                 if message:
+                     ret += role + ': ' + message + self.sep
+                 else:
+                     ret += role + ': ' # must be end with a space
+             return ret
+         elif self.sep_style == SeparatorStyle.ADD_NEW_LINE_SINGLE:
+             ret = '' if system_prompt == '' else system_prompt + self.sep
+             for role, message in self.messages:
+                 if message:
+                     ret += role + '\n' + message + self.sep
+                 else:
+                     ret += role + '\n'
+             return ret
+         elif self.sep_style == SeparatorStyle.NO_COLON_SINGLE:
+             ret = system_prompt
+             for role, message in self.messages:
+                 if message:
+                     ret += role + message + self.sep
+                 else:
+                     ret += role
+             return ret
+         elif self.sep_style == SeparatorStyle.NO_COLON_TWO:
+             seps = [self.sep, self.sep2]
+             ret = system_prompt
+             for i, (role, message) in enumerate(self.messages):
+                 if message:
+                     ret += role + message + seps[i % 2]
+                 else:
+                     ret += role
+             return ret
+         elif self.sep_style == SeparatorStyle.RWKV:
+             ret = system_prompt
+             for i, (role, message) in enumerate(self.messages):
+                 if message:
+                     ret += (
+                         role
+                         + ': '
+                         + message.replace('\r\n', '\n').replace('\n\n', '\n')
+                     )
+                     ret += '\n\n'
+                 else:
+                     ret += role + ':'
+             return ret
+         elif self.sep_style == SeparatorStyle.LLAMA2:
+             seps = [self.sep, self.sep2]
+             if self.system_message:
+                 ret = system_prompt
+             else:
+                 ret = '[INST] '
+             for i, (role, message) in enumerate(self.messages):
+                 tag = self.roles[i % 2]
+                 if message:
+                     if i == 0:
+                         ret += message + ' '
+                     else:
+                         ret += tag + ' ' + message + seps[i % 2]
+                 else:
+                     ret += tag
+             return ret
+         elif self.sep_style == SeparatorStyle.CHATGLM:
+             # source: https://huggingface.co/THUDM/chatglm-6b/blob/1d240ba371910e9282298d4592532d7f0f3e9f3e/modeling_chatglm.py#L1302-L1308
+             # source2: https://huggingface.co/THUDM/chatglm2-6b/blob/e186c891cf64310ac66ef10a87e6635fa6c2a579/modeling_chatglm.py#L926
+             round_add_n = 1 if self.name == 'chatglm2' else 0
+             if system_prompt:
+                 ret = system_prompt + self.sep
+             else:
+                 ret = ''
+
+             for i, (role, message) in enumerate(self.messages):
+                 if i % 2 == 0:
+                     ret += f'[Round {i//2 + round_add_n}]{self.sep}'
+
+                 if message:
+                     ret += f'{role}:{message}{self.sep}'
+                 else:
+                     ret += f'{role}:'
+             return ret
+         elif self.sep_style == SeparatorStyle.CHATML:
+             ret = '' if system_prompt == '' else system_prompt + self.sep + '\n'
+             for role, message in self.messages:
+                 if message:
+                     ret += role + '\n' + message + self.sep + '\n'
+                 else:
+                     ret += role + '\n'
+             return ret
+         elif self.sep_style == SeparatorStyle.CHATGLM3:
+             ret = ''
+             if self.system_message:
+                 ret += system_prompt
+             for role, message in self.messages:
+                 if message:
+                     ret += role + '\n' + ' ' + message
+                 else:
+                     ret += role
+             return ret
+         elif self.sep_style == SeparatorStyle.CHATINTERN:
+             # source: https://huggingface.co/internlm/internlm-chat-7b-8k/blob/bd546fa984b4b0b86958f56bf37f94aa75ab8831/modeling_internlm.py#L771
+             seps = [self.sep, self.sep2]
+             ret = system_prompt
+             for i, (role, message) in enumerate(self.messages):
+                 # if i % 2 == 0:
+                 #     ret += "<s>"
+                 if message:
+                     ret += role + ':' + message + seps[i % 2] + '\n'
+                 else:
+                     ret += role + ':'
+             return ret
+         elif self.sep_style == SeparatorStyle.DOLLY:
+             seps = [self.sep, self.sep2]
+             ret = system_prompt
+             for i, (role, message) in enumerate(self.messages):
+                 if message:
+                     ret += role + ':\n' + message + seps[i % 2]
+                     if i % 2 == 1:
+                         ret += '\n\n'
+                 else:
+                     ret += role + ':\n'
+             return ret
+         elif self.sep_style == SeparatorStyle.PHOENIX:
+             ret = system_prompt
+             for role, message in self.messages:
+                 if message:
+                     ret += role + ': ' + '<s>' + message + '</s>'
+                 else:
+                     ret += role + ': ' + '<s>'
+             return ret
+         elif self.sep_style == SeparatorStyle.ROBIN:
+             ret = system_prompt + self.sep
+             for role, message in self.messages:
+                 if message:
+                     ret += role + ':\n' + message + self.sep
+                 else:
+                     ret += role + ':\n'
+             return ret
+         elif self.sep_style == SeparatorStyle.FALCON_CHAT:
+             ret = ''
+             if self.system_message:
+                 ret += system_prompt + self.sep
+             for role, message in self.messages:
+                 if message:
+                     ret += role + ': ' + message + self.sep
+                 else:
+                     ret += role + ':'
+
+             return ret
+         elif self.sep_style == SeparatorStyle.INTERNVL_ZH:
+             seps = [self.sep, self.sep2]
+             ret = self.system_message + seps[0]
+             for i, (role, message) in enumerate(self.messages):
+                 if message:
+                     ret += role + ': ' + message + seps[i % 2]
+                 else:
+                     ret += role + ':'
+             return ret
+         elif self.sep_style == SeparatorStyle.MPT:
+             ret = system_prompt + self.sep
+             for role, message in self.messages:
+                 if message:
+                     if type(message) is tuple:
+                         message, _, _ = message
+                     ret += role + message + self.sep
+                 else:
+                     ret += role
+             return ret
+         else:
+             raise ValueError(f'Invalid style: {self.sep_style}')
+
+     def set_system_message(self, system_message: str):
+         """Set the system message."""
+         self.system_message = system_message
+
+     def append_message(self, role: str, message: str):
+         """Append a new message."""
+         self.messages.append([role, message])
+
+     def update_last_message(self, message: str):
+         """Update the last output.
+
+         The last message is typically set to be None when constructing the prompt,
+         so we need to update it in-place after getting the response from a model.
+         """
+         self.messages[-1][1] = message
+
+     def to_gradio_chatbot(self):
+         """Convert the conversation to gradio chatbot format."""
+         ret = []
+         for i, (role, msg) in enumerate(self.messages[self.offset :]):
+             if i % 2 == 0:
+                 ret.append([msg, None])
+             else:
+                 ret[-1][-1] = msg
+         return ret
+
+     def to_openai_api_messages(self):
+         """Convert the conversation to OpenAI chat completion format."""
+         ret = [{'role': 'system', 'content': self.system_message}]
+
+         for i, (_, msg) in enumerate(self.messages[self.offset :]):
+             if i % 2 == 0:
+                 ret.append({'role': 'user', 'content': msg})
+             else:
+                 if msg is not None:
+                     ret.append({'role': 'assistant', 'content': msg})
+         return ret
+
+     def copy(self):
+         return Conversation(
+             name=self.name,
+             system_template=self.system_template,
+             system_message=self.system_message,
+             roles=self.roles,
+             messages=[[x, y] for x, y in self.messages],
+             offset=self.offset,
+             sep_style=self.sep_style,
+             sep=self.sep,
+             sep2=self.sep2,
+             stop_str=self.stop_str,
+             stop_token_ids=self.stop_token_ids,
+         )
+
+     def dict(self):
+         return {
+             'template_name': self.name,
+             'system_message': self.system_message,
+             'roles': self.roles,
+             'messages': self.messages,
+             'offset': self.offset,
+         }
+
+
+ # A global registry for all conversation templates
+ conv_templates: Dict[str, Conversation] = {}
+
+
+ def register_conv_template(template: Conversation, override: bool = False):
+     """Register a new conversation template."""
+     if not override:
+         assert (
+             template.name not in conv_templates
+         ), f'{template.name} has been registered.'
+
+     conv_templates[template.name] = template
+
+
+ def get_conv_template(name: str) -> Conversation:
+     """Get a conversation template."""
+     return conv_templates[name].copy()
+
+
+ # Both Hermes-2 and internlm2-chat are chatml-format conversation templates. The difference
+ # is that during training, the preprocessing function for the Hermes-2 template doesn't add
+ # <s> at the beginning of the tokenized sequence, while the internlm2-chat template does.
+ # Therefore, they are completely equivalent during inference.
+ register_conv_template(
+     Conversation(
+         name='Hermes-2',
+         system_template='<|im_start|>system\n{system_message}',
+         # note: The new system prompt was not used here to avoid changes in benchmark performance.
+         # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
+         system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
+         roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
347
+ sep_style=SeparatorStyle.MPT,
348
+ sep='<|im_end|>',
349
+ stop_str='<|endoftext|>',
350
+ )
351
+ )
352
+
353
+
354
+ register_conv_template(
355
+ Conversation(
356
+ name='internlm2-chat',
357
+ system_template='<|im_start|>system\n{system_message}',
358
+ # note: The new system prompt was not used here to avoid changes in benchmark performance.
359
+ # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
360
+ system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
361
+ roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
362
+ sep_style=SeparatorStyle.MPT,
363
+ sep='<|im_end|>',
364
+ )
365
+ )
366
+
367
+
368
+ register_conv_template(
369
+ Conversation(
370
+ name='phi3-chat',
371
+ system_template='<|system|>\n{system_message}',
372
+ # note: The new system prompt was not used here to avoid changes in benchmark performance.
373
+ # system_message='我是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
374
+ system_message='你是由上海人工智能实验室联合商汤科技开发的书生多模态大模型,英文名叫InternVL, 是一个有用无害的人工智能助手。',
375
+ roles=('<|user|>\n', '<|assistant|>\n'),
376
+ sep_style=SeparatorStyle.MPT,
377
+ sep='<|end|>',
378
+ )
379
+ )
380
+
381
+
382
+ register_conv_template(
383
+ Conversation(
384
+ name='internvl2_5',
385
+ system_template='<|im_start|>system\n{system_message}',
386
+ system_message='你是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。',
387
+ roles=('<|im_start|>user\n', '<|im_start|>assistant\n'),
388
+ sep_style=SeparatorStyle.MPT,
389
+ sep='<|im_end|>\n',
390
+ )
391
+ )
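The file above only defines and registers the templates. As a minimal usage sketch (not one of the uploaded files), assuming the file is saved as conversation.py and that get_prompt() fills system_template earlier in the file as in the FastChat original, the 'internvl2_5' template assembles an MPT-style prompt like this:

    from conversation import get_conv_template

    conv = get_conv_template('internvl2_5')                     # copy of the registered template
    conv.append_message(conv.roles[0], 'Describe the image.')   # user turn
    conv.append_message(conv.roles[1], None)                    # empty slot for the model's reply
    prompt = conv.get_prompt()
    # With SeparatorStyle.MPT and sep='<|im_end|>\n' this yields roughly:
    # <|im_start|>system
    # {system_message}<|im_end|>
    # <|im_start|>user
    # Describe the image.<|im_end|>
    # <|im_start|>assistant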
generation_config.json ADDED
@@ -0,0 +1,8 @@
1
+ {
2
+ "_from_model_config": true,
3
+ "eos_token_id": [
4
+ 92542,
5
+ 92543
6
+ ],
7
+ "transformers_version": "4.48.2"
8
+ }
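A hedged sketch of how this generation_config.json is consumed (the path is a placeholder): either of the two eos ids terminates generation.

    from transformers import GenerationConfig

    gen_cfg = GenerationConfig.from_pretrained('/path/to/this/checkpoint')  # placeholder path
    print(gen_cfg.eos_token_id)   # [92542, 92543]; generation stops when either id is produced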
latest ADDED
@@ -0,0 +1 @@
1
+ global_step2475
model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d1ce98503a93743539767c40ed45603dfab522b89a779886416d45ee0e24901b
3
+ size 4939944336
model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e14c66f0c2294aee5e88187e93b5c5a87e09c0bd95690b1d7c8ece813e10d402
3
+ size 4915914584
model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fc0f6c907ec2a2253426b2c0d9aed3af3114cfb40db15dbfe4be734e526ea1f6
3
+ size 4915914592
model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a037625c5e0e1d4cce372d9668451e4a9b1411174380a54234c5249b922382f9
3
+ size 1379026920
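The four model-0000x-of-00004.safetensors entries above are Git LFS pointer files (spec version, sha256 oid, byte size); the actual shard bytes live in LFS storage. A hedged sketch of fetching them with huggingface_hub, with the repo id left as a placeholder:

    from huggingface_hub import snapshot_download

    local_dir = snapshot_download(
        repo_id='<owner>/<this-repo>',               # placeholder; substitute the actual repo id
        allow_patterns=['*.safetensors', '*.json'],  # pull the weight shards and configs
    )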
model.safetensors.index.json ADDED
@@ -0,0 +1,580 @@
1
+ {
2
+ "metadata": {
3
+ "total_size": 16150730752
4
+ },
5
+ "weight_map": {
6
+ "language_model.model.layers.0.attention.wo.weight": "model-00001-of-00004.safetensors",
7
+ "language_model.model.layers.0.attention.wqkv.weight": "model-00001-of-00004.safetensors",
8
+ "language_model.model.layers.0.attention_norm.weight": "model-00001-of-00004.safetensors",
9
+ "language_model.model.layers.0.feed_forward.w1.weight": "model-00001-of-00004.safetensors",
10
+ "language_model.model.layers.0.feed_forward.w2.weight": "model-00001-of-00004.safetensors",
11
+ "language_model.model.layers.0.feed_forward.w3.weight": "model-00001-of-00004.safetensors",
12
+ "language_model.model.layers.0.ffn_norm.weight": "model-00001-of-00004.safetensors",
13
+ "language_model.model.layers.1.attention.wo.weight": "model-00001-of-00004.safetensors",
14
+ "language_model.model.layers.1.attention.wqkv.weight": "model-00001-of-00004.safetensors",
15
+ "language_model.model.layers.1.attention_norm.weight": "model-00001-of-00004.safetensors",
16
+ "language_model.model.layers.1.feed_forward.w1.weight": "model-00001-of-00004.safetensors",
17
+ "language_model.model.layers.1.feed_forward.w2.weight": "model-00001-of-00004.safetensors",
18
+ "language_model.model.layers.1.feed_forward.w3.weight": "model-00001-of-00004.safetensors",
19
+ "language_model.model.layers.1.ffn_norm.weight": "model-00001-of-00004.safetensors",
20
+ "language_model.model.layers.10.attention.wo.weight": "model-00002-of-00004.safetensors",
21
+ "language_model.model.layers.10.attention.wqkv.weight": "model-00002-of-00004.safetensors",
22
+ "language_model.model.layers.10.attention_norm.weight": "model-00002-of-00004.safetensors",
23
+ "language_model.model.layers.10.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
24
+ "language_model.model.layers.10.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
25
+ "language_model.model.layers.10.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
26
+ "language_model.model.layers.10.ffn_norm.weight": "model-00002-of-00004.safetensors",
27
+ "language_model.model.layers.11.attention.wo.weight": "model-00002-of-00004.safetensors",
28
+ "language_model.model.layers.11.attention.wqkv.weight": "model-00002-of-00004.safetensors",
29
+ "language_model.model.layers.11.attention_norm.weight": "model-00002-of-00004.safetensors",
30
+ "language_model.model.layers.11.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
31
+ "language_model.model.layers.11.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
32
+ "language_model.model.layers.11.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
33
+ "language_model.model.layers.11.ffn_norm.weight": "model-00002-of-00004.safetensors",
34
+ "language_model.model.layers.12.attention.wo.weight": "model-00002-of-00004.safetensors",
35
+ "language_model.model.layers.12.attention.wqkv.weight": "model-00002-of-00004.safetensors",
36
+ "language_model.model.layers.12.attention_norm.weight": "model-00002-of-00004.safetensors",
37
+ "language_model.model.layers.12.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
38
+ "language_model.model.layers.12.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
39
+ "language_model.model.layers.12.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
40
+ "language_model.model.layers.12.ffn_norm.weight": "model-00002-of-00004.safetensors",
41
+ "language_model.model.layers.13.attention.wo.weight": "model-00002-of-00004.safetensors",
42
+ "language_model.model.layers.13.attention.wqkv.weight": "model-00002-of-00004.safetensors",
43
+ "language_model.model.layers.13.attention_norm.weight": "model-00002-of-00004.safetensors",
44
+ "language_model.model.layers.13.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
45
+ "language_model.model.layers.13.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
46
+ "language_model.model.layers.13.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
47
+ "language_model.model.layers.13.ffn_norm.weight": "model-00002-of-00004.safetensors",
48
+ "language_model.model.layers.14.attention.wo.weight": "model-00002-of-00004.safetensors",
49
+ "language_model.model.layers.14.attention.wqkv.weight": "model-00002-of-00004.safetensors",
50
+ "language_model.model.layers.14.attention_norm.weight": "model-00002-of-00004.safetensors",
51
+ "language_model.model.layers.14.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
52
+ "language_model.model.layers.14.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
53
+ "language_model.model.layers.14.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
54
+ "language_model.model.layers.14.ffn_norm.weight": "model-00002-of-00004.safetensors",
55
+ "language_model.model.layers.15.attention.wo.weight": "model-00002-of-00004.safetensors",
56
+ "language_model.model.layers.15.attention.wqkv.weight": "model-00002-of-00004.safetensors",
57
+ "language_model.model.layers.15.attention_norm.weight": "model-00002-of-00004.safetensors",
58
+ "language_model.model.layers.15.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
59
+ "language_model.model.layers.15.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
60
+ "language_model.model.layers.15.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
61
+ "language_model.model.layers.15.ffn_norm.weight": "model-00002-of-00004.safetensors",
62
+ "language_model.model.layers.16.attention.wo.weight": "model-00002-of-00004.safetensors",
63
+ "language_model.model.layers.16.attention.wqkv.weight": "model-00002-of-00004.safetensors",
64
+ "language_model.model.layers.16.attention_norm.weight": "model-00002-of-00004.safetensors",
65
+ "language_model.model.layers.16.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
66
+ "language_model.model.layers.16.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
67
+ "language_model.model.layers.16.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
68
+ "language_model.model.layers.16.ffn_norm.weight": "model-00002-of-00004.safetensors",
69
+ "language_model.model.layers.17.attention.wo.weight": "model-00002-of-00004.safetensors",
70
+ "language_model.model.layers.17.attention.wqkv.weight": "model-00002-of-00004.safetensors",
71
+ "language_model.model.layers.17.attention_norm.weight": "model-00002-of-00004.safetensors",
72
+ "language_model.model.layers.17.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
73
+ "language_model.model.layers.17.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
74
+ "language_model.model.layers.17.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
75
+ "language_model.model.layers.17.ffn_norm.weight": "model-00002-of-00004.safetensors",
76
+ "language_model.model.layers.18.attention.wo.weight": "model-00002-of-00004.safetensors",
77
+ "language_model.model.layers.18.attention.wqkv.weight": "model-00002-of-00004.safetensors",
78
+ "language_model.model.layers.18.attention_norm.weight": "model-00002-of-00004.safetensors",
79
+ "language_model.model.layers.18.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
80
+ "language_model.model.layers.18.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
81
+ "language_model.model.layers.18.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
82
+ "language_model.model.layers.18.ffn_norm.weight": "model-00002-of-00004.safetensors",
83
+ "language_model.model.layers.19.attention.wo.weight": "model-00002-of-00004.safetensors",
84
+ "language_model.model.layers.19.attention.wqkv.weight": "model-00002-of-00004.safetensors",
85
+ "language_model.model.layers.19.attention_norm.weight": "model-00003-of-00004.safetensors",
86
+ "language_model.model.layers.19.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
87
+ "language_model.model.layers.19.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
88
+ "language_model.model.layers.19.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
89
+ "language_model.model.layers.19.ffn_norm.weight": "model-00003-of-00004.safetensors",
90
+ "language_model.model.layers.2.attention.wo.weight": "model-00001-of-00004.safetensors",
91
+ "language_model.model.layers.2.attention.wqkv.weight": "model-00001-of-00004.safetensors",
92
+ "language_model.model.layers.2.attention_norm.weight": "model-00001-of-00004.safetensors",
93
+ "language_model.model.layers.2.feed_forward.w1.weight": "model-00001-of-00004.safetensors",
94
+ "language_model.model.layers.2.feed_forward.w2.weight": "model-00001-of-00004.safetensors",
95
+ "language_model.model.layers.2.feed_forward.w3.weight": "model-00001-of-00004.safetensors",
96
+ "language_model.model.layers.2.ffn_norm.weight": "model-00001-of-00004.safetensors",
97
+ "language_model.model.layers.20.attention.wo.weight": "model-00003-of-00004.safetensors",
98
+ "language_model.model.layers.20.attention.wqkv.weight": "model-00003-of-00004.safetensors",
99
+ "language_model.model.layers.20.attention_norm.weight": "model-00003-of-00004.safetensors",
100
+ "language_model.model.layers.20.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
101
+ "language_model.model.layers.20.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
102
+ "language_model.model.layers.20.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
103
+ "language_model.model.layers.20.ffn_norm.weight": "model-00003-of-00004.safetensors",
104
+ "language_model.model.layers.21.attention.wo.weight": "model-00003-of-00004.safetensors",
105
+ "language_model.model.layers.21.attention.wqkv.weight": "model-00003-of-00004.safetensors",
106
+ "language_model.model.layers.21.attention_norm.weight": "model-00003-of-00004.safetensors",
107
+ "language_model.model.layers.21.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
108
+ "language_model.model.layers.21.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
109
+ "language_model.model.layers.21.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
110
+ "language_model.model.layers.21.ffn_norm.weight": "model-00003-of-00004.safetensors",
111
+ "language_model.model.layers.22.attention.wo.weight": "model-00003-of-00004.safetensors",
112
+ "language_model.model.layers.22.attention.wqkv.weight": "model-00003-of-00004.safetensors",
113
+ "language_model.model.layers.22.attention_norm.weight": "model-00003-of-00004.safetensors",
114
+ "language_model.model.layers.22.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
115
+ "language_model.model.layers.22.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
116
+ "language_model.model.layers.22.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
117
+ "language_model.model.layers.22.ffn_norm.weight": "model-00003-of-00004.safetensors",
118
+ "language_model.model.layers.23.attention.wo.weight": "model-00003-of-00004.safetensors",
119
+ "language_model.model.layers.23.attention.wqkv.weight": "model-00003-of-00004.safetensors",
120
+ "language_model.model.layers.23.attention_norm.weight": "model-00003-of-00004.safetensors",
121
+ "language_model.model.layers.23.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
122
+ "language_model.model.layers.23.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
123
+ "language_model.model.layers.23.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
124
+ "language_model.model.layers.23.ffn_norm.weight": "model-00003-of-00004.safetensors",
125
+ "language_model.model.layers.24.attention.wo.weight": "model-00003-of-00004.safetensors",
126
+ "language_model.model.layers.24.attention.wqkv.weight": "model-00003-of-00004.safetensors",
127
+ "language_model.model.layers.24.attention_norm.weight": "model-00003-of-00004.safetensors",
128
+ "language_model.model.layers.24.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
129
+ "language_model.model.layers.24.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
130
+ "language_model.model.layers.24.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
131
+ "language_model.model.layers.24.ffn_norm.weight": "model-00003-of-00004.safetensors",
132
+ "language_model.model.layers.25.attention.wo.weight": "model-00003-of-00004.safetensors",
133
+ "language_model.model.layers.25.attention.wqkv.weight": "model-00003-of-00004.safetensors",
134
+ "language_model.model.layers.25.attention_norm.weight": "model-00003-of-00004.safetensors",
135
+ "language_model.model.layers.25.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
136
+ "language_model.model.layers.25.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
137
+ "language_model.model.layers.25.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
138
+ "language_model.model.layers.25.ffn_norm.weight": "model-00003-of-00004.safetensors",
139
+ "language_model.model.layers.26.attention.wo.weight": "model-00003-of-00004.safetensors",
140
+ "language_model.model.layers.26.attention.wqkv.weight": "model-00003-of-00004.safetensors",
141
+ "language_model.model.layers.26.attention_norm.weight": "model-00003-of-00004.safetensors",
142
+ "language_model.model.layers.26.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
143
+ "language_model.model.layers.26.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
144
+ "language_model.model.layers.26.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
145
+ "language_model.model.layers.26.ffn_norm.weight": "model-00003-of-00004.safetensors",
146
+ "language_model.model.layers.27.attention.wo.weight": "model-00003-of-00004.safetensors",
147
+ "language_model.model.layers.27.attention.wqkv.weight": "model-00003-of-00004.safetensors",
148
+ "language_model.model.layers.27.attention_norm.weight": "model-00003-of-00004.safetensors",
149
+ "language_model.model.layers.27.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
150
+ "language_model.model.layers.27.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
151
+ "language_model.model.layers.27.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
152
+ "language_model.model.layers.27.ffn_norm.weight": "model-00003-of-00004.safetensors",
153
+ "language_model.model.layers.28.attention.wo.weight": "model-00003-of-00004.safetensors",
154
+ "language_model.model.layers.28.attention.wqkv.weight": "model-00003-of-00004.safetensors",
155
+ "language_model.model.layers.28.attention_norm.weight": "model-00003-of-00004.safetensors",
156
+ "language_model.model.layers.28.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
157
+ "language_model.model.layers.28.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
158
+ "language_model.model.layers.28.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
159
+ "language_model.model.layers.28.ffn_norm.weight": "model-00003-of-00004.safetensors",
160
+ "language_model.model.layers.29.attention.wo.weight": "model-00003-of-00004.safetensors",
161
+ "language_model.model.layers.29.attention.wqkv.weight": "model-00003-of-00004.safetensors",
162
+ "language_model.model.layers.29.attention_norm.weight": "model-00003-of-00004.safetensors",
163
+ "language_model.model.layers.29.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
164
+ "language_model.model.layers.29.feed_forward.w2.weight": "model-00003-of-00004.safetensors",
165
+ "language_model.model.layers.29.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
166
+ "language_model.model.layers.29.ffn_norm.weight": "model-00003-of-00004.safetensors",
167
+ "language_model.model.layers.3.attention.wo.weight": "model-00001-of-00004.safetensors",
168
+ "language_model.model.layers.3.attention.wqkv.weight": "model-00001-of-00004.safetensors",
169
+ "language_model.model.layers.3.attention_norm.weight": "model-00001-of-00004.safetensors",
170
+ "language_model.model.layers.3.feed_forward.w1.weight": "model-00001-of-00004.safetensors",
171
+ "language_model.model.layers.3.feed_forward.w2.weight": "model-00001-of-00004.safetensors",
172
+ "language_model.model.layers.3.feed_forward.w3.weight": "model-00001-of-00004.safetensors",
173
+ "language_model.model.layers.3.ffn_norm.weight": "model-00001-of-00004.safetensors",
174
+ "language_model.model.layers.30.attention.wo.weight": "model-00003-of-00004.safetensors",
175
+ "language_model.model.layers.30.attention.wqkv.weight": "model-00003-of-00004.safetensors",
176
+ "language_model.model.layers.30.attention_norm.weight": "model-00004-of-00004.safetensors",
177
+ "language_model.model.layers.30.feed_forward.w1.weight": "model-00003-of-00004.safetensors",
178
+ "language_model.model.layers.30.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
179
+ "language_model.model.layers.30.feed_forward.w3.weight": "model-00003-of-00004.safetensors",
180
+ "language_model.model.layers.30.ffn_norm.weight": "model-00004-of-00004.safetensors",
181
+ "language_model.model.layers.31.attention.wo.weight": "model-00004-of-00004.safetensors",
182
+ "language_model.model.layers.31.attention.wqkv.weight": "model-00004-of-00004.safetensors",
183
+ "language_model.model.layers.31.attention_norm.weight": "model-00004-of-00004.safetensors",
184
+ "language_model.model.layers.31.feed_forward.w1.weight": "model-00004-of-00004.safetensors",
185
+ "language_model.model.layers.31.feed_forward.w2.weight": "model-00004-of-00004.safetensors",
186
+ "language_model.model.layers.31.feed_forward.w3.weight": "model-00004-of-00004.safetensors",
187
+ "language_model.model.layers.31.ffn_norm.weight": "model-00004-of-00004.safetensors",
188
+ "language_model.model.layers.4.attention.wo.weight": "model-00001-of-00004.safetensors",
189
+ "language_model.model.layers.4.attention.wqkv.weight": "model-00001-of-00004.safetensors",
190
+ "language_model.model.layers.4.attention_norm.weight": "model-00001-of-00004.safetensors",
191
+ "language_model.model.layers.4.feed_forward.w1.weight": "model-00001-of-00004.safetensors",
192
+ "language_model.model.layers.4.feed_forward.w2.weight": "model-00001-of-00004.safetensors",
193
+ "language_model.model.layers.4.feed_forward.w3.weight": "model-00001-of-00004.safetensors",
194
+ "language_model.model.layers.4.ffn_norm.weight": "model-00001-of-00004.safetensors",
195
+ "language_model.model.layers.5.attention.wo.weight": "model-00001-of-00004.safetensors",
196
+ "language_model.model.layers.5.attention.wqkv.weight": "model-00001-of-00004.safetensors",
197
+ "language_model.model.layers.5.attention_norm.weight": "model-00001-of-00004.safetensors",
198
+ "language_model.model.layers.5.feed_forward.w1.weight": "model-00001-of-00004.safetensors",
199
+ "language_model.model.layers.5.feed_forward.w2.weight": "model-00001-of-00004.safetensors",
200
+ "language_model.model.layers.5.feed_forward.w3.weight": "model-00001-of-00004.safetensors",
201
+ "language_model.model.layers.5.ffn_norm.weight": "model-00001-of-00004.safetensors",
202
+ "language_model.model.layers.6.attention.wo.weight": "model-00001-of-00004.safetensors",
203
+ "language_model.model.layers.6.attention.wqkv.weight": "model-00001-of-00004.safetensors",
204
+ "language_model.model.layers.6.attention_norm.weight": "model-00001-of-00004.safetensors",
205
+ "language_model.model.layers.6.feed_forward.w1.weight": "model-00001-of-00004.safetensors",
206
+ "language_model.model.layers.6.feed_forward.w2.weight": "model-00001-of-00004.safetensors",
207
+ "language_model.model.layers.6.feed_forward.w3.weight": "model-00001-of-00004.safetensors",
208
+ "language_model.model.layers.6.ffn_norm.weight": "model-00001-of-00004.safetensors",
209
+ "language_model.model.layers.7.attention.wo.weight": "model-00001-of-00004.safetensors",
210
+ "language_model.model.layers.7.attention.wqkv.weight": "model-00001-of-00004.safetensors",
211
+ "language_model.model.layers.7.attention_norm.weight": "model-00001-of-00004.safetensors",
212
+ "language_model.model.layers.7.feed_forward.w1.weight": "model-00001-of-00004.safetensors",
213
+ "language_model.model.layers.7.feed_forward.w2.weight": "model-00001-of-00004.safetensors",
214
+ "language_model.model.layers.7.feed_forward.w3.weight": "model-00001-of-00004.safetensors",
215
+ "language_model.model.layers.7.ffn_norm.weight": "model-00001-of-00004.safetensors",
216
+ "language_model.model.layers.8.attention.wo.weight": "model-00001-of-00004.safetensors",
217
+ "language_model.model.layers.8.attention.wqkv.weight": "model-00001-of-00004.safetensors",
218
+ "language_model.model.layers.8.attention_norm.weight": "model-00002-of-00004.safetensors",
219
+ "language_model.model.layers.8.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
220
+ "language_model.model.layers.8.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
221
+ "language_model.model.layers.8.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
222
+ "language_model.model.layers.8.ffn_norm.weight": "model-00002-of-00004.safetensors",
223
+ "language_model.model.layers.9.attention.wo.weight": "model-00002-of-00004.safetensors",
224
+ "language_model.model.layers.9.attention.wqkv.weight": "model-00002-of-00004.safetensors",
225
+ "language_model.model.layers.9.attention_norm.weight": "model-00002-of-00004.safetensors",
226
+ "language_model.model.layers.9.feed_forward.w1.weight": "model-00002-of-00004.safetensors",
227
+ "language_model.model.layers.9.feed_forward.w2.weight": "model-00002-of-00004.safetensors",
228
+ "language_model.model.layers.9.feed_forward.w3.weight": "model-00002-of-00004.safetensors",
229
+ "language_model.model.layers.9.ffn_norm.weight": "model-00002-of-00004.safetensors",
230
+ "language_model.model.norm.weight": "model-00004-of-00004.safetensors",
231
+ "language_model.model.tok_embeddings.weight": "model-00001-of-00004.safetensors",
232
+ "language_model.output.weight": "model-00004-of-00004.safetensors",
233
+ "mlp1.0.bias": "model-00004-of-00004.safetensors",
234
+ "mlp1.0.weight": "model-00004-of-00004.safetensors",
235
+ "mlp1.1.bias": "model-00004-of-00004.safetensors",
236
+ "mlp1.1.weight": "model-00004-of-00004.safetensors",
237
+ "mlp1.3.bias": "model-00004-of-00004.safetensors",
238
+ "mlp1.3.weight": "model-00004-of-00004.safetensors",
239
+ "vision_model.embeddings.class_embedding": "model-00001-of-00004.safetensors",
240
+ "vision_model.embeddings.patch_embedding.bias": "model-00001-of-00004.safetensors",
241
+ "vision_model.embeddings.patch_embedding.weight": "model-00001-of-00004.safetensors",
242
+ "vision_model.embeddings.position_embedding": "model-00001-of-00004.safetensors",
243
+ "vision_model.encoder.layers.0.attn.proj.bias": "model-00001-of-00004.safetensors",
244
+ "vision_model.encoder.layers.0.attn.proj.weight": "model-00001-of-00004.safetensors",
245
+ "vision_model.encoder.layers.0.attn.qkv.bias": "model-00001-of-00004.safetensors",
246
+ "vision_model.encoder.layers.0.attn.qkv.weight": "model-00001-of-00004.safetensors",
247
+ "vision_model.encoder.layers.0.ls1": "model-00001-of-00004.safetensors",
248
+ "vision_model.encoder.layers.0.ls2": "model-00001-of-00004.safetensors",
249
+ "vision_model.encoder.layers.0.mlp.fc1.bias": "model-00001-of-00004.safetensors",
250
+ "vision_model.encoder.layers.0.mlp.fc1.weight": "model-00001-of-00004.safetensors",
251
+ "vision_model.encoder.layers.0.mlp.fc2.bias": "model-00001-of-00004.safetensors",
252
+ "vision_model.encoder.layers.0.mlp.fc2.weight": "model-00001-of-00004.safetensors",
253
+ "vision_model.encoder.layers.0.norm1.bias": "model-00001-of-00004.safetensors",
254
+ "vision_model.encoder.layers.0.norm1.weight": "model-00001-of-00004.safetensors",
255
+ "vision_model.encoder.layers.0.norm2.bias": "model-00001-of-00004.safetensors",
256
+ "vision_model.encoder.layers.0.norm2.weight": "model-00001-of-00004.safetensors",
257
+ "vision_model.encoder.layers.1.attn.proj.bias": "model-00001-of-00004.safetensors",
258
+ "vision_model.encoder.layers.1.attn.proj.weight": "model-00001-of-00004.safetensors",
259
+ "vision_model.encoder.layers.1.attn.qkv.bias": "model-00001-of-00004.safetensors",
260
+ "vision_model.encoder.layers.1.attn.qkv.weight": "model-00001-of-00004.safetensors",
261
+ "vision_model.encoder.layers.1.ls1": "model-00001-of-00004.safetensors",
262
+ "vision_model.encoder.layers.1.ls2": "model-00001-of-00004.safetensors",
263
+ "vision_model.encoder.layers.1.mlp.fc1.bias": "model-00001-of-00004.safetensors",
264
+ "vision_model.encoder.layers.1.mlp.fc1.weight": "model-00001-of-00004.safetensors",
265
+ "vision_model.encoder.layers.1.mlp.fc2.bias": "model-00001-of-00004.safetensors",
266
+ "vision_model.encoder.layers.1.mlp.fc2.weight": "model-00001-of-00004.safetensors",
267
+ "vision_model.encoder.layers.1.norm1.bias": "model-00001-of-00004.safetensors",
268
+ "vision_model.encoder.layers.1.norm1.weight": "model-00001-of-00004.safetensors",
269
+ "vision_model.encoder.layers.1.norm2.bias": "model-00001-of-00004.safetensors",
270
+ "vision_model.encoder.layers.1.norm2.weight": "model-00001-of-00004.safetensors",
271
+ "vision_model.encoder.layers.10.attn.proj.bias": "model-00001-of-00004.safetensors",
272
+ "vision_model.encoder.layers.10.attn.proj.weight": "model-00001-of-00004.safetensors",
273
+ "vision_model.encoder.layers.10.attn.qkv.bias": "model-00001-of-00004.safetensors",
274
+ "vision_model.encoder.layers.10.attn.qkv.weight": "model-00001-of-00004.safetensors",
275
+ "vision_model.encoder.layers.10.ls1": "model-00001-of-00004.safetensors",
276
+ "vision_model.encoder.layers.10.ls2": "model-00001-of-00004.safetensors",
277
+ "vision_model.encoder.layers.10.mlp.fc1.bias": "model-00001-of-00004.safetensors",
278
+ "vision_model.encoder.layers.10.mlp.fc1.weight": "model-00001-of-00004.safetensors",
279
+ "vision_model.encoder.layers.10.mlp.fc2.bias": "model-00001-of-00004.safetensors",
280
+ "vision_model.encoder.layers.10.mlp.fc2.weight": "model-00001-of-00004.safetensors",
281
+ "vision_model.encoder.layers.10.norm1.bias": "model-00001-of-00004.safetensors",
282
+ "vision_model.encoder.layers.10.norm1.weight": "model-00001-of-00004.safetensors",
283
+ "vision_model.encoder.layers.10.norm2.bias": "model-00001-of-00004.safetensors",
284
+ "vision_model.encoder.layers.10.norm2.weight": "model-00001-of-00004.safetensors",
285
+ "vision_model.encoder.layers.11.attn.proj.bias": "model-00001-of-00004.safetensors",
286
+ "vision_model.encoder.layers.11.attn.proj.weight": "model-00001-of-00004.safetensors",
287
+ "vision_model.encoder.layers.11.attn.qkv.bias": "model-00001-of-00004.safetensors",
288
+ "vision_model.encoder.layers.11.attn.qkv.weight": "model-00001-of-00004.safetensors",
289
+ "vision_model.encoder.layers.11.ls1": "model-00001-of-00004.safetensors",
290
+ "vision_model.encoder.layers.11.ls2": "model-00001-of-00004.safetensors",
291
+ "vision_model.encoder.layers.11.mlp.fc1.bias": "model-00001-of-00004.safetensors",
292
+ "vision_model.encoder.layers.11.mlp.fc1.weight": "model-00001-of-00004.safetensors",
293
+ "vision_model.encoder.layers.11.mlp.fc2.bias": "model-00001-of-00004.safetensors",
294
+ "vision_model.encoder.layers.11.mlp.fc2.weight": "model-00001-of-00004.safetensors",
295
+ "vision_model.encoder.layers.11.norm1.bias": "model-00001-of-00004.safetensors",
296
+ "vision_model.encoder.layers.11.norm1.weight": "model-00001-of-00004.safetensors",
297
+ "vision_model.encoder.layers.11.norm2.bias": "model-00001-of-00004.safetensors",
298
+ "vision_model.encoder.layers.11.norm2.weight": "model-00001-of-00004.safetensors",
299
+ "vision_model.encoder.layers.12.attn.proj.bias": "model-00001-of-00004.safetensors",
300
+ "vision_model.encoder.layers.12.attn.proj.weight": "model-00001-of-00004.safetensors",
301
+ "vision_model.encoder.layers.12.attn.qkv.bias": "model-00001-of-00004.safetensors",
302
+ "vision_model.encoder.layers.12.attn.qkv.weight": "model-00001-of-00004.safetensors",
303
+ "vision_model.encoder.layers.12.ls1": "model-00001-of-00004.safetensors",
304
+ "vision_model.encoder.layers.12.ls2": "model-00001-of-00004.safetensors",
305
+ "vision_model.encoder.layers.12.mlp.fc1.bias": "model-00001-of-00004.safetensors",
306
+ "vision_model.encoder.layers.12.mlp.fc1.weight": "model-00001-of-00004.safetensors",
307
+ "vision_model.encoder.layers.12.mlp.fc2.bias": "model-00001-of-00004.safetensors",
308
+ "vision_model.encoder.layers.12.mlp.fc2.weight": "model-00001-of-00004.safetensors",
309
+ "vision_model.encoder.layers.12.norm1.bias": "model-00001-of-00004.safetensors",
310
+ "vision_model.encoder.layers.12.norm1.weight": "model-00001-of-00004.safetensors",
311
+ "vision_model.encoder.layers.12.norm2.bias": "model-00001-of-00004.safetensors",
312
+ "vision_model.encoder.layers.12.norm2.weight": "model-00001-of-00004.safetensors",
313
+ "vision_model.encoder.layers.13.attn.proj.bias": "model-00001-of-00004.safetensors",
314
+ "vision_model.encoder.layers.13.attn.proj.weight": "model-00001-of-00004.safetensors",
315
+ "vision_model.encoder.layers.13.attn.qkv.bias": "model-00001-of-00004.safetensors",
316
+ "vision_model.encoder.layers.13.attn.qkv.weight": "model-00001-of-00004.safetensors",
317
+ "vision_model.encoder.layers.13.ls1": "model-00001-of-00004.safetensors",
318
+ "vision_model.encoder.layers.13.ls2": "model-00001-of-00004.safetensors",
319
+ "vision_model.encoder.layers.13.mlp.fc1.bias": "model-00001-of-00004.safetensors",
320
+ "vision_model.encoder.layers.13.mlp.fc1.weight": "model-00001-of-00004.safetensors",
321
+ "vision_model.encoder.layers.13.mlp.fc2.bias": "model-00001-of-00004.safetensors",
322
+ "vision_model.encoder.layers.13.mlp.fc2.weight": "model-00001-of-00004.safetensors",
323
+ "vision_model.encoder.layers.13.norm1.bias": "model-00001-of-00004.safetensors",
324
+ "vision_model.encoder.layers.13.norm1.weight": "model-00001-of-00004.safetensors",
325
+ "vision_model.encoder.layers.13.norm2.bias": "model-00001-of-00004.safetensors",
326
+ "vision_model.encoder.layers.13.norm2.weight": "model-00001-of-00004.safetensors",
327
+ "vision_model.encoder.layers.14.attn.proj.bias": "model-00001-of-00004.safetensors",
328
+ "vision_model.encoder.layers.14.attn.proj.weight": "model-00001-of-00004.safetensors",
329
+ "vision_model.encoder.layers.14.attn.qkv.bias": "model-00001-of-00004.safetensors",
330
+ "vision_model.encoder.layers.14.attn.qkv.weight": "model-00001-of-00004.safetensors",
331
+ "vision_model.encoder.layers.14.ls1": "model-00001-of-00004.safetensors",
332
+ "vision_model.encoder.layers.14.ls2": "model-00001-of-00004.safetensors",
333
+ "vision_model.encoder.layers.14.mlp.fc1.bias": "model-00001-of-00004.safetensors",
334
+ "vision_model.encoder.layers.14.mlp.fc1.weight": "model-00001-of-00004.safetensors",
335
+ "vision_model.encoder.layers.14.mlp.fc2.bias": "model-00001-of-00004.safetensors",
336
+ "vision_model.encoder.layers.14.mlp.fc2.weight": "model-00001-of-00004.safetensors",
337
+ "vision_model.encoder.layers.14.norm1.bias": "model-00001-of-00004.safetensors",
338
+ "vision_model.encoder.layers.14.norm1.weight": "model-00001-of-00004.safetensors",
339
+ "vision_model.encoder.layers.14.norm2.bias": "model-00001-of-00004.safetensors",
340
+ "vision_model.encoder.layers.14.norm2.weight": "model-00001-of-00004.safetensors",
341
+ "vision_model.encoder.layers.15.attn.proj.bias": "model-00001-of-00004.safetensors",
342
+ "vision_model.encoder.layers.15.attn.proj.weight": "model-00001-of-00004.safetensors",
343
+ "vision_model.encoder.layers.15.attn.qkv.bias": "model-00001-of-00004.safetensors",
344
+ "vision_model.encoder.layers.15.attn.qkv.weight": "model-00001-of-00004.safetensors",
345
+ "vision_model.encoder.layers.15.ls1": "model-00001-of-00004.safetensors",
346
+ "vision_model.encoder.layers.15.ls2": "model-00001-of-00004.safetensors",
347
+ "vision_model.encoder.layers.15.mlp.fc1.bias": "model-00001-of-00004.safetensors",
348
+ "vision_model.encoder.layers.15.mlp.fc1.weight": "model-00001-of-00004.safetensors",
349
+ "vision_model.encoder.layers.15.mlp.fc2.bias": "model-00001-of-00004.safetensors",
350
+ "vision_model.encoder.layers.15.mlp.fc2.weight": "model-00001-of-00004.safetensors",
351
+ "vision_model.encoder.layers.15.norm1.bias": "model-00001-of-00004.safetensors",
352
+ "vision_model.encoder.layers.15.norm1.weight": "model-00001-of-00004.safetensors",
353
+ "vision_model.encoder.layers.15.norm2.bias": "model-00001-of-00004.safetensors",
354
+ "vision_model.encoder.layers.15.norm2.weight": "model-00001-of-00004.safetensors",
355
+ "vision_model.encoder.layers.16.attn.proj.bias": "model-00001-of-00004.safetensors",
356
+ "vision_model.encoder.layers.16.attn.proj.weight": "model-00001-of-00004.safetensors",
357
+ "vision_model.encoder.layers.16.attn.qkv.bias": "model-00001-of-00004.safetensors",
358
+ "vision_model.encoder.layers.16.attn.qkv.weight": "model-00001-of-00004.safetensors",
359
+ "vision_model.encoder.layers.16.ls1": "model-00001-of-00004.safetensors",
360
+ "vision_model.encoder.layers.16.ls2": "model-00001-of-00004.safetensors",
361
+ "vision_model.encoder.layers.16.mlp.fc1.bias": "model-00001-of-00004.safetensors",
362
+ "vision_model.encoder.layers.16.mlp.fc1.weight": "model-00001-of-00004.safetensors",
363
+ "vision_model.encoder.layers.16.mlp.fc2.bias": "model-00001-of-00004.safetensors",
364
+ "vision_model.encoder.layers.16.mlp.fc2.weight": "model-00001-of-00004.safetensors",
365
+ "vision_model.encoder.layers.16.norm1.bias": "model-00001-of-00004.safetensors",
366
+ "vision_model.encoder.layers.16.norm1.weight": "model-00001-of-00004.safetensors",
367
+ "vision_model.encoder.layers.16.norm2.bias": "model-00001-of-00004.safetensors",
368
+ "vision_model.encoder.layers.16.norm2.weight": "model-00001-of-00004.safetensors",
369
+ "vision_model.encoder.layers.17.attn.proj.bias": "model-00001-of-00004.safetensors",
370
+ "vision_model.encoder.layers.17.attn.proj.weight": "model-00001-of-00004.safetensors",
371
+ "vision_model.encoder.layers.17.attn.qkv.bias": "model-00001-of-00004.safetensors",
372
+ "vision_model.encoder.layers.17.attn.qkv.weight": "model-00001-of-00004.safetensors",
373
+ "vision_model.encoder.layers.17.ls1": "model-00001-of-00004.safetensors",
374
+ "vision_model.encoder.layers.17.ls2": "model-00001-of-00004.safetensors",
375
+ "vision_model.encoder.layers.17.mlp.fc1.bias": "model-00001-of-00004.safetensors",
376
+ "vision_model.encoder.layers.17.mlp.fc1.weight": "model-00001-of-00004.safetensors",
377
+ "vision_model.encoder.layers.17.mlp.fc2.bias": "model-00001-of-00004.safetensors",
378
+ "vision_model.encoder.layers.17.mlp.fc2.weight": "model-00001-of-00004.safetensors",
379
+ "vision_model.encoder.layers.17.norm1.bias": "model-00001-of-00004.safetensors",
380
+ "vision_model.encoder.layers.17.norm1.weight": "model-00001-of-00004.safetensors",
381
+ "vision_model.encoder.layers.17.norm2.bias": "model-00001-of-00004.safetensors",
382
+ "vision_model.encoder.layers.17.norm2.weight": "model-00001-of-00004.safetensors",
383
+ "vision_model.encoder.layers.18.attn.proj.bias": "model-00001-of-00004.safetensors",
384
+ "vision_model.encoder.layers.18.attn.proj.weight": "model-00001-of-00004.safetensors",
385
+ "vision_model.encoder.layers.18.attn.qkv.bias": "model-00001-of-00004.safetensors",
386
+ "vision_model.encoder.layers.18.attn.qkv.weight": "model-00001-of-00004.safetensors",
387
+ "vision_model.encoder.layers.18.ls1": "model-00001-of-00004.safetensors",
388
+ "vision_model.encoder.layers.18.ls2": "model-00001-of-00004.safetensors",
389
+ "vision_model.encoder.layers.18.mlp.fc1.bias": "model-00001-of-00004.safetensors",
390
+ "vision_model.encoder.layers.18.mlp.fc1.weight": "model-00001-of-00004.safetensors",
391
+ "vision_model.encoder.layers.18.mlp.fc2.bias": "model-00001-of-00004.safetensors",
392
+ "vision_model.encoder.layers.18.mlp.fc2.weight": "model-00001-of-00004.safetensors",
393
+ "vision_model.encoder.layers.18.norm1.bias": "model-00001-of-00004.safetensors",
394
+ "vision_model.encoder.layers.18.norm1.weight": "model-00001-of-00004.safetensors",
395
+ "vision_model.encoder.layers.18.norm2.bias": "model-00001-of-00004.safetensors",
396
+ "vision_model.encoder.layers.18.norm2.weight": "model-00001-of-00004.safetensors",
397
+ "vision_model.encoder.layers.19.attn.proj.bias": "model-00001-of-00004.safetensors",
398
+ "vision_model.encoder.layers.19.attn.proj.weight": "model-00001-of-00004.safetensors",
399
+ "vision_model.encoder.layers.19.attn.qkv.bias": "model-00001-of-00004.safetensors",
400
+ "vision_model.encoder.layers.19.attn.qkv.weight": "model-00001-of-00004.safetensors",
401
+ "vision_model.encoder.layers.19.ls1": "model-00001-of-00004.safetensors",
402
+ "vision_model.encoder.layers.19.ls2": "model-00001-of-00004.safetensors",
403
+ "vision_model.encoder.layers.19.mlp.fc1.bias": "model-00001-of-00004.safetensors",
404
+ "vision_model.encoder.layers.19.mlp.fc1.weight": "model-00001-of-00004.safetensors",
405
+ "vision_model.encoder.layers.19.mlp.fc2.bias": "model-00001-of-00004.safetensors",
406
+ "vision_model.encoder.layers.19.mlp.fc2.weight": "model-00001-of-00004.safetensors",
407
+ "vision_model.encoder.layers.19.norm1.bias": "model-00001-of-00004.safetensors",
408
+ "vision_model.encoder.layers.19.norm1.weight": "model-00001-of-00004.safetensors",
409
+ "vision_model.encoder.layers.19.norm2.bias": "model-00001-of-00004.safetensors",
410
+ "vision_model.encoder.layers.19.norm2.weight": "model-00001-of-00004.safetensors",
411
+ "vision_model.encoder.layers.2.attn.proj.bias": "model-00001-of-00004.safetensors",
412
+ "vision_model.encoder.layers.2.attn.proj.weight": "model-00001-of-00004.safetensors",
413
+ "vision_model.encoder.layers.2.attn.qkv.bias": "model-00001-of-00004.safetensors",
414
+ "vision_model.encoder.layers.2.attn.qkv.weight": "model-00001-of-00004.safetensors",
415
+ "vision_model.encoder.layers.2.ls1": "model-00001-of-00004.safetensors",
416
+ "vision_model.encoder.layers.2.ls2": "model-00001-of-00004.safetensors",
417
+ "vision_model.encoder.layers.2.mlp.fc1.bias": "model-00001-of-00004.safetensors",
418
+ "vision_model.encoder.layers.2.mlp.fc1.weight": "model-00001-of-00004.safetensors",
419
+ "vision_model.encoder.layers.2.mlp.fc2.bias": "model-00001-of-00004.safetensors",
420
+ "vision_model.encoder.layers.2.mlp.fc2.weight": "model-00001-of-00004.safetensors",
421
+ "vision_model.encoder.layers.2.norm1.bias": "model-00001-of-00004.safetensors",
422
+ "vision_model.encoder.layers.2.norm1.weight": "model-00001-of-00004.safetensors",
423
+ "vision_model.encoder.layers.2.norm2.bias": "model-00001-of-00004.safetensors",
424
+ "vision_model.encoder.layers.2.norm2.weight": "model-00001-of-00004.safetensors",
425
+ "vision_model.encoder.layers.20.attn.proj.bias": "model-00001-of-00004.safetensors",
426
+ "vision_model.encoder.layers.20.attn.proj.weight": "model-00001-of-00004.safetensors",
427
+ "vision_model.encoder.layers.20.attn.qkv.bias": "model-00001-of-00004.safetensors",
428
+ "vision_model.encoder.layers.20.attn.qkv.weight": "model-00001-of-00004.safetensors",
429
+ "vision_model.encoder.layers.20.ls1": "model-00001-of-00004.safetensors",
430
+ "vision_model.encoder.layers.20.ls2": "model-00001-of-00004.safetensors",
431
+ "vision_model.encoder.layers.20.mlp.fc1.bias": "model-00001-of-00004.safetensors",
432
+ "vision_model.encoder.layers.20.mlp.fc1.weight": "model-00001-of-00004.safetensors",
433
+ "vision_model.encoder.layers.20.mlp.fc2.bias": "model-00001-of-00004.safetensors",
434
+ "vision_model.encoder.layers.20.mlp.fc2.weight": "model-00001-of-00004.safetensors",
435
+ "vision_model.encoder.layers.20.norm1.bias": "model-00001-of-00004.safetensors",
436
+ "vision_model.encoder.layers.20.norm1.weight": "model-00001-of-00004.safetensors",
437
+ "vision_model.encoder.layers.20.norm2.bias": "model-00001-of-00004.safetensors",
438
+ "vision_model.encoder.layers.20.norm2.weight": "model-00001-of-00004.safetensors",
439
+ "vision_model.encoder.layers.21.attn.proj.bias": "model-00001-of-00004.safetensors",
440
+ "vision_model.encoder.layers.21.attn.proj.weight": "model-00001-of-00004.safetensors",
441
+ "vision_model.encoder.layers.21.attn.qkv.bias": "model-00001-of-00004.safetensors",
442
+ "vision_model.encoder.layers.21.attn.qkv.weight": "model-00001-of-00004.safetensors",
443
+ "vision_model.encoder.layers.21.ls1": "model-00001-of-00004.safetensors",
444
+ "vision_model.encoder.layers.21.ls2": "model-00001-of-00004.safetensors",
445
+ "vision_model.encoder.layers.21.mlp.fc1.bias": "model-00001-of-00004.safetensors",
446
+ "vision_model.encoder.layers.21.mlp.fc1.weight": "model-00001-of-00004.safetensors",
447
+ "vision_model.encoder.layers.21.mlp.fc2.bias": "model-00001-of-00004.safetensors",
448
+ "vision_model.encoder.layers.21.mlp.fc2.weight": "model-00001-of-00004.safetensors",
449
+ "vision_model.encoder.layers.21.norm1.bias": "model-00001-of-00004.safetensors",
450
+ "vision_model.encoder.layers.21.norm1.weight": "model-00001-of-00004.safetensors",
451
+ "vision_model.encoder.layers.21.norm2.bias": "model-00001-of-00004.safetensors",
452
+ "vision_model.encoder.layers.21.norm2.weight": "model-00001-of-00004.safetensors",
453
+ "vision_model.encoder.layers.22.attn.proj.bias": "model-00001-of-00004.safetensors",
454
+ "vision_model.encoder.layers.22.attn.proj.weight": "model-00001-of-00004.safetensors",
455
+ "vision_model.encoder.layers.22.attn.qkv.bias": "model-00001-of-00004.safetensors",
456
+ "vision_model.encoder.layers.22.attn.qkv.weight": "model-00001-of-00004.safetensors",
457
+ "vision_model.encoder.layers.22.ls1": "model-00001-of-00004.safetensors",
458
+ "vision_model.encoder.layers.22.ls2": "model-00001-of-00004.safetensors",
459
+ "vision_model.encoder.layers.22.mlp.fc1.bias": "model-00001-of-00004.safetensors",
460
+ "vision_model.encoder.layers.22.mlp.fc1.weight": "model-00001-of-00004.safetensors",
461
+ "vision_model.encoder.layers.22.mlp.fc2.bias": "model-00001-of-00004.safetensors",
462
+ "vision_model.encoder.layers.22.mlp.fc2.weight": "model-00001-of-00004.safetensors",
463
+ "vision_model.encoder.layers.22.norm1.bias": "model-00001-of-00004.safetensors",
464
+ "vision_model.encoder.layers.22.norm1.weight": "model-00001-of-00004.safetensors",
465
+ "vision_model.encoder.layers.22.norm2.bias": "model-00001-of-00004.safetensors",
466
+ "vision_model.encoder.layers.22.norm2.weight": "model-00001-of-00004.safetensors",
467
+ "vision_model.encoder.layers.23.attn.proj.bias": "model-00001-of-00004.safetensors",
468
+ "vision_model.encoder.layers.23.attn.proj.weight": "model-00001-of-00004.safetensors",
469
+ "vision_model.encoder.layers.23.attn.qkv.bias": "model-00001-of-00004.safetensors",
470
+ "vision_model.encoder.layers.23.attn.qkv.weight": "model-00001-of-00004.safetensors",
471
+ "vision_model.encoder.layers.23.ls1": "model-00001-of-00004.safetensors",
472
+ "vision_model.encoder.layers.23.ls2": "model-00001-of-00004.safetensors",
473
+ "vision_model.encoder.layers.23.mlp.fc1.bias": "model-00001-of-00004.safetensors",
474
+ "vision_model.encoder.layers.23.mlp.fc1.weight": "model-00001-of-00004.safetensors",
475
+ "vision_model.encoder.layers.23.mlp.fc2.bias": "model-00001-of-00004.safetensors",
476
+ "vision_model.encoder.layers.23.mlp.fc2.weight": "model-00001-of-00004.safetensors",
477
+ "vision_model.encoder.layers.23.norm1.bias": "model-00001-of-00004.safetensors",
478
+ "vision_model.encoder.layers.23.norm1.weight": "model-00001-of-00004.safetensors",
479
+ "vision_model.encoder.layers.23.norm2.bias": "model-00001-of-00004.safetensors",
480
+ "vision_model.encoder.layers.23.norm2.weight": "model-00001-of-00004.safetensors",
481
+ "vision_model.encoder.layers.3.attn.proj.bias": "model-00001-of-00004.safetensors",
482
+ "vision_model.encoder.layers.3.attn.proj.weight": "model-00001-of-00004.safetensors",
483
+ "vision_model.encoder.layers.3.attn.qkv.bias": "model-00001-of-00004.safetensors",
484
+ "vision_model.encoder.layers.3.attn.qkv.weight": "model-00001-of-00004.safetensors",
485
+ "vision_model.encoder.layers.3.ls1": "model-00001-of-00004.safetensors",
486
+ "vision_model.encoder.layers.3.ls2": "model-00001-of-00004.safetensors",
487
+ "vision_model.encoder.layers.3.mlp.fc1.bias": "model-00001-of-00004.safetensors",
488
+ "vision_model.encoder.layers.3.mlp.fc1.weight": "model-00001-of-00004.safetensors",
489
+ "vision_model.encoder.layers.3.mlp.fc2.bias": "model-00001-of-00004.safetensors",
490
+ "vision_model.encoder.layers.3.mlp.fc2.weight": "model-00001-of-00004.safetensors",
491
+ "vision_model.encoder.layers.3.norm1.bias": "model-00001-of-00004.safetensors",
492
+ "vision_model.encoder.layers.3.norm1.weight": "model-00001-of-00004.safetensors",
493
+ "vision_model.encoder.layers.3.norm2.bias": "model-00001-of-00004.safetensors",
494
+ "vision_model.encoder.layers.3.norm2.weight": "model-00001-of-00004.safetensors",
495
+ "vision_model.encoder.layers.4.attn.proj.bias": "model-00001-of-00004.safetensors",
496
+ "vision_model.encoder.layers.4.attn.proj.weight": "model-00001-of-00004.safetensors",
497
+ "vision_model.encoder.layers.4.attn.qkv.bias": "model-00001-of-00004.safetensors",
498
+ "vision_model.encoder.layers.4.attn.qkv.weight": "model-00001-of-00004.safetensors",
499
+ "vision_model.encoder.layers.4.ls1": "model-00001-of-00004.safetensors",
500
+ "vision_model.encoder.layers.4.ls2": "model-00001-of-00004.safetensors",
501
+ "vision_model.encoder.layers.4.mlp.fc1.bias": "model-00001-of-00004.safetensors",
502
+ "vision_model.encoder.layers.4.mlp.fc1.weight": "model-00001-of-00004.safetensors",
503
+ "vision_model.encoder.layers.4.mlp.fc2.bias": "model-00001-of-00004.safetensors",
504
+ "vision_model.encoder.layers.4.mlp.fc2.weight": "model-00001-of-00004.safetensors",
505
+ "vision_model.encoder.layers.4.norm1.bias": "model-00001-of-00004.safetensors",
506
+ "vision_model.encoder.layers.4.norm1.weight": "model-00001-of-00004.safetensors",
507
+ "vision_model.encoder.layers.4.norm2.bias": "model-00001-of-00004.safetensors",
508
+ "vision_model.encoder.layers.4.norm2.weight": "model-00001-of-00004.safetensors",
509
+ "vision_model.encoder.layers.5.attn.proj.bias": "model-00001-of-00004.safetensors",
510
+ "vision_model.encoder.layers.5.attn.proj.weight": "model-00001-of-00004.safetensors",
511
+ "vision_model.encoder.layers.5.attn.qkv.bias": "model-00001-of-00004.safetensors",
512
+ "vision_model.encoder.layers.5.attn.qkv.weight": "model-00001-of-00004.safetensors",
513
+ "vision_model.encoder.layers.5.ls1": "model-00001-of-00004.safetensors",
514
+ "vision_model.encoder.layers.5.ls2": "model-00001-of-00004.safetensors",
515
+ "vision_model.encoder.layers.5.mlp.fc1.bias": "model-00001-of-00004.safetensors",
516
+ "vision_model.encoder.layers.5.mlp.fc1.weight": "model-00001-of-00004.safetensors",
517
+ "vision_model.encoder.layers.5.mlp.fc2.bias": "model-00001-of-00004.safetensors",
518
+ "vision_model.encoder.layers.5.mlp.fc2.weight": "model-00001-of-00004.safetensors",
519
+ "vision_model.encoder.layers.5.norm1.bias": "model-00001-of-00004.safetensors",
520
+ "vision_model.encoder.layers.5.norm1.weight": "model-00001-of-00004.safetensors",
521
+ "vision_model.encoder.layers.5.norm2.bias": "model-00001-of-00004.safetensors",
522
+ "vision_model.encoder.layers.5.norm2.weight": "model-00001-of-00004.safetensors",
523
+ "vision_model.encoder.layers.6.attn.proj.bias": "model-00001-of-00004.safetensors",
524
+ "vision_model.encoder.layers.6.attn.proj.weight": "model-00001-of-00004.safetensors",
525
+ "vision_model.encoder.layers.6.attn.qkv.bias": "model-00001-of-00004.safetensors",
526
+ "vision_model.encoder.layers.6.attn.qkv.weight": "model-00001-of-00004.safetensors",
527
+ "vision_model.encoder.layers.6.ls1": "model-00001-of-00004.safetensors",
528
+ "vision_model.encoder.layers.6.ls2": "model-00001-of-00004.safetensors",
529
+ "vision_model.encoder.layers.6.mlp.fc1.bias": "model-00001-of-00004.safetensors",
530
+ "vision_model.encoder.layers.6.mlp.fc1.weight": "model-00001-of-00004.safetensors",
531
+ "vision_model.encoder.layers.6.mlp.fc2.bias": "model-00001-of-00004.safetensors",
532
+ "vision_model.encoder.layers.6.mlp.fc2.weight": "model-00001-of-00004.safetensors",
533
+ "vision_model.encoder.layers.6.norm1.bias": "model-00001-of-00004.safetensors",
534
+ "vision_model.encoder.layers.6.norm1.weight": "model-00001-of-00004.safetensors",
535
+ "vision_model.encoder.layers.6.norm2.bias": "model-00001-of-00004.safetensors",
536
+ "vision_model.encoder.layers.6.norm2.weight": "model-00001-of-00004.safetensors",
537
+ "vision_model.encoder.layers.7.attn.proj.bias": "model-00001-of-00004.safetensors",
538
+ "vision_model.encoder.layers.7.attn.proj.weight": "model-00001-of-00004.safetensors",
539
+ "vision_model.encoder.layers.7.attn.qkv.bias": "model-00001-of-00004.safetensors",
540
+ "vision_model.encoder.layers.7.attn.qkv.weight": "model-00001-of-00004.safetensors",
541
+ "vision_model.encoder.layers.7.ls1": "model-00001-of-00004.safetensors",
542
+ "vision_model.encoder.layers.7.ls2": "model-00001-of-00004.safetensors",
543
+ "vision_model.encoder.layers.7.mlp.fc1.bias": "model-00001-of-00004.safetensors",
544
+ "vision_model.encoder.layers.7.mlp.fc1.weight": "model-00001-of-00004.safetensors",
545
+ "vision_model.encoder.layers.7.mlp.fc2.bias": "model-00001-of-00004.safetensors",
546
+ "vision_model.encoder.layers.7.mlp.fc2.weight": "model-00001-of-00004.safetensors",
547
+ "vision_model.encoder.layers.7.norm1.bias": "model-00001-of-00004.safetensors",
548
+ "vision_model.encoder.layers.7.norm1.weight": "model-00001-of-00004.safetensors",
549
+ "vision_model.encoder.layers.7.norm2.bias": "model-00001-of-00004.safetensors",
550
+ "vision_model.encoder.layers.7.norm2.weight": "model-00001-of-00004.safetensors",
551
+ "vision_model.encoder.layers.8.attn.proj.bias": "model-00001-of-00004.safetensors",
552
+ "vision_model.encoder.layers.8.attn.proj.weight": "model-00001-of-00004.safetensors",
553
+ "vision_model.encoder.layers.8.attn.qkv.bias": "model-00001-of-00004.safetensors",
554
+ "vision_model.encoder.layers.8.attn.qkv.weight": "model-00001-of-00004.safetensors",
555
+ "vision_model.encoder.layers.8.ls1": "model-00001-of-00004.safetensors",
556
+ "vision_model.encoder.layers.8.ls2": "model-00001-of-00004.safetensors",
557
+ "vision_model.encoder.layers.8.mlp.fc1.bias": "model-00001-of-00004.safetensors",
558
+ "vision_model.encoder.layers.8.mlp.fc1.weight": "model-00001-of-00004.safetensors",
559
+ "vision_model.encoder.layers.8.mlp.fc2.bias": "model-00001-of-00004.safetensors",
560
+ "vision_model.encoder.layers.8.mlp.fc2.weight": "model-00001-of-00004.safetensors",
561
+ "vision_model.encoder.layers.8.norm1.bias": "model-00001-of-00004.safetensors",
562
+ "vision_model.encoder.layers.8.norm1.weight": "model-00001-of-00004.safetensors",
563
+ "vision_model.encoder.layers.8.norm2.bias": "model-00001-of-00004.safetensors",
564
+ "vision_model.encoder.layers.8.norm2.weight": "model-00001-of-00004.safetensors",
565
+ "vision_model.encoder.layers.9.attn.proj.bias": "model-00001-of-00004.safetensors",
566
+ "vision_model.encoder.layers.9.attn.proj.weight": "model-00001-of-00004.safetensors",
567
+ "vision_model.encoder.layers.9.attn.qkv.bias": "model-00001-of-00004.safetensors",
568
+ "vision_model.encoder.layers.9.attn.qkv.weight": "model-00001-of-00004.safetensors",
569
+ "vision_model.encoder.layers.9.ls1": "model-00001-of-00004.safetensors",
570
+ "vision_model.encoder.layers.9.ls2": "model-00001-of-00004.safetensors",
571
+ "vision_model.encoder.layers.9.mlp.fc1.bias": "model-00001-of-00004.safetensors",
572
+ "vision_model.encoder.layers.9.mlp.fc1.weight": "model-00001-of-00004.safetensors",
573
+ "vision_model.encoder.layers.9.mlp.fc2.bias": "model-00001-of-00004.safetensors",
574
+ "vision_model.encoder.layers.9.mlp.fc2.weight": "model-00001-of-00004.safetensors",
575
+ "vision_model.encoder.layers.9.norm1.bias": "model-00001-of-00004.safetensors",
576
+ "vision_model.encoder.layers.9.norm1.weight": "model-00001-of-00004.safetensors",
577
+ "vision_model.encoder.layers.9.norm2.bias": "model-00001-of-00004.safetensors",
578
+ "vision_model.encoder.layers.9.norm2.weight": "model-00001-of-00004.safetensors"
579
+ }
580
+ }
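The weight map above ends here: every parameter name in the checkpoint is paired with the shard file that stores it. As a hedged illustration (the directory path below is hypothetical and the `safetensors` package is assumed to be installed), a single tensor can be located and loaded from its shard like this:

# Minimal sketch: resolve one parameter from a sharded safetensors checkpoint.
# The checkpoint directory is a hypothetical local path; the index layout
# ({"metadata": ..., "weight_map": {...}}) is the standard one shown above.
import json
from safetensors import safe_open

ckpt_dir = "./InternVL2_5-8B-finetune"  # hypothetical path to this repo's files
with open(f"{ckpt_dir}/model.safetensors.index.json") as f:
    weight_map = json.load(f)["weight_map"]

name = "vision_model.encoder.layers.5.attn.qkv.weight"
shard = weight_map[name]  # e.g. "model-00001-of-00004.safetensors", as listed above
with safe_open(f"{ckpt_dir}/{shard}", framework="pt", device="cpu") as f:
    tensor = f.get_tensor(name)  # loads only this tensor, not the whole shard
print(tensor.shape, tensor.dtype)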
modeling_intern_vit.py ADDED
@@ -0,0 +1,430 @@
1
+ # --------------------------------------------------------
2
+ # InternVL
3
+ # Copyright (c) 2024 OpenGVLab
4
+ # Licensed under The MIT License [see LICENSE for details]
5
+ # --------------------------------------------------------
6
+
7
+ from typing import Optional, Tuple, Union
8
+
9
+ import torch
10
+ import torch.nn.functional as F
11
+ import torch.utils.checkpoint
12
+ from einops import rearrange
13
+ from timm.models.layers import DropPath
14
+ from torch import nn
15
+ from transformers.activations import ACT2FN
16
+ from transformers.modeling_outputs import (BaseModelOutput,
17
+ BaseModelOutputWithPooling)
18
+ from transformers.modeling_utils import PreTrainedModel
19
+ from transformers.utils import logging
20
+
21
+ from .configuration_intern_vit import InternVisionConfig
22
+
23
+ try:
24
+ from flash_attn.bert_padding import pad_input, unpad_input
25
+ from flash_attn.flash_attn_interface import \
26
+ flash_attn_varlen_qkvpacked_func
27
+ has_flash_attn = True
28
+ except:
29
+ print('FlashAttention2 is not installed.')
30
+ has_flash_attn = False
31
+
32
+ logger = logging.get_logger(__name__)
33
+
34
+
35
+ class FlashAttention(nn.Module):
36
+ """Implement the scaled dot product attention with softmax.
37
+ Arguments
38
+ ---------
39
+ softmax_scale: The temperature to use for the softmax attention.
40
+ (default: 1/sqrt(d_keys) where d_keys is computed at
41
+ runtime)
42
+ attention_dropout: The dropout rate to apply to the attention
43
+ (default: 0.0)
44
+ """
45
+
46
+ def __init__(self, softmax_scale=None, attention_dropout=0.0, device=None, dtype=None):
47
+ super().__init__()
48
+ self.softmax_scale = softmax_scale
49
+ self.dropout_p = attention_dropout
50
+
51
+ def forward(self, qkv, key_padding_mask=None, causal=False, cu_seqlens=None,
52
+ max_s=None, need_weights=False):
53
+ """Implements the multihead softmax attention.
54
+ Arguments
55
+ ---------
56
+ qkv: The tensor containing the query, key, and value. (B, S, 3, H, D) if key_padding_mask is None
57
+ if unpadded: (nnz, 3, h, d)
58
+ key_padding_mask: a bool tensor of shape (B, S)
59
+ """
60
+ assert not need_weights
61
+ assert qkv.dtype in [torch.float16, torch.bfloat16]
62
+ assert qkv.is_cuda
63
+
64
+ if cu_seqlens is None:
65
+ batch_size = qkv.shape[0]
66
+ seqlen = qkv.shape[1]
67
+ if key_padding_mask is None:
68
+ qkv = rearrange(qkv, 'b s ... -> (b s) ...')
69
+ max_s = seqlen
70
+ cu_seqlens = torch.arange(0, (batch_size + 1) * seqlen, step=seqlen, dtype=torch.int32,
71
+ device=qkv.device)
72
+ output = flash_attn_varlen_qkvpacked_func(
73
+ qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
74
+ softmax_scale=self.softmax_scale, causal=causal
75
+ )
76
+ output = rearrange(output, '(b s) ... -> b s ...', b=batch_size)
77
+ else:
78
+ nheads = qkv.shape[-2]
79
+ x = rearrange(qkv, 'b s three h d -> b s (three h d)')
80
+ x_unpad, indices, cu_seqlens, max_s = unpad_input(x, key_padding_mask)
81
+ x_unpad = rearrange(x_unpad, 'nnz (three h d) -> nnz three h d', three=3, h=nheads)
82
+ output_unpad = flash_attn_varlen_qkvpacked_func(
83
+ x_unpad, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
84
+ softmax_scale=self.softmax_scale, causal=causal
85
+ )
86
+ output = rearrange(pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'),
87
+ indices, batch_size, seqlen),
88
+ 'b s (h d) -> b s h d', h=nheads)
89
+ else:
90
+ assert max_s is not None
91
+ output = flash_attn_varlen_qkvpacked_func(
92
+ qkv, cu_seqlens, max_s, self.dropout_p if self.training else 0.0,
93
+ softmax_scale=self.softmax_scale, causal=causal
94
+ )
95
+
96
+ return output, None
97
+
98
+
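The FlashAttention wrapper above feeds the varlen kernel a packed (total_tokens, 3, heads, head_dim) tensor and describes the sequence boundaries with `cu_seqlens`. A small sketch of that bookkeeping, with made-up sequence lengths and no flash_attn dependency:

# Sketch: cu_seqlens for two packed sequences of length 3 and 5.
# The varlen kernels read tokens 0:3 as sequence 0 and 3:8 as sequence 1.
import torch
import torch.nn.functional as F

seqlens = torch.tensor([3, 5], dtype=torch.int32)
cu_seqlens = F.pad(torch.cumsum(seqlens, dim=0, dtype=torch.int32), (1, 0))
print(cu_seqlens.tolist())   # [0, 3, 8] -> cumulative offsets into the packed dimension
max_s = int(seqlens.max())   # 5, the longest sequence, passed as max_seqlen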
99
+ class InternRMSNorm(nn.Module):
100
+ def __init__(self, hidden_size, eps=1e-6):
101
+ super().__init__()
102
+ self.weight = nn.Parameter(torch.ones(hidden_size))
103
+ self.variance_epsilon = eps
104
+
105
+ def forward(self, hidden_states):
106
+ input_dtype = hidden_states.dtype
107
+ hidden_states = hidden_states.to(torch.float32)
108
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
109
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
110
+ return self.weight * hidden_states.to(input_dtype)
111
+
112
+
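InternRMSNorm above scales each hidden vector by its root mean square without subtracting the mean. A quick sanity check of the module against the explicit formula x / sqrt(mean(x**2) + eps) * weight (sizes are illustrative):

# Sketch: InternRMSNorm matches the explicit RMS formula.
import torch

x = torch.randn(2, 5, 16)
norm = InternRMSNorm(16, eps=1e-6)
manual = x / torch.sqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6) * norm.weight
print(torch.allclose(norm(x), manual, atol=1e-5))  # True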
113
+ try:
114
+ from apex.normalization import FusedRMSNorm
115
+
116
+ InternRMSNorm = FusedRMSNorm # noqa
117
+
118
+ logger.info('Discovered apex.normalization.FusedRMSNorm - will use it instead of InternRMSNorm')
119
+ except ImportError:
120
+ # using the normal InternRMSNorm
121
+ pass
122
+ except Exception:
123
+ logger.warning('discovered apex but it failed to load, falling back to InternRMSNorm')
124
+ pass
125
+
126
+
127
+ NORM2FN = {
128
+ 'rms_norm': InternRMSNorm,
129
+ 'layer_norm': nn.LayerNorm,
130
+ }
131
+
132
+
133
+ class InternVisionEmbeddings(nn.Module):
134
+ def __init__(self, config: InternVisionConfig):
135
+ super().__init__()
136
+ self.config = config
137
+ self.embed_dim = config.hidden_size
138
+ self.image_size = config.image_size
139
+ self.patch_size = config.patch_size
140
+
141
+ self.class_embedding = nn.Parameter(
142
+ torch.randn(1, 1, self.embed_dim),
143
+ )
144
+
145
+ self.patch_embedding = nn.Conv2d(
146
+ in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size
147
+ )
148
+
149
+ self.num_patches = (self.image_size // self.patch_size) ** 2
150
+ self.num_positions = self.num_patches + 1
151
+
152
+ self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim))
153
+
154
+ def _get_pos_embed(self, pos_embed, H, W):
155
+ target_dtype = pos_embed.dtype
156
+ pos_embed = pos_embed.float().reshape(
157
+ 1, self.image_size // self.patch_size, self.image_size // self.patch_size, -1).permute(0, 3, 1, 2)
158
+ pos_embed = F.interpolate(pos_embed, size=(H, W), mode='bicubic', align_corners=False). \
159
+ reshape(1, -1, H * W).permute(0, 2, 1).to(target_dtype)
160
+ return pos_embed
161
+
162
+ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
163
+ target_dtype = self.patch_embedding.weight.dtype
164
+ patch_embeds = self.patch_embedding(pixel_values) # shape = [*, channel, width, height]
165
+ batch_size, _, height, width = patch_embeds.shape
166
+ patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
167
+ class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
168
+ embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
169
+ position_embedding = torch.cat([
170
+ self.position_embedding[:, :1, :],
171
+ self._get_pos_embed(self.position_embedding[:, 1:, :], height, width)
172
+ ], dim=1)
173
+ embeddings = embeddings + position_embedding.to(target_dtype)
174
+ return embeddings
175
+
176
+
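`_get_pos_embed` above lets the learned position grid follow whatever patch grid the current input produces, by bicubically resizing everything except the CLS slot. The same operation in isolation, with illustrative sizes rather than the model's real ones:

# Sketch: resize a (1, 1 + 32*32, C) position embedding to a 24x24 patch grid.
import torch
import torch.nn.functional as F

C, old_grid, new_grid = 64, 32, 24                     # illustrative sizes
pos = torch.randn(1, 1 + old_grid * old_grid, C)
cls_tok, grid = pos[:, :1], pos[:, 1:]
grid = grid.reshape(1, old_grid, old_grid, C).permute(0, 3, 1, 2)
grid = F.interpolate(grid, size=(new_grid, new_grid), mode='bicubic', align_corners=False)
grid = grid.reshape(1, C, new_grid * new_grid).permute(0, 2, 1)
print(torch.cat([cls_tok, grid], dim=1).shape)         # (1, 1 + 24*24, C) = (1, 577, 64)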
177
+ class InternAttention(nn.Module):
178
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
179
+
180
+ def __init__(self, config: InternVisionConfig):
181
+ super().__init__()
182
+ self.config = config
183
+ self.embed_dim = config.hidden_size
184
+ self.num_heads = config.num_attention_heads
185
+ self.use_flash_attn = config.use_flash_attn and has_flash_attn
186
+ if config.use_flash_attn and not has_flash_attn:
187
+ print('Warning: Flash Attention is not available, use_flash_attn is set to False.')
188
+ self.head_dim = self.embed_dim // self.num_heads
189
+ if self.head_dim * self.num_heads != self.embed_dim:
190
+ raise ValueError(
191
+ f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:'
192
+ f' {self.num_heads}).'
193
+ )
194
+
195
+ self.scale = self.head_dim ** -0.5
196
+ self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim, bias=config.qkv_bias)
197
+ self.attn_drop = nn.Dropout(config.attention_dropout)
198
+ self.proj_drop = nn.Dropout(config.dropout)
199
+
200
+ self.qk_normalization = config.qk_normalization
201
+
202
+ if self.qk_normalization:
203
+ self.q_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
204
+ self.k_norm = InternRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
205
+
206
+ if self.use_flash_attn:
207
+ self.inner_attn = FlashAttention(attention_dropout=config.attention_dropout)
208
+ self.proj = nn.Linear(self.embed_dim, self.embed_dim)
209
+
210
+ def _naive_attn(self, x):
211
+ B, N, C = x.shape
212
+ qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
213
+ q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple)
214
+
215
+ if self.qk_normalization:
216
+ B_, H_, N_, D_ = q.shape
217
+ q = self.q_norm(q.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
218
+ k = self.k_norm(k.transpose(1, 2).flatten(-2, -1)).view(B_, N_, H_, D_).transpose(1, 2)
219
+
220
+ attn = ((q * self.scale) @ k.transpose(-2, -1))
221
+ attn = attn.softmax(dim=-1)
222
+ attn = self.attn_drop(attn)
223
+
224
+ x = (attn @ v).transpose(1, 2).reshape(B, N, C)
225
+ x = self.proj(x)
226
+ x = self.proj_drop(x)
227
+ return x
228
+
229
+ def _flash_attn(self, x, key_padding_mask=None, need_weights=False):
230
+ qkv = self.qkv(x)
231
+ qkv = rearrange(qkv, 'b s (three h d) -> b s three h d', three=3, h=self.num_heads)
232
+
233
+ if self.qk_normalization:
234
+ q, k, v = qkv.unbind(2)
235
+ q = self.q_norm(q.flatten(-2, -1)).view(q.shape)
236
+ k = self.k_norm(k.flatten(-2, -1)).view(k.shape)
237
+ qkv = torch.stack([q, k, v], dim=2)
238
+
239
+ context, _ = self.inner_attn(
240
+ qkv, key_padding_mask=key_padding_mask, need_weights=need_weights, causal=False
241
+ )
242
+ outs = self.proj(rearrange(context, 'b s h d -> b s (h d)'))
243
+ outs = self.proj_drop(outs)
244
+ return outs
245
+
246
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
247
+ x = self._naive_attn(hidden_states) if not self.use_flash_attn else self._flash_attn(hidden_states)
248
+ return x
249
+
250
+
251
+ class InternMLP(nn.Module):
252
+ def __init__(self, config: InternVisionConfig):
253
+ super().__init__()
254
+ self.config = config
255
+ self.act = ACT2FN[config.hidden_act]
256
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
257
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
258
+
259
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
260
+ hidden_states = self.fc1(hidden_states)
261
+ hidden_states = self.act(hidden_states)
262
+ hidden_states = self.fc2(hidden_states)
263
+ return hidden_states
264
+
265
+
266
+ class InternVisionEncoderLayer(nn.Module):
267
+ def __init__(self, config: InternVisionConfig, drop_path_rate: float):
268
+ super().__init__()
269
+ self.embed_dim = config.hidden_size
270
+ self.intermediate_size = config.intermediate_size
271
+ self.norm_type = config.norm_type
272
+
273
+ self.attn = InternAttention(config)
274
+ self.mlp = InternMLP(config)
275
+ self.norm1 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)
276
+ self.norm2 = NORM2FN[self.norm_type](self.embed_dim, eps=config.layer_norm_eps)
277
+
278
+ self.ls1 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
279
+ self.ls2 = nn.Parameter(config.initializer_factor * torch.ones(self.embed_dim))
280
+ self.drop_path1 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
281
+ self.drop_path2 = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
282
+
283
+ def forward(
284
+ self,
285
+ hidden_states: torch.Tensor,
286
+ ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor], Optional[Tuple[torch.FloatTensor]]]:
287
+ """
288
+ Args:
289
+ hidden_states (`Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]`): input to the layer of shape `(batch, seq_len, embed_dim)`
290
+ """
291
+ hidden_states = hidden_states + self.drop_path1(self.attn(self.norm1(hidden_states).to(hidden_states.dtype)) * self.ls1)
292
+
293
+ hidden_states = hidden_states + self.drop_path2(self.mlp(self.norm2(hidden_states).to(hidden_states.dtype)) * self.ls2)
294
+
295
+ return hidden_states
296
+
297
+
298
+ class InternVisionEncoder(nn.Module):
299
+ """
300
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
301
+ [`InternEncoderLayer`].
302
+
303
+ Args:
304
+ config (`InternConfig`):
305
+ The corresponding vision configuration for the `InternEncoder`.
306
+ """
307
+
308
+ def __init__(self, config: InternVisionConfig):
309
+ super().__init__()
310
+ self.config = config
311
+ # stochastic depth decay rule
312
+ dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.num_hidden_layers)]
313
+ self.layers = nn.ModuleList([
314
+ InternVisionEncoderLayer(config, dpr[idx]) for idx in range(config.num_hidden_layers)])
315
+ self.gradient_checkpointing = True
316
+
317
+ def forward(
318
+ self,
319
+ inputs_embeds,
320
+ output_hidden_states: Optional[bool] = None,
321
+ return_dict: Optional[bool] = None,
322
+ ) -> Union[Tuple, BaseModelOutput]:
323
+ r"""
324
+ Args:
325
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
326
+ Embedded representation of the inputs. Should be float, not int tokens.
327
+ output_hidden_states (`bool`, *optional*):
328
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
329
+ for more detail.
330
+ return_dict (`bool`, *optional*):
331
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
332
+ """
333
+ output_hidden_states = (
334
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
335
+ )
336
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
337
+
338
+ encoder_states = () if output_hidden_states else None
339
+ hidden_states = inputs_embeds
340
+
341
+ for idx, encoder_layer in enumerate(self.layers):
342
+ if output_hidden_states:
343
+ encoder_states = encoder_states + (hidden_states,)
344
+ if self.gradient_checkpointing and self.training:
345
+ layer_outputs = torch.utils.checkpoint.checkpoint(
346
+ encoder_layer,
347
+ hidden_states)
348
+ else:
349
+ layer_outputs = encoder_layer(
350
+ hidden_states,
351
+ )
352
+ hidden_states = layer_outputs
353
+
354
+ if output_hidden_states:
355
+ encoder_states = encoder_states + (hidden_states,)
356
+
357
+ if not return_dict:
358
+ return tuple(v for v in [hidden_states, encoder_states] if v is not None)
359
+ return BaseModelOutput(
360
+ last_hidden_state=hidden_states, hidden_states=encoder_states
361
+ )
362
+
363
+
364
+ class InternVisionModel(PreTrainedModel):
365
+ main_input_name = 'pixel_values'
366
+ _supports_flash_attn_2 = True
367
+ config_class = InternVisionConfig
368
+ _no_split_modules = ['InternVisionEncoderLayer']
369
+
370
+ def __init__(self, config: InternVisionConfig):
371
+ super().__init__(config)
372
+ self.config = config
373
+
374
+ self.embeddings = InternVisionEmbeddings(config)
375
+ self.encoder = InternVisionEncoder(config)
376
+
377
+ def resize_pos_embeddings(self, old_size, new_size, patch_size):
378
+ pos_emb = self.embeddings.position_embedding
379
+ _, num_positions, embed_dim = pos_emb.shape
380
+ cls_emb = pos_emb[:, :1, :]
381
+ pos_emb = pos_emb[:, 1:, :].reshape(1, old_size // patch_size, old_size // patch_size, -1).permute(0, 3, 1, 2)
382
+ pos_emb = F.interpolate(pos_emb.float(), size=new_size // patch_size, mode='bicubic', align_corners=False)
383
+ pos_emb = pos_emb.to(cls_emb.dtype).reshape(1, embed_dim, -1).permute(0, 2, 1)
384
+ pos_emb = torch.cat([cls_emb, pos_emb], dim=1)
385
+ self.embeddings.position_embedding = nn.Parameter(pos_emb)
386
+ self.embeddings.image_size = new_size
387
+ logger.info('Resized position embeddings from {} to {}'.format(old_size, new_size))
388
+
389
+ def get_input_embeddings(self):
390
+ return self.embeddings
391
+
392
+ def forward(
393
+ self,
394
+ pixel_values: Optional[torch.FloatTensor] = None,
395
+ output_hidden_states: Optional[bool] = None,
396
+ return_dict: Optional[bool] = None,
397
+ pixel_embeds: Optional[torch.FloatTensor] = None,
398
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
399
+ output_hidden_states = (
400
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
401
+ )
402
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
403
+
404
+ if pixel_values is None and pixel_embeds is None:
405
+ raise ValueError('You have to specify pixel_values or pixel_embeds')
406
+
407
+ if pixel_embeds is not None:
408
+ hidden_states = pixel_embeds
409
+ else:
410
+ if len(pixel_values.shape) == 4:
411
+ hidden_states = self.embeddings(pixel_values)
412
+ else:
413
+ raise ValueError(f'wrong pixel_values size: {pixel_values.shape}')
414
+ encoder_outputs = self.encoder(
415
+ inputs_embeds=hidden_states,
416
+ output_hidden_states=output_hidden_states,
417
+ return_dict=return_dict,
418
+ )
419
+ last_hidden_state = encoder_outputs.last_hidden_state
420
+ pooled_output = last_hidden_state[:, 0, :]
421
+
422
+ if not return_dict:
423
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
424
+
425
+ return BaseModelOutputWithPooling(
426
+ last_hidden_state=last_hidden_state,
427
+ pooler_output=pooled_output,
428
+ hidden_states=encoder_outputs.hidden_states,
429
+ attentions=encoder_outputs.attentions,
430
+ )
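Before moving on to the language model, a hedged usage sketch of the vision tower defined above: a deliberately tiny, randomly initialized config run on CPU with flash attention disabled. The field values below are illustrative, not the shipped config.json; the repository files are assumed to be importable and timm/transformers installed.

# Sketch: forward pass through a tiny, randomly initialized InternVisionModel.
import torch
from configuration_intern_vit import InternVisionConfig
from modeling_intern_vit import InternVisionModel

cfg = InternVisionConfig(hidden_size=64, intermediate_size=128, num_hidden_layers=2,
                         num_attention_heads=4, image_size=64, patch_size=16,
                         use_flash_attn=False)
model = InternVisionModel(cfg).eval()
pixels = torch.randn(1, 3, 64, 64)                  # (batch, 3, H, W)
with torch.no_grad():
    out = model(pixel_values=pixels)
print(out.last_hidden_state.shape)                  # (1, 1 + (64 // 16) ** 2, 64) = (1, 17, 64)
print(out.pooler_output.shape)                      # (1, 64), the CLS token features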
modeling_internlm2.py ADDED
@@ -0,0 +1,1415 @@
1
+ # Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # This code is based on transformers/src/transformers/models/llama/modeling_llama.py
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ PyTorch InternLM2 model."""
17
+ import math
18
+ import queue
19
+ import threading
20
+ import warnings
21
+ from typing import List, Optional, Tuple, Union
22
+
23
+ import torch
24
+ import torch.nn.functional as F
25
+ import torch.utils.checkpoint
26
+ from einops import rearrange
27
+ from torch import nn
28
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
29
+ from transformers.activations import ACT2FN
30
+ from transformers.modeling_outputs import (BaseModelOutputWithPast,
31
+ CausalLMOutputWithPast,
32
+ SequenceClassifierOutputWithPast)
33
+ from transformers.modeling_utils import PreTrainedModel
34
+ from transformers.utils import (add_start_docstrings,
35
+ add_start_docstrings_to_model_forward, logging,
36
+ replace_return_docstrings)
37
+
38
+ try:
39
+ from transformers.generation.streamers import BaseStreamer
40
+ except: # noqa # pylint: disable=bare-except
41
+ BaseStreamer = None
42
+
43
+ from .configuration_internlm2 import InternLM2Config
44
+
45
+ logger = logging.get_logger(__name__)
46
+
47
+ _CONFIG_FOR_DOC = 'InternLM2Config'
48
+
49
+ flash_attn_func, flash_attn_varlen_func = None, None
50
+ pad_input, index_first_axis, unpad_input = None, None, None
51
+ try:
52
+ from flash_attn import flash_attn_func as _flash_attn_func
53
+ from flash_attn import flash_attn_varlen_func as _flash_attn_varlen_func
54
+ from flash_attn.bert_padding import index_first_axis as _index_first_axis
55
+ from flash_attn.bert_padding import pad_input as _pad_input
56
+ from flash_attn.bert_padding import unpad_input as _unpad_input
57
+
58
+ flash_attn_func, flash_attn_varlen_func = _flash_attn_func, _flash_attn_varlen_func
59
+ pad_input, index_first_axis, unpad_input = _pad_input, _index_first_axis, _unpad_input
60
+ has_flash_attn = True
61
+ except:
62
+ has_flash_attn = False
63
+
64
+
65
+ def _import_flash_attn():
66
+ global flash_attn_func, flash_attn_varlen_func
67
+ global pad_input, index_first_axis, unpad_input
68
+ try:
69
+ from flash_attn import flash_attn_func as _flash_attn_func
70
+ from flash_attn import \
71
+ flash_attn_varlen_func as _flash_attn_varlen_func
72
+ from flash_attn.bert_padding import \
73
+ index_first_axis as _index_first_axis
74
+ from flash_attn.bert_padding import pad_input as _pad_input
75
+ from flash_attn.bert_padding import unpad_input as _unpad_input
76
+ flash_attn_func, flash_attn_varlen_func = _flash_attn_func, _flash_attn_varlen_func
77
+ pad_input, index_first_axis, unpad_input = _pad_input, _index_first_axis, _unpad_input
78
+ except ImportError:
79
+ raise ImportError('flash_attn is not installed.')
80
+
81
+
82
+ # Copied from transformers.models.llama.modeling_llama._get_unpad_data
83
+ def _get_unpad_data(attention_mask):
84
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
85
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
86
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
87
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
88
+ return (
89
+ indices,
90
+ cu_seqlens,
91
+ max_seqlen_in_batch,
92
+ )
93
+
94
+
95
+ # Copied from transformers.models.bart.modeling_bart._make_causal_mask
96
+ def _make_causal_mask(
97
+ input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
98
+ ):
99
+ """
100
+ Make causal mask used for bi-directional self-attention.
101
+ """
102
+ bsz, tgt_len = input_ids_shape
103
+ mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
104
+ mask_cond = torch.arange(mask.size(-1), device=device)
105
+ mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
106
+ mask = mask.to(dtype)
107
+
108
+ if past_key_values_length > 0:
109
+ mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
110
+ return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
111
+
112
+
113
+ # Copied from transformers.models.bart.modeling_bart._expand_mask
114
+ def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
115
+ """
116
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
117
+ """
118
+ bsz, src_len = mask.size()
119
+ tgt_len = tgt_len if tgt_len is not None else src_len
120
+
121
+ expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
122
+
123
+ inverted_mask = 1.0 - expanded_mask
124
+
125
+ return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
126
+
127
+
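`_make_causal_mask` and `_expand_mask` above convert a 0/1 padding mask into the additive bias that later gets summed onto the raw attention scores: allowed positions contribute 0, blocked ones a very large negative value. A tiny worked example for one sequence of length 3 whose last token is padding:

# Sketch: combined causal + padding bias for attention_mask = [1, 1, 0].
import torch

causal = _make_causal_mask((1, 3), torch.float32, torch.device('cpu'))        # (1, 1, 3, 3)
padding = _expand_mask(torch.tensor([[1, 1, 0]]), torch.float32, tgt_len=3)   # (1, 1, 3, 3)
combined = causal + padding
print((combined == 0).squeeze())
# [[ True, False, False],
#  [ True,  True, False],
#  [ True,  True, False]]  -> lower triangle, minus the padded last column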
128
+ # Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->InternLM2
129
+ class InternLM2RMSNorm(nn.Module):
130
+ def __init__(self, hidden_size, eps=1e-6):
131
+ """
132
+ InternLM2RMSNorm is equivalent to T5LayerNorm
133
+ """
134
+ super().__init__()
135
+ self.weight = nn.Parameter(torch.ones(hidden_size))
136
+ self.variance_epsilon = eps
137
+
138
+ def forward(self, hidden_states):
139
+ input_dtype = hidden_states.dtype
140
+ hidden_states = hidden_states.to(torch.float32)
141
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
142
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
143
+ return self.weight * hidden_states.to(input_dtype)
144
+
145
+
146
+ # Copied from transformers.model.llama.modeling_llama.LlamaRotaryEmbedding with Llama->InternLM2
147
+ class InternLM2RotaryEmbedding(nn.Module):
148
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
149
+ super().__init__()
150
+
151
+ self.dim = dim
152
+ self.max_position_embeddings = max_position_embeddings
153
+ self.base = base
154
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
155
+ self.register_buffer('inv_freq', inv_freq, persistent=False)
156
+
157
+ # Build here to make `torch.jit.trace` work.
158
+ self._set_cos_sin_cache(
159
+ seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
160
+ )
161
+
162
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
163
+ self.max_seq_len_cached = seq_len
164
+ t = torch.arange(self.max_seq_len_cached, device=device).to(dtype=self.inv_freq.dtype)
165
+
166
+ freqs = torch.einsum('i,j->ij', t, self.inv_freq)
167
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
168
+ emb = torch.cat((freqs, freqs), dim=-1)
169
+ self.register_buffer('cos_cached', emb.cos().to(dtype), persistent=False)
170
+ self.register_buffer('sin_cached', emb.sin().to(dtype), persistent=False)
171
+
172
+ def forward(self, x, seq_len=None):
173
+ # x: [bs, num_attention_heads, seq_len, head_size]
174
+ if seq_len > self.max_seq_len_cached:
175
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=torch.float32)
176
+
177
+ return (
178
+ self.cos_cached[:seq_len].to(dtype=x.dtype),
179
+ self.sin_cached[:seq_len].to(dtype=x.dtype),
180
+ )
181
+
182
+
183
+ # Copied from transformers.model.llama.modeling_llama.LlamaLinearScalingRotaryEmbedding with Llama->InternLM2
184
+ class InternLM2LinearScalingRotaryEmbedding(InternLM2RotaryEmbedding):
185
+ """InternLM2RotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
186
+
187
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
188
+ self.scaling_factor = scaling_factor
189
+ super().__init__(dim, max_position_embeddings, base, device)
190
+
191
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
192
+ self.max_seq_len_cached = seq_len
193
+ t = torch.arange(self.max_seq_len_cached, device=device).to(dtype=self.inv_freq.dtype)
194
+ t = t / self.scaling_factor
195
+
196
+ freqs = torch.einsum('i,j->ij', t, self.inv_freq)
197
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
198
+ emb = torch.cat((freqs, freqs), dim=-1)
199
+ self.register_buffer('cos_cached', emb.cos().to(dtype), persistent=False)
200
+ self.register_buffer('sin_cached', emb.sin().to(dtype), persistent=False)
201
+
202
+
203
+ # Copied from transformers.model.llama.modeling_llama.LlamaDynamicNTKScalingRotaryEmbedding with Llama->InternLM2
204
+ class InternLM2DynamicNTKScalingRotaryEmbedding(InternLM2RotaryEmbedding):
205
+ """InternLM2RotaryEmbedding extended with Dynamic NTK scaling.
206
+ Credits to the Reddit users /u/bloc97 and /u/emozilla.
207
+ """
208
+
209
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
210
+ self.scaling_factor = scaling_factor
211
+ super().__init__(dim, max_position_embeddings, base, device)
212
+
213
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
214
+ self.max_seq_len_cached = seq_len
215
+
216
+ if seq_len > self.max_position_embeddings:
217
+ base = self.base * (
218
+ (self.scaling_factor * seq_len / self.max_position_embeddings) - (self.scaling_factor - 1)
219
+ ) ** (self.dim / (self.dim - 2))
220
+ inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
221
+ self.register_buffer('inv_freq', inv_freq, persistent=False)
222
+
223
+ t = torch.arange(self.max_seq_len_cached, device=device).to(dtype=self.inv_freq.dtype)
224
+
225
+ freqs = torch.einsum('i,j->ij', t, self.inv_freq)
226
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
227
+ emb = torch.cat((freqs, freqs), dim=-1)
228
+ self.register_buffer('cos_cached', emb.cos().to(dtype), persistent=False)
229
+ self.register_buffer('sin_cached', emb.sin().to(dtype), persistent=False)
230
+
231
+
232
+ # Copied from transformers.model.llama.modeling_llama.rotate_half
233
+ def rotate_half(x):
234
+ """Rotates half the hidden dims of the input."""
235
+ x1 = x[..., : x.shape[-1] // 2]
236
+ x2 = x[..., x.shape[-1] // 2 :]
237
+ return torch.cat((-x2, x1), dim=-1)
238
+
239
+
240
+ # Copied from transformers.model.llama.modeling_llama.apply_rotary_pos_emb
241
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
242
+ """Applies Rotary Position Embedding to the query and key tensors."""
243
+ cos = cos[position_ids].unsqueeze(unsqueeze_dim)
244
+ sin = sin[position_ids].unsqueeze(unsqueeze_dim)
245
+ q_embed = (q * cos) + (rotate_half(q) * sin)
246
+ k_embed = (k * cos) + (rotate_half(k) * sin)
247
+ return q_embed, k_embed
248
+
249
+
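`rotate_half` and `apply_rotary_pos_emb` above are the rotary-embedding step: each position's (cos, sin) pair rotates the query and key channels instead of adding a position vector. A small sketch wiring them to the rotary cache class defined earlier (sizes are illustrative):

# Sketch: rotate random query/key tensors with the rotary cache defined above.
import torch

bsz, heads, seq, head_dim = 1, 2, 5, 8                 # illustrative sizes
q = torch.randn(bsz, heads, seq, head_dim)
k = torch.randn(bsz, heads, seq, head_dim)
rope = InternLM2RotaryEmbedding(head_dim, max_position_embeddings=16)
cos, sin = rope(q, seq_len=seq)                        # each (seq, head_dim)
position_ids = torch.arange(seq).unsqueeze(0)          # (1, seq)
q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin, position_ids)
print(q_rot.shape, k_rot.shape)                        # shapes unchanged, values rotated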
250
+ class InternLM2MLP(nn.Module):
251
+ def __init__(self, config):
252
+ super().__init__()
253
+ self.config = config
254
+ self.hidden_size = config.hidden_size
255
+ self.intermediate_size = config.intermediate_size
256
+ self.w1 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
257
+ self.w3 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
258
+ self.w2 = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
259
+ self.act_fn = ACT2FN[config.hidden_act]
260
+
261
+ def forward(self, x):
262
+ down_proj = self.w2(self.act_fn(self.w1(x)) * self.w3(x))
263
+
264
+ return down_proj
265
+
266
+
267
+ # Copied from transformers.model.llama.modeling_llama.repeat_kv
268
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
269
+ """
270
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
271
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
272
+ """
273
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
274
+ if n_rep == 1:
275
+ return hidden_states
276
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
277
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
278
+
279
+
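`repeat_kv` above is the grouped-query-attention step: every key/value head is duplicated `n_rep` times so that `n_rep` query heads can share it. A minimal shape check with 8 query heads over 2 KV heads (all sizes are illustrative):

# Sketch: GQA head duplication via repeat_kv (2 KV heads shared by 8 query heads).
import torch

batch, kv_heads, q_heads, seq, head_dim = 1, 2, 8, 6, 16
k = torch.randn(batch, kv_heads, seq, head_dim)
k_rep = repeat_kv(k, n_rep=q_heads // kv_heads)        # -> (1, 8, 6, 16)
print(k_rep.shape)
# After expansion, heads 0-3 are copies of KV head 0 and heads 4-7 of KV head 1.
print(torch.equal(k_rep[:, 0], k_rep[:, 3]), torch.equal(k_rep[:, 4], k_rep[:, 7]))  # True True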
280
+ # Modified from transformers.model.llama.modeling_llama.LlamaAttention
281
+ class InternLM2Attention(nn.Module):
282
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
283
+
284
+ def __init__(self, config: InternLM2Config):
285
+ super().__init__()
286
+ self.config = config
287
+ self.hidden_size = config.hidden_size
288
+ self.num_heads = config.num_attention_heads
289
+ self.head_dim = self.hidden_size // self.num_heads
290
+ self.num_key_value_heads = config.num_key_value_heads
291
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
292
+ self.max_position_embeddings = config.max_position_embeddings
293
+ self.is_causal = True
294
+
295
+ if (self.head_dim * self.num_heads) != self.hidden_size:
296
+ raise ValueError(
297
+ f'hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}'
298
+ f' and `num_heads`: {self.num_heads}).'
299
+ )
300
+
301
+ self.wqkv = nn.Linear(
302
+ self.hidden_size,
303
+ (self.num_heads + 2 * self.num_key_value_heads) * self.head_dim,
304
+ bias=config.bias,
305
+ )
306
+
307
+ self.wo = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.bias)
308
+ self._init_rope()
309
+
310
+ def _init_rope(self):
311
+ if self.config.rope_scaling is None:
312
+ self.rotary_emb = InternLM2RotaryEmbedding(
313
+ self.head_dim,
314
+ max_position_embeddings=self.max_position_embeddings,
315
+ base=self.config.rope_theta,
316
+ )
317
+ else:
318
+ scaling_type = self.config.rope_scaling['type']
319
+ scaling_factor = self.config.rope_scaling['factor']
320
+ if scaling_type == 'dynamic':
321
+ self.rotary_emb = InternLM2DynamicNTKScalingRotaryEmbedding(
322
+ self.head_dim,
323
+ max_position_embeddings=self.max_position_embeddings,
324
+ base=self.config.rope_theta,
325
+ scaling_factor=scaling_factor,
326
+ )
327
+ elif scaling_type == 'linear':
328
+ self.rotary_emb = InternLM2LinearScalingRotaryEmbedding(
329
+ self.head_dim,
330
+ max_position_embeddings=self.max_position_embeddings,
331
+ base=self.config.rope_theta,
332
+ scaling_factor=scaling_factor,
333
+ )
334
+ else:
335
+ raise ValueError("Currently we only support rotary embeddings of type 'dynamic' or 'linear'.")
336
+ return self.rotary_emb
337
+
338
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
339
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
340
+
341
+ def forward(
342
+ self,
343
+ hidden_states: torch.Tensor,
344
+ attention_mask: Optional[torch.Tensor] = None,
345
+ position_ids: Optional[torch.LongTensor] = None,
346
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
347
+ output_attentions: bool = False,
348
+ use_cache: bool = False,
349
+ **kwargs,
350
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
351
+ if 'padding_mask' in kwargs:
352
+ warnings.warn(
353
+ 'Passing `padding_mask` is deprecated and will be removed in v4.37. '
354
+ 'Please make sure to use `attention_mask` instead.'
355
+ )
356
+
357
+ bsz, q_len, _ = hidden_states.size()
358
+
359
+ qkv_states = self.wqkv(hidden_states)
360
+
361
+ qkv_states = rearrange(
362
+ qkv_states,
363
+ 'b q (h gs d) -> b q h gs d',
364
+ gs=2 + self.num_key_value_groups,
365
+ d=self.head_dim,
366
+ )
367
+
368
+ query_states = qkv_states[..., : self.num_key_value_groups, :]
369
+ query_states = rearrange(query_states, 'b q h gs d -> b q (h gs) d')
370
+ key_states = qkv_states[..., -2, :]
371
+ value_states = qkv_states[..., -1, :]
372
+
373
+ query_states = query_states.transpose(1, 2)
374
+ key_states = key_states.transpose(1, 2)
375
+ value_states = value_states.transpose(1, 2)
376
+
377
+ kv_seq_len = key_states.shape[-2]
378
+ if past_key_value is not None:
379
+ kv_seq_len += past_key_value[0].shape[-2]
380
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
381
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
382
+
383
+ if past_key_value is not None:
384
+ # reuse k, v, self_attention
385
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
386
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
387
+
388
+ past_key_value = (key_states, value_states) if use_cache else None
389
+
390
+ key_states = repeat_kv(key_states, self.num_key_value_groups)
391
+ value_states = repeat_kv(value_states, self.num_key_value_groups)
392
+
393
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
394
+
395
+ if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
396
+ raise ValueError(
397
+ f'Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is'
398
+ f' {attn_weights.size()}'
399
+ )
400
+
401
+ if attention_mask is not None:
402
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
403
+ raise ValueError(
404
+ f'Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}'
405
+ )
406
+ attn_weights = attn_weights + attention_mask
407
+
408
+ # upcast attention to fp32
409
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
410
+ attn_output = torch.matmul(attn_weights, value_states)
411
+
412
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
413
+ raise ValueError(
414
+ f'`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is'
415
+ f' {attn_output.size()}'
416
+ )
417
+
418
+ attn_output = attn_output.transpose(1, 2).contiguous()
419
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
420
+
421
+ attn_output = self.wo(attn_output)
422
+
423
+ if not output_attentions:
424
+ attn_weights = None
425
+
426
+ return attn_output, attn_weights, past_key_value
427
+
428
+
429
+ # Modified from transformers.model.llama.modeling_llama.InternLM2FlashAttention2
430
+ class InternLM2FlashAttention2(InternLM2Attention):
431
+ """
432
+ InternLM2 flash attention module. This module inherits from `InternLM2Attention` as the weights of the module stay
433
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
434
+ flash attention and deal with padding tokens in case the input contains any of them.
435
+ """
436
+
437
+ def forward(
438
+ self,
439
+ hidden_states: torch.Tensor,
440
+ attention_mask: Optional[torch.LongTensor] = None,
441
+ position_ids: Optional[torch.LongTensor] = None,
442
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
443
+ output_attentions: bool = False,
444
+ use_cache: bool = False,
445
+ **kwargs,
446
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
447
+ # InternLM2FlashAttention2 attention does not support output_attentions
448
+ if 'padding_mask' in kwargs:
449
+ warnings.warn(
450
+ 'Passing `padding_mask` is deprecated and will be removed in v4.37. '
451
+ 'Please make sure to use `attention_mask` instead.'
452
+ )
453
+
454
+ # overwrite attention_mask with padding_mask
455
+ attention_mask = kwargs.pop('padding_mask')
456
+
457
+ output_attentions = False
458
+
459
+ bsz, q_len, _ = hidden_states.size()
460
+
461
+ qkv_states = self.wqkv(hidden_states)
462
+
463
+ qkv_states = rearrange(
464
+ qkv_states,
465
+ 'b q (h gs d) -> b q h gs d',
466
+ gs=2 + self.num_key_value_groups,
467
+ d=self.head_dim,
468
+ )
469
+
470
+ query_states = qkv_states[..., : self.num_key_value_groups, :]
471
+ query_states = rearrange(query_states, 'b q h gs d -> b q (h gs) d')
472
+ key_states = qkv_states[..., -2, :]
473
+ value_states = qkv_states[..., -1, :]
474
+
475
+ query_states = query_states.transpose(1, 2)
476
+ key_states = key_states.transpose(1, 2)
477
+ value_states = value_states.transpose(1, 2)
478
+
479
+ kv_seq_len = key_states.shape[-2]
480
+ if past_key_value is not None:
481
+ kv_seq_len += past_key_value[0].shape[-2]
482
+
483
+ cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
484
+
485
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
486
+
487
+ if past_key_value is not None:
488
+ # reuse k, v, self_attention
489
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
490
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
491
+
492
+ past_key_value = (key_states, value_states) if use_cache else None
493
+
494
+ query_states = query_states.transpose(1, 2)
495
+ key_states = key_states.transpose(1, 2)
496
+ value_states = value_states.transpose(1, 2)
497
+
498
+ attn_output = self._flash_attention_forward(
499
+ query_states, key_states, value_states, attention_mask, q_len
500
+ )
501
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
502
+ attn_output = self.wo(attn_output)
503
+
504
+ if not output_attentions:
505
+ attn_weights = None
506
+
507
+ return attn_output, attn_weights, past_key_value
508
+
509
+ def _flash_attention_forward(
510
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
511
+ ):
512
+ """
513
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
514
+ first unpad the input, then computes the attention scores and pad the final attention scores.
515
+
516
+ Args:
517
+ query_states (`torch.Tensor`):
518
+ Input query states to be passed to Flash Attention API
519
+ key_states (`torch.Tensor`):
520
+ Input key states to be passed to Flash Attention API
521
+ value_states (`torch.Tensor`):
522
+ Input value states to be passed to Flash Attention API
523
+ attention_mask (`torch.Tensor`):
524
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
525
+ position of padding tokens and 1 for the position of non-padding tokens.
526
+ dropout (`int`, *optional*):
527
+ Attention dropout
528
+ softmax_scale (`float`, *optional*):
529
+ The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
530
+ """
531
+ # Contains at least one padding token in the sequence
532
+ causal = self.is_causal and query_length != 1
533
+ if attention_mask is not None:
534
+ batch_size = query_states.shape[0]
535
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._unpad_input(
536
+ query_states, key_states, value_states, attention_mask, query_length
537
+ )
538
+
539
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
540
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
541
+
542
+ attn_output_unpad = flash_attn_varlen_func(
543
+ query_states,
544
+ key_states,
545
+ value_states,
546
+ cu_seqlens_q=cu_seqlens_q,
547
+ cu_seqlens_k=cu_seqlens_k,
548
+ max_seqlen_q=max_seqlen_in_batch_q,
549
+ max_seqlen_k=max_seqlen_in_batch_k,
550
+ dropout_p=dropout,
551
+ softmax_scale=softmax_scale,
552
+ causal=causal,
553
+ )
554
+
555
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
556
+ else:
557
+ attn_output = flash_attn_func(
558
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
559
+ )
560
+
561
+ return attn_output
562
+
563
+ def _unpad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
564
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
565
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
566
+
567
+ key_layer = index_first_axis(
568
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
569
+ )
570
+ value_layer = index_first_axis(
571
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
572
+ )
573
+
574
+ if query_length == kv_seq_len:
575
+ query_layer = index_first_axis(
576
+ query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
577
+ )
578
+ cu_seqlens_q = cu_seqlens_k
579
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
580
+ indices_q = indices_k
581
+ elif query_length == 1:
582
+ max_seqlen_in_batch_q = 1
583
+ cu_seqlens_q = torch.arange(
584
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
585
+ ) # There is a memcpy here, that is very bad.
586
+ indices_q = cu_seqlens_q[:-1]
587
+ query_layer = query_layer.squeeze(1)
588
+ else:
589
+ # The -q_len: slice assumes left padding.
590
+ attention_mask = attention_mask[:, -query_length:]
591
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
592
+
593
+ return (
594
+ query_layer,
595
+ key_layer,
596
+ value_layer,
597
+ indices_q.to(torch.int64),
598
+ (cu_seqlens_q, cu_seqlens_k),
599
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
600
+ )
601
+
602
+
603
+ INTERNLM2_ATTENTION_CLASSES = {
604
+ 'eager': InternLM2Attention,
605
+ 'flash_attention_2': InternLM2FlashAttention2,
606
+ }
607
+
608
+
609
+ # Modified from transformers.model.llama.modeling_llama.LlamaDecoderLayer
610
+ class InternLM2DecoderLayer(nn.Module):
611
+ def __init__(self, config: InternLM2Config):
612
+ super().__init__()
613
+ self.hidden_size = config.hidden_size
614
+
615
+ self.attention = INTERNLM2_ATTENTION_CLASSES[config.attn_implementation](config=config)
616
+
617
+ self.feed_forward = InternLM2MLP(config)
618
+ self.attention_norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
619
+ self.ffn_norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
620
+
621
+ def forward(
622
+ self,
623
+ hidden_states: torch.Tensor,
624
+ attention_mask: Optional[torch.Tensor] = None,
625
+ position_ids: Optional[torch.LongTensor] = None,
626
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
627
+ output_attentions: Optional[bool] = False,
628
+ use_cache: Optional[bool] = False,
629
+ **kwargs,
630
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
631
+ """
632
+ Args:
633
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
634
+ attention_mask (`torch.FloatTensor`, *optional*):
635
+ attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
636
+ query_sequence_length, key_sequence_length)` if default attention is used.
637
+ output_attentions (`bool`, *optional*):
638
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
639
+ returned tensors for more detail.
640
+ use_cache (`bool`, *optional*):
641
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
642
+ (see `past_key_values`).
643
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
644
+ """
645
+ if 'padding_mask' in kwargs:
646
+ warnings.warn(
647
+ 'Passing `padding_mask` is deprecated and will be removed in v4.37. '
648
+ 'Please make sure to use `attention_mask` instead.'
649
+ )
650
+
651
+ residual = hidden_states
652
+
653
+ hidden_states = self.attention_norm(hidden_states)
654
+
655
+ # Self Attention
656
+ hidden_states, self_attn_weights, present_key_value = self.attention(
657
+ hidden_states=hidden_states,
658
+ attention_mask=attention_mask,
659
+ position_ids=position_ids,
660
+ past_key_value=past_key_value,
661
+ output_attentions=output_attentions,
662
+ use_cache=use_cache,
663
+ **kwargs,
664
+ )
665
+ hidden_states = residual + hidden_states
666
+
667
+ # Fully Connected
668
+ residual = hidden_states
669
+ hidden_states = self.ffn_norm(hidden_states)
670
+ hidden_states = self.feed_forward(hidden_states)
671
+ hidden_states = residual + hidden_states
672
+
673
+ outputs = (hidden_states,)
674
+
675
+ if output_attentions:
676
+ outputs += (self_attn_weights,)
677
+
678
+ if use_cache:
679
+ outputs += (present_key_value,)
680
+
681
+ return outputs
682
+
683
+
684
+ InternLM2_START_DOCSTRING = r"""
685
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
686
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
687
+ etc.)
688
+
689
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
690
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
691
+ and behavior.
692
+
693
+ Parameters:
694
+ config ([`InternLM2Config`]):
695
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
696
+ load the weights associated with the model, only the configuration. Check out the
697
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
698
+ """
699
+
700
+
701
+ # Copied from transformers.models.llama.modeling_llama.LlamaPreTrainedModel with Llama->InternLM2
702
+ @add_start_docstrings(
703
+ 'The bare InternLM2 Model outputting raw hidden-states without any specific head on top.',
704
+ InternLM2_START_DOCSTRING,
705
+ )
706
+ class InternLM2PreTrainedModel(PreTrainedModel):
707
+ config_class = InternLM2Config
708
+ base_model_prefix = 'model'
709
+ supports_gradient_checkpointing = True
710
+ _no_split_modules = ['InternLM2DecoderLayer']
711
+ _skip_keys_device_placement = 'past_key_values'
712
+ _supports_flash_attn_2 = True
713
+
714
+ def _init_weights(self, module):
715
+ std = self.config.initializer_range
716
+ if isinstance(module, nn.Linear):
717
+ module.weight.data.normal_(mean=0.0, std=std)
718
+ if module.bias is not None:
719
+ module.bias.data.zero_()
720
+ elif isinstance(module, nn.Embedding):
721
+ module.weight.data.normal_(mean=0.0, std=std)
722
+ if module.padding_idx is not None:
723
+ module.weight.data[module.padding_idx].zero_()
724
+
725
+
726
+ InternLM2_INPUTS_DOCSTRING = r"""
727
+ Args:
728
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
729
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
730
+ it.
731
+
732
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
733
+ [`PreTrainedTokenizer.__call__`] for details.
734
+
735
+ [What are input IDs?](../glossary#input-ids)
736
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
737
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
738
+
739
+ - 1 for tokens that are **not masked**,
740
+ - 0 for tokens that are **masked**.
741
+
742
+ [What are attention masks?](../glossary#attention-mask)
743
+
744
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
745
+ [`PreTrainedTokenizer.__call__`] for details.
746
+
747
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
748
+ `past_key_values`).
749
+
750
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
751
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
752
+ information on the default strategy.
753
+
754
+ - 1 indicates the head is **not masked**,
755
+ - 0 indicates the head is **masked**.
756
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
757
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
758
+ config.n_positions - 1]`.
759
+
760
+ [What are position IDs?](../glossary#position-ids)
761
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or
762
+ when `config.use_cache=True`):
763
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
764
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
765
+ `(batch_size, num_heads, decoder_sequence_length, embed_size_per_head)`.
766
+
767
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
768
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
769
+
770
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
771
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
772
+ of shape `(batch_size, sequence_length)`.
773
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
774
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
775
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
776
+ model's internal embedding lookup matrix.
777
+ use_cache (`bool`, *optional*):
778
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
779
+ `past_key_values`).
780
+ output_attentions (`bool`, *optional*):
781
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
782
+ tensors for more detail.
783
+ output_hidden_states (`bool`, *optional*):
784
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
785
+ more detail.
786
+ return_dict (`bool`, *optional*):
787
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
788
+ """
789
+
790
+
791
+ # Modified from transformers.model.llama.modeling_llama.LlamaModel
792
+ @add_start_docstrings(
793
+ 'The bare InternLM2 Model outputting raw hidden-states without any specific head on top.',
794
+ InternLM2_START_DOCSTRING,
795
+ )
796
+ class InternLM2Model(InternLM2PreTrainedModel):
797
+ """
798
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`InternLM2DecoderLayer`]
799
+
800
+ Args:
801
+ config: InternLM2Config
802
+ """
803
+
804
+ _auto_class = 'AutoModel'
805
+
806
+ def __init__(self, config: InternLM2Config):
807
+ super().__init__(config)
808
+ self.padding_idx = config.pad_token_id
809
+ self.vocab_size = config.vocab_size
810
+ self.config = config
811
+ if not has_flash_attn:
812
+ self.config.attn_implementation = 'eager'
813
+ print('Warning: Flash attention is not available, using eager attention instead.')
814
+
815
+ self.tok_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
816
+
817
+ self.layers = nn.ModuleList([InternLM2DecoderLayer(config) for _ in range(config.num_hidden_layers)])
818
+ self.norm = InternLM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
819
+
820
+ self.gradient_checkpointing = False
821
+ # Initialize weights and apply final processing
822
+ self.post_init()
823
+
824
+ def get_input_embeddings(self):
825
+ return self.tok_embeddings
826
+
827
+ def set_input_embeddings(self, value):
828
+ self.tok_embeddings = value
829
+
830
+ def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
831
+ # create causal mask
832
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
833
+ combined_attention_mask = None
834
+ if input_shape[-1] > 1:
835
+ combined_attention_mask = _make_causal_mask(
836
+ input_shape,
837
+ inputs_embeds.dtype,
838
+ device=inputs_embeds.device,
839
+ past_key_values_length=past_key_values_length,
840
+ )
841
+
842
+ if attention_mask is not None:
843
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
844
+ expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
845
+ inputs_embeds.device
846
+ )
847
+ combined_attention_mask = (
848
+ expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
849
+ )
850
+
851
+ return combined_attention_mask
852
+
853
+ @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
854
+ def forward(
855
+ self,
856
+ input_ids: torch.LongTensor = None,
857
+ attention_mask: Optional[torch.Tensor] = None,
858
+ position_ids: Optional[torch.LongTensor] = None,
859
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
860
+ inputs_embeds: Optional[torch.FloatTensor] = None,
861
+ use_cache: Optional[bool] = None,
862
+ output_attentions: Optional[bool] = None,
863
+ output_hidden_states: Optional[bool] = None,
864
+ return_dict: Optional[bool] = None,
865
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
866
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
867
+ output_hidden_states = (
868
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
869
+ )
870
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
871
+
872
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
873
+
874
+ if self.config.attn_implementation == 'flash_attention_2':
875
+ _import_flash_attn()
876
+
877
+ # retrieve input_ids and inputs_embeds
878
+ if input_ids is not None and inputs_embeds is not None:
879
+ raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
880
+ elif input_ids is not None:
881
+ batch_size, seq_length = input_ids.shape[:2]
882
+ elif inputs_embeds is not None:
883
+ batch_size, seq_length = inputs_embeds.shape[:2]
884
+ else:
885
+ raise ValueError('You have to specify either input_ids or inputs_embeds')
886
+
887
+ seq_length_with_past = seq_length
888
+ past_key_values_length = 0
889
+ if past_key_values is not None:
890
+ past_key_values_length = past_key_values[0][0].shape[2]
891
+ seq_length_with_past = seq_length_with_past + past_key_values_length
892
+
893
+ if position_ids is None:
894
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
895
+ position_ids = torch.arange(
896
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
897
+ )
898
+ position_ids = position_ids.unsqueeze(0)
899
+
900
+ if inputs_embeds is None:
901
+ inputs_embeds = self.tok_embeddings(input_ids)
902
+
903
+ if self.config.attn_implementation == 'flash_attention_2':
904
+ # 2d mask is passed through the layers
905
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
906
+ else:
907
+ if attention_mask is None:
908
+ attention_mask = torch.ones(
909
+ (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
910
+ )
911
+ attention_mask = self._prepare_decoder_attention_mask(
912
+ attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
913
+ )
914
+
915
+ # embed positions
916
+ hidden_states = inputs_embeds
917
+
918
+ if self.gradient_checkpointing and self.training:
919
+ if use_cache:
920
+ logger.warning_once(
921
+ '`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...'
922
+ )
923
+ use_cache = False
924
+
925
+ # decoder layers
926
+ all_hidden_states = () if output_hidden_states else None
927
+ all_self_attns = () if output_attentions else None
928
+ next_decoder_cache = () if use_cache else None
929
+
930
+ for idx, decoder_layer in enumerate(self.layers):
931
+ if output_hidden_states:
932
+ all_hidden_states += (hidden_states,)
933
+
934
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
935
+
936
+ if self.gradient_checkpointing and self.training:
937
+
938
+ def create_custom_forward(module):
939
+ def custom_forward(*inputs):
940
+ # None for past_key_value
941
+ return module(*inputs, output_attentions, None)
942
+
943
+ return custom_forward
944
+
945
+ layer_outputs = torch.utils.checkpoint.checkpoint(
946
+ create_custom_forward(decoder_layer),
947
+ hidden_states,
948
+ attention_mask,
949
+ position_ids,
950
+ None,
951
+ )
952
+ else:
953
+ layer_outputs = decoder_layer(
954
+ hidden_states,
955
+ attention_mask=attention_mask,
956
+ position_ids=position_ids,
957
+ past_key_value=past_key_value,
958
+ output_attentions=output_attentions,
959
+ use_cache=use_cache,
960
+ )
961
+
962
+ hidden_states = layer_outputs[0]
963
+
964
+ if use_cache:
965
+ next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
966
+
967
+ if output_attentions:
968
+ all_self_attns += (layer_outputs[1],)
969
+
970
+ hidden_states = self.norm(hidden_states)
971
+
972
+ # add hidden states from the last decoder layer
973
+ if output_hidden_states:
974
+ all_hidden_states += (hidden_states,)
975
+
976
+ next_cache = next_decoder_cache if use_cache else None
977
+ if not return_dict:
978
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
979
+ return BaseModelOutputWithPast(
980
+ last_hidden_state=hidden_states,
981
+ past_key_values=next_cache,
982
+ hidden_states=all_hidden_states,
983
+ attentions=all_self_attns,
984
+ )
985
+
986
+
987
+ # Modified from transformers.model.llama.modeling_llama.LlamaForCausalLM
988
+ class InternLM2ForCausalLM(InternLM2PreTrainedModel):
989
+ _auto_class = 'AutoModelForCausalLM'
990
+
991
+ _tied_weights_keys = ['output.weight']
992
+
993
+ def __init__(self, config):
994
+ super().__init__(config)
995
+ self.model = InternLM2Model(config)
996
+ self.vocab_size = config.vocab_size
997
+ self.output = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
998
+
999
+ # Initialize weights and apply final processing
1000
+ self.post_init()
1001
+
1002
+ def get_input_embeddings(self):
1003
+ return self.model.tok_embeddings
1004
+
1005
+ def set_input_embeddings(self, value):
1006
+ self.model.tok_embeddings = value
1007
+
1008
+ def get_output_embeddings(self):
1009
+ return self.output
1010
+
1011
+ def set_output_embeddings(self, new_embeddings):
1012
+ self.output = new_embeddings
1013
+
1014
+ def set_decoder(self, decoder):
1015
+ self.model = decoder
1016
+
1017
+ def get_decoder(self):
1018
+ return self.model
1019
+
1020
+ @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
1021
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1022
+ def forward(
1023
+ self,
1024
+ input_ids: torch.LongTensor = None,
1025
+ attention_mask: Optional[torch.Tensor] = None,
1026
+ position_ids: Optional[torch.LongTensor] = None,
1027
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1028
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1029
+ labels: Optional[torch.LongTensor] = None,
1030
+ use_cache: Optional[bool] = None,
1031
+ output_attentions: Optional[bool] = None,
1032
+ output_hidden_states: Optional[bool] = None,
1033
+ return_dict: Optional[bool] = None,
1034
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1035
+ r"""
1036
+ Args:
1037
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1038
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1039
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1040
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1041
+
1042
+ Returns:
1043
+
1044
+ Example:
1045
+
1046
+ ```python
1047
+ >>> from transformers import AutoTokenizer, InternLM2ForCausalLM
1048
+
1049
+ >>> model = InternLM2ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
1050
+ >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
1051
+
1052
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
1053
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1054
+
1055
+ >>> # Generate
1056
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1057
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1058
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
1059
+ ```"""
1060
+
1061
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1062
+ output_hidden_states = (
1063
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1064
+ )
1065
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1066
+
1067
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1068
+ outputs = self.model(
1069
+ input_ids=input_ids,
1070
+ attention_mask=attention_mask,
1071
+ position_ids=position_ids,
1072
+ past_key_values=past_key_values,
1073
+ inputs_embeds=inputs_embeds,
1074
+ use_cache=use_cache,
1075
+ output_attentions=output_attentions,
1076
+ output_hidden_states=output_hidden_states,
1077
+ return_dict=return_dict,
1078
+ )
1079
+
1080
+ hidden_states = outputs[0]
1081
+ logits = self.output(hidden_states)
1082
+ logits = logits.float()
1083
+
1084
+ loss = None
1085
+ if labels is not None:
1086
+ # Shift so that tokens < n predict n
1087
+ shift_logits = logits[..., :-1, :].contiguous()
1088
+ shift_labels = labels[..., 1:].contiguous()
1089
+ # Flatten the tokens
1090
+ loss_fct = CrossEntropyLoss()
1091
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1092
+ shift_labels = shift_labels.view(-1)
1093
+ # Enable model parallelism
1094
+ shift_labels = shift_labels.to(shift_logits.device)
1095
+ loss = loss_fct(shift_logits, shift_labels)
1096
+
1097
+ if not return_dict:
1098
+ output = (logits,) + outputs[1:]
1099
+ return (loss,) + output if loss is not None else output
1100
+
1101
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
1102
+ output = CausalLMOutputWithPast(
1103
+ loss=loss,
1104
+ logits=logits,
1105
+ past_key_values=outputs.past_key_values,
1106
+ hidden_states=outputs.hidden_states,
1107
+ attentions=outputs.attentions,
1108
+ )
1109
+ output['logits'] = output['logits'].to(device)
1110
+ return output
1111
+
1112
+ def prepare_inputs_for_generation(
1113
+ self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
1114
+ ):
1115
+ if past_key_values is not None:
1116
+ past_length = past_key_values[0][0].shape[2]
1117
+
1118
+ # Some generation methods already pass only the last input ID
1119
+ if input_ids.shape[1] > past_length:
1120
+ remove_prefix_length = past_length
1121
+ else:
1122
+ # Default to old behavior: keep only final ID
1123
+ remove_prefix_length = input_ids.shape[1] - 1
1124
+
1125
+ input_ids = input_ids[:, remove_prefix_length:]
1126
+
1127
+ position_ids = kwargs.get('position_ids', None)
1128
+ if attention_mask is not None and position_ids is None:
1129
+ # create position_ids on the fly for batch generation
1130
+ position_ids = attention_mask.long().cumsum(-1) - 1
1131
+ position_ids.masked_fill_(attention_mask == 0, 1)
1132
+ if past_key_values:
1133
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1134
+
1135
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1136
+ if inputs_embeds is not None and past_key_values is None:
1137
+ model_inputs = {'inputs_embeds': inputs_embeds}
1138
+ else:
1139
+ model_inputs = {'input_ids': input_ids}
1140
+
1141
+ model_inputs.update(
1142
+ {
1143
+ 'position_ids': position_ids,
1144
+ 'past_key_values': past_key_values,
1145
+ 'use_cache': kwargs.get('use_cache'),
1146
+ 'attention_mask': attention_mask,
1147
+ }
1148
+ )
1149
+ return model_inputs
1150
+
1151
+ @staticmethod
1152
+ def _reorder_cache(past_key_values, beam_idx):
1153
+ reordered_past = ()
1154
+ for layer_past in past_key_values:
1155
+ reordered_past += (
1156
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
1157
+ )
1158
+ return reordered_past
1159
+
1160
+ def build_inputs(self, tokenizer, query: str, history: List[Tuple[str, str]] = [], meta_instruction=''):
1161
+ if tokenizer.add_bos_token:
1162
+ prompt = ''
1163
+ else:
1164
+ prompt = tokenizer.bos_token
1165
+ if meta_instruction:
1166
+ prompt += f"""<|im_start|>system\n{meta_instruction}<|im_end|>\n"""
1167
+ for record in history:
1168
+ prompt += f"""<|im_start|>user\n{record[0]}<|im_end|>\n<|im_start|>assistant\n{record[1]}<|im_end|>\n"""
1169
+ prompt += f"""<|im_start|>user\n{query}<|im_end|>\n<|im_start|>assistant\n"""
1170
+ return tokenizer([prompt], return_tensors='pt')
1171
+
1172
+ @torch.no_grad()
1173
+ def chat(
1174
+ self,
1175
+ tokenizer,
1176
+ query: str,
1177
+ history: List[Tuple[str, str]] = [],
1178
+ streamer: Optional[BaseStreamer] = None,
1179
+ max_new_tokens: int = 1024,
1180
+ do_sample: bool = True,
1181
+ temperature: float = 0.8,
1182
+ top_p: float = 0.8,
1183
+ meta_instruction: str = 'You are an AI assistant whose name is InternLM (书生·浦语).\n'
1184
+ '- InternLM (书生·浦语) is a conversational language model that is developed by Shanghai AI Laboratory (上海人工智能实验室). It is designed to be helpful, honest, and harmless.\n'
1185
+ '- InternLM (书生·浦语) can understand and communicate fluently in the language chosen by the user such as English and 中文.',
1186
+ **kwargs,
1187
+ ):
1188
+ inputs = self.build_inputs(tokenizer, query, history, meta_instruction)
1189
+ inputs = {k: v.to(self.device) for k, v in inputs.items() if torch.is_tensor(v)}
1190
+ # also add end-of-assistant token in eos token id to avoid unnecessary generation
1191
+ eos_token_id = [tokenizer.eos_token_id, tokenizer.convert_tokens_to_ids(['<|im_end|>'])[0]]
1192
+ outputs = self.generate(
1193
+ **inputs,
1194
+ streamer=streamer,
1195
+ max_new_tokens=max_new_tokens,
1196
+ do_sample=do_sample,
1197
+ temperature=temperature,
1198
+ top_p=top_p,
1199
+ eos_token_id=eos_token_id,
1200
+ **kwargs,
1201
+ )
1202
+ outputs = outputs[0].cpu().tolist()[len(inputs['input_ids'][0]) :]
1203
+ response = tokenizer.decode(outputs, skip_special_tokens=True)
1204
+ response = response.split('<|im_end|>')[0]
1205
+ history = history + [(query, response)]
1206
+ return response, history
1207
+
1208
+ @torch.no_grad()
1209
+ def stream_chat(
1210
+ self,
1211
+ tokenizer,
1212
+ query: str,
1213
+ history: List[Tuple[str, str]] = [],
1214
+ max_new_tokens: int = 1024,
1215
+ do_sample: bool = True,
1216
+ temperature: float = 0.8,
1217
+ top_p: float = 0.8,
1218
+ **kwargs,
1219
+ ):
1220
+ """
1221
+ Return a generator in format: (response, history)
1222
+ e.g.
1223
+ ('你好,有什么可以帮助您的吗', [('你好', '你好,有什么可以帮助您的吗')])
1224
+ ('你好,有什么可以帮助您的吗?', [('你好', '你好,有什么可以帮助您的吗?')])
1225
+ """
1226
+ if BaseStreamer is None:
1227
+ raise ModuleNotFoundError(
1228
+ 'The version of `transformers` is too low. Please make sure '
1229
+ 'that you have installed `transformers>=4.28.0`.'
1230
+ )
1231
+
1232
+ response_queue = queue.Queue(maxsize=20)
1233
+
1234
+ class ChatStreamer(BaseStreamer):
1235
+ def __init__(self, tokenizer) -> None:
1236
+ super().__init__()
1237
+ self.tokenizer = tokenizer
1238
+ self.queue = response_queue
1239
+ self.query = query
1240
+ self.history = history
1241
+ self.response = ''
1242
+ self.cache = []
1243
+ self.received_inputs = False
1244
+ self.queue.put((self.response, history + [(self.query, self.response)]))
1245
+
1246
+ def put(self, value):
1247
+ if len(value.shape) > 1 and value.shape[0] > 1:
1248
+ raise ValueError('ChatStreamer only supports batch size 1')
1249
+ elif len(value.shape) > 1:
1250
+ value = value[0]
1251
+
1252
+ if not self.received_inputs:
1253
+ # The first received value is input_ids, ignore here
1254
+ self.received_inputs = True
1255
+ return
1256
+
1257
+ self.cache.extend(value.tolist())
1258
+ token = self.tokenizer.decode(self.cache, skip_special_tokens=True)
1259
+ if token.strip() != '<|im_end|>':
1260
+ self.response = self.response + token
1261
+ history = self.history + [(self.query, self.response)]
1262
+ self.queue.put((self.response, history))
1263
+ self.cache = []
1264
+ else:
1265
+ self.end()
1266
+
1267
+ def end(self):
1268
+ self.queue.put(None)
1269
+
1270
+ def stream_producer():
1271
+ return self.chat(
1272
+ tokenizer=tokenizer,
1273
+ query=query,
1274
+ streamer=ChatStreamer(tokenizer=tokenizer),
1275
+ history=history,
1276
+ max_new_tokens=max_new_tokens,
1277
+ do_sample=do_sample,
1278
+ temperature=temperature,
1279
+ top_p=top_p,
1280
+ **kwargs,
1281
+ )
1282
+
1283
+ def consumer():
1284
+ producer = threading.Thread(target=stream_producer)
1285
+ producer.start()
1286
+ while True:
1287
+ res = response_queue.get()
1288
+ if res is None:
1289
+ return
1290
+ yield res
1291
+
1292
+ return consumer()
1293
+
1294
+
1295
+ # Copied from transformers.model.llama.modeling_llama.LlamaForSequenceClassification with Llama->InternLM2
1296
+ @add_start_docstrings(
1297
+ """
1298
+ The InternLM2 Model transformer with a sequence classification head on top (linear layer).
1299
+
1300
+ [`InternLM2ForSequenceClassification`] uses the last token in order to do the classification,
1301
+ as other causal models (e.g. GPT-2) do.
1302
+
1303
+ Since it does classification on the last token, it requires to know the position of the last token. If a
1304
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
1305
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
1306
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
1307
+ each row of the batch).
1308
+ """,
1309
+ InternLM2_START_DOCSTRING,
1310
+ )
1311
+ class InternLM2ForSequenceClassification(InternLM2PreTrainedModel):
1312
+ def __init__(self, config):
1313
+ super().__init__(config)
1314
+ self.num_labels = config.num_labels
1315
+ self.model = InternLM2Model(config)
1316
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1317
+
1318
+ # Initialize weights and apply final processing
1319
+ self.post_init()
1320
+
1321
+ def get_input_embeddings(self):
1322
+ return self.model.tok_embeddings
1323
+
1324
+ def set_input_embeddings(self, value):
1325
+ self.model.tok_embeddings = value
1326
+
1327
+ @add_start_docstrings_to_model_forward(InternLM2_INPUTS_DOCSTRING)
1328
+ def forward(
1329
+ self,
1330
+ input_ids: torch.LongTensor = None,
1331
+ attention_mask: Optional[torch.Tensor] = None,
1332
+ position_ids: Optional[torch.LongTensor] = None,
1333
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1334
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1335
+ labels: Optional[torch.LongTensor] = None,
1336
+ use_cache: Optional[bool] = None,
1337
+ output_attentions: Optional[bool] = None,
1338
+ output_hidden_states: Optional[bool] = None,
1339
+ return_dict: Optional[bool] = None,
1340
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1341
+ r"""
1342
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1343
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1344
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1345
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1346
+ """
1347
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1348
+
1349
+ transformer_outputs = self.model(
1350
+ input_ids,
1351
+ attention_mask=attention_mask,
1352
+ position_ids=position_ids,
1353
+ past_key_values=past_key_values,
1354
+ inputs_embeds=inputs_embeds,
1355
+ use_cache=use_cache,
1356
+ output_attentions=output_attentions,
1357
+ output_hidden_states=output_hidden_states,
1358
+ return_dict=return_dict,
1359
+ )
1360
+ hidden_states = transformer_outputs[0]
1361
+ logits = self.score(hidden_states)
1362
+
1363
+ if input_ids is not None:
1364
+ batch_size = input_ids.shape[0]
1365
+ else:
1366
+ batch_size = inputs_embeds.shape[0]
1367
+
1368
+ if self.config.pad_token_id is None and batch_size != 1:
1369
+ raise ValueError('Cannot handle batch sizes > 1 if no padding token is defined.')
1370
+ if self.config.pad_token_id is None:
1371
+ sequence_lengths = -1
1372
+ else:
1373
+ if input_ids is not None:
1374
+ sequence_lengths = (torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1).to(
1375
+ logits.device
1376
+ )
1377
+ else:
1378
+ sequence_lengths = -1
1379
+
1380
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1381
+
1382
+ loss = None
1383
+ if labels is not None:
1384
+ labels = labels.to(logits.device)
1385
+ if self.config.problem_type is None:
1386
+ if self.num_labels == 1:
1387
+ self.config.problem_type = 'regression'
1388
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1389
+ self.config.problem_type = 'single_label_classification'
1390
+ else:
1391
+ self.config.problem_type = 'multi_label_classification'
1392
+
1393
+ if self.config.problem_type == 'regression':
1394
+ loss_fct = MSELoss()
1395
+ if self.num_labels == 1:
1396
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1397
+ else:
1398
+ loss = loss_fct(pooled_logits, labels)
1399
+ elif self.config.problem_type == 'single_label_classification':
1400
+ loss_fct = CrossEntropyLoss()
1401
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
1402
+ elif self.config.problem_type == 'multi_label_classification':
1403
+ loss_fct = BCEWithLogitsLoss()
1404
+ loss = loss_fct(pooled_logits, labels)
1405
+ if not return_dict:
1406
+ output = (pooled_logits,) + transformer_outputs[1:]
1407
+ return ((loss,) + output) if loss is not None else output
1408
+
1409
+ return SequenceClassifierOutputWithPast(
1410
+ loss=loss,
1411
+ logits=pooled_logits,
1412
+ past_key_values=transformer_outputs.past_key_values,
1413
+ hidden_states=transformer_outputs.hidden_states,
1414
+ attentions=transformer_outputs.attentions,
1415
+ )
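A minimal usage sketch for the chat() helper defined in InternLM2ForCausalLM above. The checkpoint path is a placeholder for an InternLM2-style checkpoint (not this repo's merged InternVL weights), and the snippet assumes the checkpoint ships this custom modeling code so that trust_remote_code=True resolves to it.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

path = 'path/to/internlm2-checkpoint'  # placeholder path, not part of this repo
tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=False)
model = AutoModelForCausalLM.from_pretrained(
    path, torch_dtype=torch.bfloat16, trust_remote_code=True
).eval()

# chat() builds the <|im_start|>/<|im_end|> prompt via build_inputs() and
# returns the decoded response plus the updated history.
response, history = model.chat(tokenizer, 'Hello, who are you?', history=[])
print(response)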
modeling_internvl_chat.py ADDED
@@ -0,0 +1,349 @@
1
+ # --------------------------------------------------------
2
+ # InternVL
3
+ # Copyright (c) 2024 OpenGVLab
4
+ # Licensed under The MIT License [see LICENSE for details]
5
+ # --------------------------------------------------------
6
+
7
+ import warnings
8
+ from typing import List, Optional, Tuple, Union
9
+
10
+ import torch.utils.checkpoint
11
+ import transformers
12
+ from torch import nn
13
+ from torch.nn import CrossEntropyLoss
14
+ from transformers import (AutoModel, GenerationConfig, LlamaForCausalLM,
15
+ LlamaTokenizer)
16
+ from transformers.modeling_outputs import CausalLMOutputWithPast
17
+ from transformers.modeling_utils import PreTrainedModel
18
+ from transformers.utils import ModelOutput, logging
19
+
20
+ from .configuration_internvl_chat import InternVLChatConfig
21
+ from .conversation import get_conv_template
22
+ from .modeling_intern_vit import InternVisionModel, has_flash_attn
23
+ from .modeling_internlm2 import InternLM2ForCausalLM
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
+ def version_cmp(v1, v2, op='eq'):
29
+ import operator
30
+
31
+ from packaging import version
32
+ op_func = getattr(operator, op)
33
+ return op_func(version.parse(v1), version.parse(v2))
34
+
35
+
36
+ class InternVLChatModel(PreTrainedModel):
37
+ config_class = InternVLChatConfig
38
+ main_input_name = 'pixel_values'
39
+ base_model_prefix = 'language_model'
40
+ _supports_flash_attn_2 = True
41
+ _no_split_modules = ['InternVisionModel', 'LlamaDecoderLayer', 'InternLM2DecoderLayer']
42
+
43
+ def __init__(self, config: InternVLChatConfig, vision_model=None, language_model=None, use_flash_attn=True):
44
+ super().__init__(config)
45
+
46
+ assert version_cmp(transformers.__version__, '4.37.0', 'ge')
47
+ image_size = config.force_image_size or config.vision_config.image_size
48
+ patch_size = config.vision_config.patch_size
49
+ self.patch_size = patch_size
50
+ self.select_layer = config.select_layer
51
+ self.template = config.template
52
+ self.num_image_token = int((image_size // patch_size) ** 2 * (config.downsample_ratio ** 2))
53
+ self.downsample_ratio = config.downsample_ratio
54
+ self.ps_version = config.ps_version
55
+ use_flash_attn = use_flash_attn if has_flash_attn else False
56
+ config.vision_config.use_flash_attn = True if use_flash_attn else False
57
+ config.llm_config.attn_implementation = 'flash_attention_2' if use_flash_attn else 'eager'
58
+
59
+ logger.info(f'num_image_token: {self.num_image_token}')
60
+ logger.info(f'ps_version: {self.ps_version}')
61
+ if vision_model is not None:
62
+ self.vision_model = vision_model
63
+ else:
64
+ self.vision_model = InternVisionModel(config.vision_config)
65
+ if language_model is not None:
66
+ self.language_model = language_model
67
+ else:
68
+ if config.llm_config.architectures[0] == 'LlamaForCausalLM':
69
+ self.language_model = LlamaForCausalLM(config.llm_config)
70
+ elif config.llm_config.architectures[0] == 'InternLM2ForCausalLM':
71
+ self.language_model = InternLM2ForCausalLM(config.llm_config)
72
+ else:
73
+ raise NotImplementedError(f'{config.llm_config.architectures[0]} is not implemented.')
74
+
75
+ vit_hidden_size = config.vision_config.hidden_size
76
+ llm_hidden_size = config.llm_config.hidden_size
77
+
78
+ self.mlp1 = nn.Sequential(
79
+ nn.LayerNorm(vit_hidden_size * int(1 / self.downsample_ratio) ** 2),
80
+ nn.Linear(vit_hidden_size * int(1 / self.downsample_ratio) ** 2, llm_hidden_size),
81
+ nn.GELU(),
82
+ nn.Linear(llm_hidden_size, llm_hidden_size)
83
+ )
84
+
85
+ self.img_context_token_id = None
86
+ self.conv_template = get_conv_template(self.template)
87
+ self.system_message = self.conv_template.system_message
88
+
89
+ def forward(
90
+ self,
91
+ pixel_values: torch.FloatTensor,
92
+ input_ids: torch.LongTensor = None,
93
+ attention_mask: Optional[torch.Tensor] = None,
94
+ position_ids: Optional[torch.LongTensor] = None,
95
+ image_flags: Optional[torch.LongTensor] = None,
96
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
97
+ labels: Optional[torch.LongTensor] = None,
98
+ use_cache: Optional[bool] = None,
99
+ output_attentions: Optional[bool] = None,
100
+ output_hidden_states: Optional[bool] = None,
101
+ return_dict: Optional[bool] = None,
102
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
103
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
104
+
105
+ image_flags = image_flags.squeeze(-1)
106
+ input_embeds = self.language_model.get_input_embeddings()(input_ids).clone()
107
+
108
+ vit_embeds = self.extract_feature(pixel_values)
109
+ vit_embeds = vit_embeds[image_flags == 1]
110
+ vit_batch_size = pixel_values.shape[0]
111
+
112
+ B, N, C = input_embeds.shape
113
+ input_embeds = input_embeds.reshape(B * N, C)
114
+
115
+ if torch.distributed.is_initialized() and torch.distributed.get_rank() == 0:
116
+ print(f'dynamic ViT batch size: {vit_batch_size}, images per sample: {vit_batch_size / B}, dynamic token length: {N}')
117
+
118
+ input_ids = input_ids.reshape(B * N)
119
+ selected = (input_ids == self.img_context_token_id)
120
+ try:
121
+ input_embeds[selected] = input_embeds[selected] * 0.0 + vit_embeds.reshape(-1, C)
122
+ except Exception as e:
123
+ vit_embeds = vit_embeds.reshape(-1, C)
124
+ print(f'warning: {e}, input_embeds[selected].shape={input_embeds[selected].shape}, '
125
+ f'vit_embeds.shape={vit_embeds.shape}')
126
+ n_token = selected.sum()
127
+ input_embeds[selected] = input_embeds[selected] * 0.0 + vit_embeds[:n_token]
128
+
129
+ input_embeds = input_embeds.reshape(B, N, C)
130
+
131
+ outputs = self.language_model(
132
+ inputs_embeds=input_embeds,
133
+ attention_mask=attention_mask,
134
+ position_ids=position_ids,
135
+ past_key_values=past_key_values,
136
+ use_cache=use_cache,
137
+ output_attentions=output_attentions,
138
+ output_hidden_states=output_hidden_states,
139
+ return_dict=return_dict,
140
+ )
141
+ logits = outputs.logits
142
+
143
+ loss = None
144
+ if labels is not None:
145
+ # Shift so that tokens < n predict n
146
+ shift_logits = logits[..., :-1, :].contiguous()
147
+ shift_labels = labels[..., 1:].contiguous()
148
+ # Flatten the tokens
149
+ loss_fct = CrossEntropyLoss()
150
+ shift_logits = shift_logits.view(-1, self.language_model.config.vocab_size)
151
+ shift_labels = shift_labels.view(-1)
152
+ # Enable model parallelism
153
+ shift_labels = shift_labels.to(shift_logits.device)
154
+ loss = loss_fct(shift_logits, shift_labels)
155
+
156
+ if not return_dict:
157
+ output = (logits,) + outputs[1:]
158
+ return (loss,) + output if loss is not None else output
159
+
160
+ return CausalLMOutputWithPast(
161
+ loss=loss,
162
+ logits=logits,
163
+ past_key_values=outputs.past_key_values,
164
+ hidden_states=outputs.hidden_states,
165
+ attentions=outputs.attentions,
166
+ )
167
+
168
+ def pixel_shuffle(self, x, scale_factor=0.5):
169
+ n, w, h, c = x.size()
170
+ # N, W, H, C --> N, W, H * scale, C // scale
171
+ x = x.view(n, w, int(h * scale_factor), int(c / scale_factor))
172
+ # N, W, H * scale, C // scale --> N, H * scale, W, C // scale
173
+ x = x.permute(0, 2, 1, 3).contiguous()
174
+ # N, H * scale, W, C // scale --> N, H * scale, W * scale, C // (scale ** 2)
175
+ x = x.view(n, int(h * scale_factor), int(w * scale_factor),
176
+ int(c / (scale_factor * scale_factor)))
177
+ if self.ps_version == 'v1':
178
+ warnings.warn("In ps_version 'v1', the height and width have not been swapped back, "
179
+ 'which results in a transposed image.')
180
+ else:
181
+ x = x.permute(0, 2, 1, 3).contiguous()
182
+ return x
183
+
184
+ def extract_feature(self, pixel_values):
185
+ if self.select_layer == -1:
186
+ vit_embeds = self.vision_model(
187
+ pixel_values=pixel_values,
188
+ output_hidden_states=False,
189
+ return_dict=True).last_hidden_state
190
+ else:
191
+ vit_embeds = self.vision_model(
192
+ pixel_values=pixel_values,
193
+ output_hidden_states=True,
194
+ return_dict=True).hidden_states[self.select_layer]
195
+ vit_embeds = vit_embeds[:, 1:, :]
196
+
197
+ h = w = int(vit_embeds.shape[1] ** 0.5)
198
+ vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], h, w, -1)
199
+ vit_embeds = self.pixel_shuffle(vit_embeds, scale_factor=self.downsample_ratio)
200
+ vit_embeds = vit_embeds.reshape(vit_embeds.shape[0], -1, vit_embeds.shape[-1])
201
+ vit_embeds = self.mlp1(vit_embeds)
202
+ return vit_embeds
203
+
204
+ def batch_chat(self, tokenizer, pixel_values, questions, generation_config, num_patches_list=None,
205
+ history=None, return_history=False, IMG_START_TOKEN='<img>', IMG_END_TOKEN='</img>',
206
+ IMG_CONTEXT_TOKEN='<IMG_CONTEXT>', verbose=False, image_counts=None):
207
+ if history is not None or return_history:
208
+ print('Multi-turn chat is not yet supported in batch_chat.')
209
+ raise NotImplementedError
210
+
211
+ if image_counts is not None:
212
+ num_patches_list = image_counts
213
+ print('Warning: `image_counts` is deprecated. Please use `num_patches_list` instead.')
214
+
215
+ img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
216
+ self.img_context_token_id = img_context_token_id
217
+
218
+ if verbose and pixel_values is not None:
219
+ image_bs = pixel_values.shape[0]
220
+ print(f'dynamic ViT batch size: {image_bs}')
221
+
222
+ queries = []
223
+ for idx, num_patches in enumerate(num_patches_list):
224
+ question = questions[idx]
225
+ if pixel_values is not None and '<image>' not in question:
226
+ question = '<image>\n' + question
227
+ template = get_conv_template(self.template)
228
+ template.system_message = self.system_message
229
+ template.append_message(template.roles[0], question)
230
+ template.append_message(template.roles[1], None)
231
+ query = template.get_prompt()
232
+
233
+ image_tokens = IMG_START_TOKEN + IMG_CONTEXT_TOKEN * self.num_image_token * num_patches + IMG_END_TOKEN
234
+ query = query.replace('<image>', image_tokens, 1)
235
+ queries.append(query)
236
+
237
+ tokenizer.padding_side = 'left'
238
+ model_inputs = tokenizer(queries, return_tensors='pt', padding=True)
239
+ input_ids = model_inputs['input_ids'].to(self.device)
240
+ attention_mask = model_inputs['attention_mask'].to(self.device)
241
+ eos_token_id = tokenizer.convert_tokens_to_ids(template.sep.strip())
242
+ generation_config['eos_token_id'] = eos_token_id
243
+ generation_output = self.generate(
244
+ pixel_values=pixel_values,
245
+ input_ids=input_ids,
246
+ attention_mask=attention_mask,
247
+ **generation_config
248
+ )
249
+ responses = tokenizer.batch_decode(generation_output, skip_special_tokens=True)
250
+ responses = [response.split(template.sep.strip())[0].strip() for response in responses]
251
+ return responses
252
+
253
+ def chat(self, tokenizer, pixel_values, question, generation_config, history=None, return_history=False,
254
+ num_patches_list=None, IMG_START_TOKEN='<img>', IMG_END_TOKEN='</img>', IMG_CONTEXT_TOKEN='<IMG_CONTEXT>',
255
+ verbose=False):
256
+
257
+ if history is None and pixel_values is not None and '<image>' not in question:
258
+ question = '<image>\n' + question
259
+
260
+ if num_patches_list is None:
261
+ num_patches_list = [pixel_values.shape[0]] if pixel_values is not None else []
262
+ assert pixel_values is None or len(pixel_values) == sum(num_patches_list)
263
+
264
+ img_context_token_id = tokenizer.convert_tokens_to_ids(IMG_CONTEXT_TOKEN)
265
+ self.img_context_token_id = img_context_token_id
266
+
267
+ template = get_conv_template(self.template)
268
+ template.system_message = self.system_message
269
+ eos_token_id = tokenizer.convert_tokens_to_ids(template.sep.strip())
270
+
271
+ history = [] if history is None else history
272
+ for (old_question, old_answer) in history:
273
+ template.append_message(template.roles[0], old_question)
274
+ template.append_message(template.roles[1], old_answer)
275
+ template.append_message(template.roles[0], question)
276
+ template.append_message(template.roles[1], None)
277
+ query = template.get_prompt()
278
+
279
+ if verbose and pixel_values is not None:
280
+ image_bs = pixel_values.shape[0]
281
+ print(f'dynamic ViT batch size: {image_bs}')
282
+
283
+ for num_patches in num_patches_list:
284
+ image_tokens = IMG_START_TOKEN + IMG_CONTEXT_TOKEN * self.num_image_token * num_patches + IMG_END_TOKEN
285
+ query = query.replace('<image>', image_tokens, 1)
286
+
287
+ model_inputs = tokenizer(query, return_tensors='pt')
288
+ input_ids = model_inputs['input_ids'].to(self.device)
289
+ attention_mask = model_inputs['attention_mask'].to(self.device)
290
+ generation_config['eos_token_id'] = eos_token_id
291
+ generation_output = self.generate(
292
+ pixel_values=pixel_values,
293
+ input_ids=input_ids,
294
+ attention_mask=attention_mask,
295
+ **generation_config
296
+ )
297
+ response = tokenizer.batch_decode(generation_output, skip_special_tokens=True)[0]
298
+ response = response.split(template.sep.strip())[0].strip()
299
+ history.append((question, response))
300
+ if return_history:
301
+ return response, history
302
+ else:
303
+ query_to_print = query.replace(IMG_CONTEXT_TOKEN, '')
304
+ query_to_print = query_to_print.replace(f'{IMG_START_TOKEN}{IMG_END_TOKEN}', '<image>')
305
+ if verbose:
306
+ print(query_to_print, response)
307
+ return response
308
+
309
+ @torch.no_grad()
310
+ def generate(
311
+ self,
312
+ pixel_values: Optional[torch.FloatTensor] = None,
313
+ input_ids: Optional[torch.FloatTensor] = None,
314
+ attention_mask: Optional[torch.LongTensor] = None,
315
+ visual_features: Optional[torch.FloatTensor] = None,
316
+ generation_config: Optional[GenerationConfig] = None,
317
+ output_hidden_states: Optional[bool] = None,
318
+ **generate_kwargs,
319
+ ) -> torch.LongTensor:
320
+
321
+ assert self.img_context_token_id is not None
322
+ if pixel_values is not None:
323
+ if visual_features is not None:
324
+ vit_embeds = visual_features
325
+ else:
326
+ vit_embeds = self.extract_feature(pixel_values)
327
+ input_embeds = self.language_model.get_input_embeddings()(input_ids)
328
+ B, N, C = input_embeds.shape
329
+ input_embeds = input_embeds.reshape(B * N, C)
330
+
331
+ input_ids = input_ids.reshape(B * N)
332
+ selected = (input_ids == self.img_context_token_id)
333
+ assert selected.sum() != 0
334
+ input_embeds[selected] = vit_embeds.reshape(-1, C).to(input_embeds.device)
335
+
336
+ input_embeds = input_embeds.reshape(B, N, C)
337
+ else:
338
+ input_embeds = self.language_model.get_input_embeddings()(input_ids)
339
+
340
+ outputs = self.language_model.generate(
341
+ inputs_embeds=input_embeds,
342
+ attention_mask=attention_mask,
343
+ generation_config=generation_config,
344
+ output_hidden_states=output_hidden_states,
345
+ use_cache=True,
346
+ **generate_kwargs,
347
+ )
348
+
349
+ return outputs
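A hedged usage sketch for InternVLChatModel.chat() defined above. The checkpoint path is a placeholder, and the zero-filled pixel_values tensor stands in for a real preprocessed image; an actual call would pass image patches produced by the model's preprocessing pipeline (assumed here to be 448x448 crops).

import torch
from transformers import AutoModel, AutoTokenizer

path = 'path/to/internvl-chat-checkpoint'  # placeholder path
tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=False)
model = AutoModel.from_pretrained(
    path, torch_dtype=torch.bfloat16, trust_remote_code=True
).eval().cuda()

# pixel_values: (num_patches, 3, H, W); a dummy tensor stands in for a real
# preprocessed image in this sketch.
pixel_values = torch.zeros(1, 3, 448, 448, dtype=torch.bfloat16).cuda()
generation_config = dict(max_new_tokens=64, do_sample=False)
response = model.chat(tokenizer, pixel_values, '<image>\nDescribe the image.', generation_config)
print(response)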
rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:16254a2356eb4cd4ce872efef91e4d3469f8895921f752bb9fde370a7aaff78b
3
+ size 15216
rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f892b200ad237b2759ddf2a50439ec46b28079ac46b38369f8bee6b0af4deaf1
3
+ size 15216
rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dc936bb7e516b469b192a1ea64b2a6eba6a68d1f7c2a4b9ed5ef05ea0726b038
3
+ size 15216
rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f5d663f4b8ec8b760dd982ffa6b496d24c7c06af06a4590f8a1531c6190da97c
3
+ size 15280
scheduler.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bcde72ebd35534926b5973bec35c8109a79e1c6147a9bef4920a74f33f427622
3
+ size 1064
special_tokens_map.json ADDED
@@ -0,0 +1,47 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>",
5
+ "<|action_start|>",
6
+ "<|action_end|>",
7
+ "<|interpreter|>",
8
+ "<|plugin|>",
9
+ "<img>",
10
+ "</img>",
11
+ "<IMG_CONTEXT>",
12
+ "<quad>",
13
+ "</quad>",
14
+ "<ref>",
15
+ "</ref>",
16
+ "<box>",
17
+ "</box>"
18
+ ],
19
+ "bos_token": {
20
+ "content": "<s>",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false
25
+ },
26
+ "eos_token": {
27
+ "content": "</s>",
28
+ "lstrip": false,
29
+ "normalized": false,
30
+ "rstrip": false,
31
+ "single_word": false
32
+ },
33
+ "pad_token": {
34
+ "content": "</s>",
35
+ "lstrip": false,
36
+ "normalized": false,
37
+ "rstrip": false,
38
+ "single_word": false
39
+ },
40
+ "unk_token": {
41
+ "content": "<unk>",
42
+ "lstrip": false,
43
+ "normalized": false,
44
+ "rstrip": false,
45
+ "single_word": false
46
+ }
47
+ }
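A small check, assuming the tokenizer files in this repo are loadable, that the additional special tokens listed in special_tokens_map.json resolve to single ids instead of being split by the SentencePiece model. The checkpoint path is a placeholder.

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('path/to/checkpoint', trust_remote_code=True, use_fast=False)  # placeholder
for token in ['<img>', '</img>', '<IMG_CONTEXT>', '<box>', '</box>', '<|im_end|>']:
    print(token, tokenizer.convert_tokens_to_ids(token))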
tokenization_internlm2.py ADDED
@@ -0,0 +1,235 @@
1
+ # Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # This code is based on transformers/src/transformers/models/llama/tokenization_llama.py
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ """Tokenization classes for InternLM."""
18
+ import os
19
+ from shutil import copyfile
20
+ from typing import Any, Dict, List, Optional, Tuple
21
+
22
+ import sentencepiece as spm
23
+ from transformers.tokenization_utils import PreTrainedTokenizer
24
+ from transformers.utils import logging
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+ VOCAB_FILES_NAMES = {'vocab_file': './tokenizer.model'}
29
+
30
+ PRETRAINED_VOCAB_FILES_MAP = {}
31
+
32
+
33
+ # Modified from transformers.model.llama.tokenization_llama.LlamaTokenizer
34
+ class InternLM2Tokenizer(PreTrainedTokenizer):
35
+ """
36
+ Construct an InternLM2 tokenizer. Based on byte-level Byte-Pair-Encoding.
37
+
38
+ Args:
39
+ vocab_file (`str`):
40
+ Path to the vocabulary file.
41
+ """
42
+
43
+ vocab_files_names = VOCAB_FILES_NAMES
44
+ pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
45
+ model_input_names = ['input_ids', 'attention_mask']
46
+ _auto_class = 'AutoTokenizer'
47
+
48
+ def __init__(
49
+ self,
50
+ vocab_file,
51
+ unk_token='<unk>',
52
+ bos_token='<s>',
53
+ eos_token='</s>',
54
+ pad_token='</s>',
55
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
56
+ add_bos_token=True,
57
+ add_eos_token=False,
58
+ decode_with_prefix_space=False,
59
+ clean_up_tokenization_spaces=False,
60
+ **kwargs,
61
+ ):
62
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
63
+ self.vocab_file = vocab_file
64
+ self.add_bos_token = add_bos_token
65
+ self.add_eos_token = add_eos_token
66
+ self.decode_with_prefix_space = decode_with_prefix_space
67
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
68
+ self.sp_model.Load(vocab_file)
69
+ self._no_prefix_space_tokens = None
70
+ super().__init__(
71
+ bos_token=bos_token,
72
+ eos_token=eos_token,
73
+ unk_token=unk_token,
74
+ pad_token=pad_token,
75
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
76
+ **kwargs,
77
+ )
78
+
79
+ @property
80
+ def no_prefix_space_tokens(self):
81
+ if self._no_prefix_space_tokens is None:
82
+ vocab = self.convert_ids_to_tokens(list(range(self.vocab_size)))
83
+ self._no_prefix_space_tokens = {i for i, tok in enumerate(vocab) if not tok.startswith('▁')}
84
+ return self._no_prefix_space_tokens
85
+
86
+ @property
87
+ def vocab_size(self):
88
+ """Returns vocab size"""
89
+ return self.sp_model.get_piece_size()
90
+
91
+ @property
92
+ def bos_token_id(self) -> Optional[int]:
93
+ return self.sp_model.bos_id()
94
+
95
+ @property
96
+ def eos_token_id(self) -> Optional[int]:
97
+ return self.sp_model.eos_id()
98
+
99
+ def get_vocab(self):
100
+ """Returns vocab as a dict"""
101
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
102
+ vocab.update(self.added_tokens_encoder)
103
+ return vocab
104
+
105
+ def _tokenize(self, text):
106
+ """Returns a tokenized string."""
107
+ return self.sp_model.encode(text, out_type=str)
108
+
109
+ def _convert_token_to_id(self, token):
110
+ """Converts a token (str) in an id using the vocab."""
111
+ return self.sp_model.piece_to_id(token)
112
+
113
+ def _convert_id_to_token(self, index):
114
+ """Converts an index (integer) in a token (str) using the vocab."""
115
+ token = self.sp_model.IdToPiece(index)
116
+ return token
117
+
118
+ def _maybe_add_prefix_space(self, tokens, decoded):
119
+ if tokens and tokens[0] not in self.no_prefix_space_tokens:
120
+ return ' ' + decoded
121
+ else:
122
+ return decoded
123
+
124
+ def convert_tokens_to_string(self, tokens):
125
+ """Converts a sequence of tokens (string) in a single string."""
126
+ current_sub_tokens = []
127
+ out_string = ''
128
+ prev_is_special = False
129
+ for token in tokens:
130
+ # make sure that special tokens are not decoded using sentencepiece model
131
+ if token in self.all_special_tokens:
132
+ if not prev_is_special:
133
+ out_string += ' '
134
+ out_string += self.sp_model.decode(current_sub_tokens) + token
135
+ prev_is_special = True
136
+ current_sub_tokens = []
137
+ else:
138
+ current_sub_tokens.append(token)
139
+ prev_is_special = False
140
+ out_string += self.sp_model.decode(current_sub_tokens)
141
+ out_string = self.clean_up_tokenization(out_string)
142
+ out_string = self._maybe_add_prefix_space(tokens=tokens, decoded=out_string)
143
+ return out_string[1:]
144
+
145
+ def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
146
+ """
147
+ Save the vocabulary and special tokens file to a directory.
148
+
149
+ Args:
150
+ save_directory (`str`):
151
+ The directory in which to save the vocabulary.
152
+
153
+ Returns:
154
+ `Tuple(str)`: Paths to the files saved.
155
+ """
156
+ if not os.path.isdir(save_directory):
157
+ logger.error(f'Vocabulary path ({save_directory}) should be a directory')
158
+ return
159
+ out_vocab_file = os.path.join(
160
+ save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
161
+ )
162
+
163
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
164
+ copyfile(self.vocab_file, out_vocab_file)
165
+ elif not os.path.isfile(self.vocab_file):
166
+ with open(out_vocab_file, 'wb') as fi:
167
+ content_spiece_model = self.sp_model.serialized_model_proto()
168
+ fi.write(content_spiece_model)
169
+
170
+ return (out_vocab_file,)
171
+
172
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
173
+ if self.add_bos_token:
174
+ bos_token_ids = [self.bos_token_id]
175
+ else:
176
+ bos_token_ids = []
177
+
178
+ output = bos_token_ids + token_ids_0
179
+
180
+ if token_ids_1 is not None:
181
+ output = output + token_ids_1
182
+
183
+ if self.add_eos_token:
184
+ output = output + [self.eos_token_id]
185
+
186
+ return output
187
+
188
+ def get_special_tokens_mask(
189
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
190
+ ) -> List[int]:
191
+ """
192
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
193
+ special tokens using the tokenizer `prepare_for_model` method.
194
+
195
+ Args:
196
+ token_ids_0 (`List[int]`):
197
+ List of IDs.
198
+ token_ids_1 (`List[int]`, *optional*):
199
+ Optional second list of IDs for sequence pairs.
200
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
201
+ Whether or not the token list is already formatted with special tokens for the model.
202
+
203
+ Returns:
204
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
205
+ """
206
+ if already_has_special_tokens:
207
+ return super().get_special_tokens_mask(
208
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
209
+ )
210
+
211
+ if token_ids_1 is None:
212
+ return [1] + ([0] * len(token_ids_0)) + [1]
213
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
214
+
215
+ def create_token_type_ids_from_sequences(
216
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
217
+ ) -> List[int]:
218
+ """
219
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. InternLM2 does not make
220
+ use of token type ids, therefore a list of zeros is returned.
221
+
222
+ Args:
223
+ token_ids_0 (`List[int]`):
224
+ List of IDs.
225
+ token_ids_1 (`List[int]`, *optional*):
226
+ Optional second list of IDs for sequence pairs.
227
+
228
+ Returns:
229
+ `List[int]`: List of zeros.
230
+ """
231
+ eos = [self.eos_token_id]
232
+
233
+ if token_ids_1 is None:
234
+ return len(token_ids_0 + eos) * [0]
235
+ return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
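A sketch of using the SentencePiece-backed InternLM2Tokenizer defined above directly. It assumes sentencepiece is installed, that this file is importable from the working directory, and that ./tokenizer.model (the vocab file named in VOCAB_FILES_NAMES) sits alongside it.

from tokenization_internlm2 import InternLM2Tokenizer

tokenizer = InternLM2Tokenizer(vocab_file='./tokenizer.model')
ids = tokenizer('Hello world')['input_ids']
# With add_bos_token=True (the default), the sequence starts with the <s> id,
# mirroring build_inputs_with_special_tokens() above.
print(ids[0] == tokenizer.bos_token_id)
print(tokenizer.decode(ids, skip_special_tokens=True))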
tokenizer.model ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f868398fc4e05ee1e8aeba95ddf18ddcc45b8bce55d5093bead5bbf80429b48b
3
+ size 1477754
tokenizer_config.json ADDED
@@ -0,0 +1,180 @@
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "<unk>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "1": {
12
+ "content": "<s>",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "2": {
20
+ "content": "</s>",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "92538": {
28
+ "content": "<|plugin|>",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "92539": {
36
+ "content": "<|interpreter|>",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ },
43
+ "92540": {
44
+ "content": "<|action_end|>",
45
+ "lstrip": false,
46
+ "normalized": false,
47
+ "rstrip": false,
48
+ "single_word": false,
49
+ "special": true
50
+ },
51
+ "92541": {
52
+ "content": "<|action_start|>",
53
+ "lstrip": false,
54
+ "normalized": false,
55
+ "rstrip": false,
56
+ "single_word": false,
57
+ "special": true
58
+ },
59
+ "92542": {
60
+ "content": "<|im_end|>",
61
+ "lstrip": false,
62
+ "normalized": false,
63
+ "rstrip": false,
64
+ "single_word": false,
65
+ "special": true
66
+ },
67
+ "92543": {
68
+ "content": "<|im_start|>",
69
+ "lstrip": false,
70
+ "normalized": false,
71
+ "rstrip": false,
72
+ "single_word": false,
73
+ "special": true
74
+ },
75
+ "92544": {
76
+ "content": "<img>",
77
+ "lstrip": false,
78
+ "normalized": false,
79
+ "rstrip": false,
80
+ "single_word": false,
81
+ "special": true
82
+ },
83
+ "92545": {
84
+ "content": "</img>",
85
+ "lstrip": false,
86
+ "normalized": false,
87
+ "rstrip": false,
88
+ "single_word": false,
89
+ "special": true
90
+ },
91
+ "92546": {
92
+ "content": "<IMG_CONTEXT>",
93
+ "lstrip": false,
94
+ "normalized": false,
95
+ "rstrip": false,
96
+ "single_word": false,
97
+ "special": true
98
+ },
99
+ "92547": {
100
+ "content": "<quad>",
101
+ "lstrip": false,
102
+ "normalized": false,
103
+ "rstrip": false,
104
+ "single_word": false,
105
+ "special": true
106
+ },
107
+ "92548": {
108
+ "content": "</quad>",
109
+ "lstrip": false,
110
+ "normalized": false,
111
+ "rstrip": false,
112
+ "single_word": false,
113
+ "special": true
114
+ },
115
+ "92549": {
116
+ "content": "<ref>",
117
+ "lstrip": false,
118
+ "normalized": false,
119
+ "rstrip": false,
120
+ "single_word": false,
121
+ "special": true
122
+ },
123
+ "92550": {
124
+ "content": "</ref>",
125
+ "lstrip": false,
126
+ "normalized": false,
127
+ "rstrip": false,
128
+ "single_word": false,
129
+ "special": true
130
+ },
131
+ "92551": {
132
+ "content": "<box>",
133
+ "lstrip": false,
134
+ "normalized": false,
135
+ "rstrip": false,
136
+ "single_word": false,
137
+ "special": true
138
+ },
139
+ "92552": {
140
+ "content": "</box>",
141
+ "lstrip": false,
142
+ "normalized": false,
143
+ "rstrip": false,
144
+ "single_word": false,
145
+ "special": true
146
+ }
147
+ },
148
+ "additional_special_tokens": [
149
+ "<|im_start|>",
150
+ "<|im_end|>",
151
+ "<|action_start|>",
152
+ "<|action_end|>",
153
+ "<|interpreter|>",
154
+ "<|plugin|>",
155
+ "<img>",
156
+ "</img>",
157
+ "<IMG_CONTEXT>",
158
+ "<quad>",
159
+ "</quad>",
160
+ "<ref>",
161
+ "</ref>",
162
+ "<box>",
163
+ "</box>"
164
+ ],
165
+ "auto_map": {
166
+ "AutoTokenizer": [
167
+ "tokenization_internlm2.InternLM2Tokenizer",
168
+ null
169
+ ]
170
+ },
171
+ "bos_token": "<s>",
172
+ "chat_template": "{{ bos_token }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
173
+ "clean_up_tokenization_spaces": false,
174
+ "eos_token": "</s>",
175
+ "extra_special_tokens": {},
176
+ "model_max_length": 16384,
177
+ "pad_token": "</s>",
178
+ "tokenizer_class": "InternLM2Tokenizer",
179
+ "unk_token": "<unk>"
180
+ }
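
The chat_template above is an InternLM2-style <|im_start|>/<|im_end|> template; transformers' AutoTokenizer.apply_chat_template would use this field when formatting conversations. As a sanity check, here is a minimal rendering sketch with plain jinja2 (the jinja2 dependency and the example message are assumptions, not part of this commit):

# Minimal sketch: render the chat_template above for a single user turn.
from jinja2 import Template

chat_template = (
    "{{ bos_token }}{% for message in messages %}"
    "{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}"
    "{% endfor %}"
    "{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
)

prompt = Template(chat_template).render(
    bos_token="<s>",
    messages=[{"role": "user", "content": "Describe the image."}],  # illustrative message
    add_generation_prompt=True,
)
print(prompt)
# <s><|im_start|>user
# Describe the image.<|im_end|>
# <|im_start|>assistant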
trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:807e4097dfb5abdbcb4cb645f24d4fbe76abad7a3f5e82846098d49861eff89e
3
+ size 9656
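
training_args.bin is tracked with Git LFS, so the three lines above are only a pointer: the LFS spec version, the sha256 oid of the real payload, and its size in bytes (9656). A minimal sketch for checking a downloaded copy against this pointer (the local path is an assumption):

# Verify a locally downloaded training_args.bin against the LFS pointer above.
import hashlib
import os

path = "training_args.bin"  # hypothetical local path
expected_oid = "807e4097dfb5abdbcb4cb645f24d4fbe76abad7a3f5e82846098d49861eff89e"
expected_size = 9656

with open(path, "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()

assert os.path.getsize(path) == expected_size, "size does not match the LFS pointer"
assert digest == expected_oid, "sha256 does not match the LFS pointer"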
zero_to_fp32.py ADDED
@@ -0,0 +1,760 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright (c) Microsoft Corporation.
4
+ # SPDX-License-Identifier: Apache-2.0
5
+
6
+ # DeepSpeed Team
7
+
8
+ # This script extracts fp32 consolidated weights from ZeRO 1, 2 and 3 DeepSpeed checkpoints. It gets
9
+ # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
10
+ # the future. Once extracted, the weights don't require DeepSpeed and can be used in any
11
+ # application.
12
+ #
13
+ # example:
14
+ # python zero_to_fp32.py . output_dir/
15
+ # or
16
+ # python zero_to_fp32.py . output_dir/ --safe_serialization
17
+
18
+ import argparse
19
+ import torch
20
+ import glob
21
+ import math
22
+ import os
23
+ import re
24
+ import gc
25
+ import json
26
+ import numpy as np
27
+ from tqdm import tqdm
28
+ from collections import OrderedDict
29
+ from dataclasses import dataclass
30
+
31
+ # while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
32
+ # DeepSpeed data structures it has to be available in the current python environment.
33
+ from deepspeed.utils import logger
34
+ from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
35
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
36
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
37
+
38
+
39
+ @dataclass
40
+ class zero_model_state:
41
+ buffers: dict()
42
+ param_shapes: dict()
43
+ shared_params: list
44
+ ds_version: int
45
+ frozen_param_shapes: dict()
46
+ frozen_param_fragments: dict()
47
+
48
+
49
+ debug = 0
50
+
51
+ # load to cpu
52
+ device = torch.device('cpu')
53
+
54
+
55
+ def atoi(text):
56
+ return int(text) if text.isdigit() else text
57
+
58
+
59
+ def natural_keys(text):
60
+ '''
61
+ alist.sort(key=natural_keys) sorts in human order
62
+ http://nedbatchelder.com/blog/200712/human_sorting.html
63
+ (See Toothy's implementation in the comments)
64
+ '''
65
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
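+ # Example: sorted(['rank_10', 'rank_2'], key=natural_keys) -> ['rank_2', 'rank_10'].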
66
+
67
+
68
+ def get_model_state_file(checkpoint_dir, zero_stage):
69
+ if not os.path.isdir(checkpoint_dir):
70
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
71
+
72
+ # there should be only one file
73
+ if zero_stage <= 2:
74
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
75
+ elif zero_stage == 3:
76
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
77
+
78
+ if not os.path.exists(file):
79
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
80
+
81
+ return file
82
+
83
+
84
+ def get_checkpoint_files(checkpoint_dir, glob_pattern):
85
+ # XXX: need to test that this simple glob rule works for multi-node setup too
86
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
87
+
88
+ if len(ckpt_files) == 0:
89
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
90
+
91
+ return ckpt_files
92
+
93
+
94
+ def get_optim_files(checkpoint_dir):
95
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
96
+
97
+
98
+ def get_model_state_files(checkpoint_dir):
99
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
100
+
101
+
102
+ def parse_model_states(files):
103
+ zero_model_states = []
104
+ for file in files:
105
+ state_dict = torch.load(file, map_location=device, weights_only=False)
106
+
107
+ if BUFFER_NAMES not in state_dict:
108
+ raise ValueError(f"{file} is not a model state checkpoint")
109
+ buffer_names = state_dict[BUFFER_NAMES]
110
+ if debug:
111
+ print("Found buffers:", buffer_names)
112
+
113
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
114
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
115
+ param_shapes = state_dict[PARAM_SHAPES]
116
+
117
+ # collect parameters that are included in param_shapes
118
+ param_names = []
119
+ for s in param_shapes:
120
+ for name in s.keys():
121
+ param_names.append(name)
122
+
123
+ # update with frozen parameters
124
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
125
+ if frozen_param_shapes is not None:
126
+ if debug:
127
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
128
+ param_names += list(frozen_param_shapes.keys())
129
+
130
+ # handle shared params
131
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
132
+
133
+ ds_version = state_dict.get(DS_VERSION, None)
134
+
135
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
136
+
137
+ z_model_state = zero_model_state(buffers=buffers,
138
+ param_shapes=param_shapes,
139
+ shared_params=shared_params,
140
+ ds_version=ds_version,
141
+ frozen_param_shapes=frozen_param_shapes,
142
+ frozen_param_fragments=frozen_param_fragments)
143
+ zero_model_states.append(z_model_state)
144
+
145
+ return zero_model_states
146
+
147
+
148
+ def parse_optim_states(files, ds_checkpoint_dir):
149
+ total_files = len(files)
150
+ state_dicts = []
151
+ for f in tqdm(files, desc='Loading checkpoint shards'):
152
+ state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False)
153
+ # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
154
+ # and also handle the case where it was already removed by another helper script
155
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
156
+ state_dicts.append(state_dict)
157
+
158
+ if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
159
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
160
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
161
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
162
+
163
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
164
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
165
+ # use the max of the partition_count to get the dp world_size.
166
+
167
+ if type(world_size) is list:
168
+ world_size = max(world_size)
169
+
170
+ if world_size != total_files:
171
+ raise ValueError(
172
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
173
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
174
+ )
175
+
176
+ # the groups are named differently in each stage
177
+ if zero_stage <= 2:
178
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
179
+ elif zero_stage == 3:
180
+ fp32_groups_key = FP32_FLAT_GROUPS
181
+ else:
182
+ raise ValueError(f"unknown zero stage {zero_stage}")
183
+
184
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
185
+ return zero_stage, world_size, fp32_flat_groups
186
+
187
+
188
+ def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
189
+ """
190
+ Returns fp32 state_dict reconstructed from ds checkpoint
191
+
192
+ Args:
193
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
194
+
195
+ """
196
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
197
+
198
+ optim_files = get_optim_files(ds_checkpoint_dir)
199
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
200
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
201
+
202
+ model_files = get_model_state_files(ds_checkpoint_dir)
203
+
204
+ zero_model_states = parse_model_states(model_files)
205
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
206
+
207
+ if zero_stage <= 2:
208
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
209
+ exclude_frozen_parameters)
210
+ elif zero_stage == 3:
211
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
212
+ exclude_frozen_parameters)
213
+
214
+
215
+ def _zero2_merge_frozen_params(state_dict, zero_model_states):
216
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
217
+ return
218
+
219
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
220
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
221
+
222
+ if debug:
223
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
224
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
225
+
226
+ wanted_params = len(frozen_param_shapes)
227
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
228
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
229
+ print(f'Frozen params: Have {avail_numel} numels to process.')
230
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
231
+
232
+ total_params = 0
233
+ total_numel = 0
234
+ for name, shape in frozen_param_shapes.items():
235
+ total_params += 1
236
+ unpartitioned_numel = shape.numel()
237
+ total_numel += unpartitioned_numel
238
+
239
+ state_dict[name] = frozen_param_fragments[name]
240
+
241
+ if debug:
242
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
243
+
244
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
245
+
246
+
247
+ def _has_callable(obj, fn):
248
+ attr = getattr(obj, fn, None)
249
+ return callable(attr)
250
+
251
+
252
+ def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
253
+ param_shapes = zero_model_states[0].param_shapes
254
+
255
+ # Reconstruction protocol:
256
+ #
257
+ # XXX: document this
258
+
259
+ if debug:
260
+ for i in range(world_size):
261
+ for j in range(len(fp32_flat_groups[0])):
262
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
263
+
264
+ # XXX: memory usage doubles here (zero2)
265
+ num_param_groups = len(fp32_flat_groups[0])
266
+ merged_single_partition_of_fp32_groups = []
267
+ for i in range(num_param_groups):
268
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
269
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
270
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
271
+ avail_numel = sum(
272
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
273
+
274
+ if debug:
275
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
276
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
277
+ # not asserting if there is a mismatch due to possible padding
278
+ print(f"Have {avail_numel} numels to process.")
279
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
280
+
281
+ # params
282
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
283
+ # out-of-core computing solution
284
+ total_numel = 0
285
+ total_params = 0
286
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
287
+ offset = 0
288
+ avail_numel = full_single_fp32_vector.numel()
289
+ for name, shape in shapes.items():
290
+
291
+ unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
292
+ total_numel += unpartitioned_numel
293
+ total_params += 1
294
+
295
+ if debug:
296
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
297
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
298
+ offset += unpartitioned_numel
299
+
300
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
301
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
302
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
303
+ # live optimizer object, so we are checking that the numbers are within the right range
304
+ align_to = 2 * world_size
305
+
306
+ def zero2_align(x):
307
+ return align_to * math.ceil(x / align_to)
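+ # Example: with world_size=4, align_to == 8, so zero2_align(10) == 16 (rounded up to a multiple of 8).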
308
+
309
+ if debug:
310
+ print(f"original offset={offset}, avail_numel={avail_numel}")
311
+
312
+ offset = zero2_align(offset)
313
+ avail_numel = zero2_align(avail_numel)
314
+
315
+ if debug:
316
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
317
+
318
+ # Sanity check
319
+ if offset != avail_numel:
320
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
321
+
322
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
323
+
324
+
325
+ def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
326
+ exclude_frozen_parameters):
327
+ state_dict = OrderedDict()
328
+
329
+ # buffers
330
+ buffers = zero_model_states[0].buffers
331
+ state_dict.update(buffers)
332
+ if debug:
333
+ print(f"added {len(buffers)} buffers")
334
+
335
+ if not exclude_frozen_parameters:
336
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
337
+
338
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
339
+
340
+ # recover shared parameters
341
+ for pair in zero_model_states[0].shared_params:
342
+ if pair[1] in state_dict:
343
+ state_dict[pair[0]] = state_dict[pair[1]]
344
+
345
+ return state_dict
346
+
347
+
348
+ def zero3_partitioned_param_info(unpartitioned_numel, world_size):
349
+ remainder = unpartitioned_numel % world_size
350
+ padding_numel = (world_size - remainder) if remainder else 0
351
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
352
+ return partitioned_numel, padding_numel
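+ # Example: unpartitioned_numel=10, world_size=4 -> remainder=2, padding_numel=2,
+ # partitioned_numel=ceil(10/4)=3; 4 ranks * 3 numels = 12 = 10 real + 2 padding.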
353
+
354
+
355
+ def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
356
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
357
+ return
358
+
359
+ if debug:
360
+ for i in range(world_size):
361
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
362
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
363
+
364
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
365
+ wanted_params = len(frozen_param_shapes)
366
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
367
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
368
+ print(f'Frozen params: Have {avail_numel} numels to process.')
369
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
370
+
371
+ total_params = 0
372
+ total_numel = 0
373
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
374
+ total_params += 1
375
+ unpartitioned_numel = shape.numel()
376
+ total_numel += unpartitioned_numel
377
+
378
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
379
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
380
+
381
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
382
+
383
+ if debug:
384
+ print(
385
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
386
+ )
387
+
388
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
389
+
390
+
391
+ class GatheredTensor:
392
+ """
393
+ A pseudo tensor that collects partitioned weights.
394
+ It is more memory efficient when there are multiple groups.
395
+ """
396
+
397
+ def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape):
398
+ self.flat_groups = flat_groups
399
+ self.flat_groups_offset = flat_groups_offset
400
+ self.offset = offset
401
+ self.partitioned_numel = partitioned_numel
402
+ self.shape = shape
403
+ self.dtype = self.flat_groups[0][0].dtype
404
+
405
+ def contiguous(self):
406
+ """
407
+ Merge partitioned weights from flat_groups into a single tensor.
408
+ """
409
+ end_idx = self.offset + self.partitioned_numel
410
+ world_size = len(self.flat_groups)
411
+ pad_flat_param_chunks = []
412
+
413
+ for rank_i in range(world_size):
414
+ # for each rank, we need to collect weights from related group/groups
415
+ flat_groups_at_rank_i = self.flat_groups[rank_i]
416
+ start_group_id = None
417
+ end_group_id = None
418
+ for group_id in range(len(self.flat_groups_offset)):
419
+ if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]:
420
+ start_group_id = group_id
421
+ if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]:
422
+ end_group_id = group_id
423
+ break
424
+ # collect weights from related group/groups
425
+ for group_id in range(start_group_id, end_group_id + 1):
426
+ flat_tensor = flat_groups_at_rank_i[group_id]
427
+ start_offset = self.offset - self.flat_groups_offset[group_id]
428
+ end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id]
429
+ pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset])
430
+
431
+ # collect weights from all ranks
432
+ pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0)
433
+ param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous()
434
+ return param
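+ # i.e. the per-rank slices are concatenated in rank order, then any ZeRO-3 alignment
+ # padding beyond shape.numel() is trimmed before reshaping to the original shape.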
435
+
436
+
437
+ def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
438
+ param_shapes = zero_model_states[0].param_shapes
439
+ avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size
440
+
441
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
442
+ # param, re-consolidating each param, while dealing with padding if any
443
+
444
+ # merge list of dicts, preserving order
445
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
446
+
447
+ if debug:
448
+ for i in range(world_size):
449
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
450
+
451
+ wanted_params = len(param_shapes)
452
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
453
+ # not asserting if there is a mismatch due to possible padding
454
+ avail_numel = fp32_flat_groups[0].numel() * world_size
455
+ print(f"Trainable params: Have {avail_numel} numels to process.")
456
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
457
+
458
+ # params
459
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
460
+ # out-of-core computing solution
461
+ offset = 0
462
+ total_numel = 0
463
+ total_params = 0
464
+ flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]]))
465
+ for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'):
466
+ unpartitioned_numel = shape.numel()
467
+ total_numel += unpartitioned_numel
468
+ total_params += 1
469
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
470
+
471
+ if debug:
472
+ print(
473
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
474
+ )
475
+
476
+ # memory efficient tensor
477
+ tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape)
478
+ state_dict[name] = tensor
479
+ offset += partitioned_numel
480
+
481
+ offset *= world_size
482
+
483
+ # Sanity check
484
+ if offset != avail_numel:
485
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
486
+
487
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
488
+
489
+
490
+ def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
491
+ exclude_frozen_parameters):
492
+ state_dict = OrderedDict()
493
+
494
+ # buffers
495
+ buffers = zero_model_states[0].buffers
496
+ state_dict.update(buffers)
497
+ if debug:
498
+ print(f"added {len(buffers)} buffers")
499
+
500
+ if not exclude_frozen_parameters:
501
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
502
+
503
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
504
+
505
+ # recover shared parameters
506
+ for pair in zero_model_states[0].shared_params:
507
+ if pair[1] in state_dict:
508
+ state_dict[pair[0]] = state_dict[pair[1]]
509
+
510
+ return state_dict
511
+
512
+
513
+ def to_torch_tensor(state_dict, return_empty_tensor=False):
514
+ """
515
+ Convert state_dict of GatheredTensor to torch tensor
516
+ """
517
+ torch_state_dict = {}
518
+ converted_tensors = {}
519
+ for name, tensor in state_dict.items():
520
+ tensor_id = id(tensor)
521
+ if tensor_id in converted_tensors: # shared tensors
522
+ shared_tensor = torch_state_dict[converted_tensors[tensor_id]]
523
+ torch_state_dict[name] = shared_tensor
524
+ else:
525
+ converted_tensors[tensor_id] = name
526
+ if return_empty_tensor:
527
+ torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype)
528
+ else:
529
+ torch_state_dict[name] = tensor.contiguous()
530
+ return torch_state_dict
531
+
532
+
533
+ def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
534
+ tag=None,
535
+ exclude_frozen_parameters=False,
536
+ lazy_mode=False):
537
+ """
538
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
539
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
540
+ via a model hub.
541
+
542
+ Args:
543
+ - ``checkpoint_dir``: path to the desired checkpoint folder
544
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14``
545
+ - ``exclude_frozen_parameters``: exclude frozen parameters
546
+ - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pseudo tensors instead of torch tensors, which is more memory efficient.
547
+ Convert a pseudo tensor to a torch tensor with ``.contiguous()``
548
+
549
+ Returns:
550
+ - pytorch ``state_dict``
551
+
552
+ A typical usage might be ::
553
+
554
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
555
+ # do the training and checkpoint saving
556
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
557
+ model = model.cpu() # move to cpu
558
+ model.load_state_dict(state_dict)
559
+ # submit to model hub or save the model to share with others
560
+
561
+ In this example the ``model`` will no longer be usable in the deepspeed context of the same
562
+ application. i.e. you will need to re-initialize the deepspeed engine, since
563
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
564
+
565
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
566
+
567
+ Note: the above usage may not work if your application doesn't have sufficient free CPU memory.
568
+ You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
569
+ the checkpoint. Or you can load state_dict in lazy mode ::
570
+
571
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
572
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu
573
+ for name, lazy_tensor in state_dict.items():
574
+ tensor = lazy_tensor.contiguous() # to cpu
575
+ print(name, tensor)
576
+ # del tensor to release memory if it is no longer in use
577
+ """
578
+ if tag is None:
579
+ latest_path = os.path.join(checkpoint_dir, 'latest')
580
+ if os.path.isfile(latest_path):
581
+ with open(latest_path, 'r') as fd:
582
+ tag = fd.read().strip()
583
+ else:
584
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
585
+
586
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
587
+
588
+ if not os.path.isdir(ds_checkpoint_dir):
589
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
590
+
591
+ state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
592
+ if lazy_mode:
593
+ return state_dict
594
+ else:
595
+ return to_torch_tensor(state_dict)
596
+
597
+
598
+ def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
599
+ output_dir,
600
+ max_shard_size="5GB",
601
+ safe_serialization=False,
602
+ tag=None,
603
+ exclude_frozen_parameters=False):
604
+ """
605
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
606
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
607
+
608
+ Args:
609
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
610
+ - ``output_dir``: directory to the pytorch fp32 state_dict output files
611
+ - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
612
+ - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
613
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
614
+ - ``exclude_frozen_parameters``: exclude frozen parameters
615
+ """
616
+
617
+ # Dependency pre-check
618
+ if safe_serialization:
619
+ try:
620
+ from safetensors.torch import save_file
621
+ except ImportError:
622
+ print('If you want to use `safe_serialization`, please `pip install safetensors`')
623
+ raise
624
+ if max_shard_size is not None:
625
+ try:
626
+ from huggingface_hub import split_torch_state_dict_into_shards
627
+ except ImportError:
628
+ print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
629
+ raise
630
+
631
+ # Convert zero checkpoint to state_dict
632
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
633
+ tag,
634
+ exclude_frozen_parameters,
635
+ lazy_mode=True)
636
+
637
+ # Shard the model if it is too big.
638
+ weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
639
+ if max_shard_size is not None:
640
+ filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
641
+ # a memory-efficient approach for sharding
642
+ empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True)
643
+ state_dict_split = split_torch_state_dict_into_shards(empty_state_dict,
644
+ filename_pattern=filename_pattern,
645
+ max_shard_size=max_shard_size)
646
+ else:
647
+ from collections import namedtuple
648
+ StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
649
+ state_dict_split = StateDictSplit(is_sharded=False,
650
+ filename_to_tensors={weights_name: list(state_dict.keys())})
651
+
652
+ # Save the model by shard
653
+ os.makedirs(output_dir, exist_ok=True)
654
+ filename_to_tensors = state_dict_split.filename_to_tensors.items()
655
+ for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
656
+ shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors}
657
+ shard_state_dict = to_torch_tensor(shard_state_dict)
658
+ output_path = os.path.join(output_dir, shard_file)
659
+ if safe_serialization:
660
+ save_file(shard_state_dict, output_path, metadata={"format": "pt"})
661
+ else:
662
+ torch.save(shard_state_dict, output_path)
663
+ # release the memory of current shard
664
+ for tensor_name in list(shard_state_dict.keys()):
665
+ del state_dict[tensor_name]
666
+ del shard_state_dict[tensor_name]
667
+ del shard_state_dict
668
+ gc.collect()
669
+
670
+ # Save index if sharded
671
+ if state_dict_split.is_sharded:
672
+ index = {
673
+ "metadata": state_dict_split.metadata,
674
+ "weight_map": state_dict_split.tensor_to_filename,
675
+ }
676
+ save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
677
+ save_index_file = os.path.join(output_dir, save_index_file)
678
+ with open(save_index_file, "w", encoding="utf-8") as f:
679
+ content = json.dumps(index, indent=2, sort_keys=True) + "\n"
680
+ f.write(content)
681
+
682
+
683
+ def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
684
+ """
685
+ 1. Put the provided model to cpu
686
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
687
+ 3. Load it into the provided model
688
+
689
+ Args:
690
+ - ``model``: the model object to update
691
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
692
+ - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
693
+
694
+ Returns:
695
+ - ``model``: modified model
696
+
697
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
698
+ have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
699
+ conveniently placed for you in the checkpoint folder.
700
+
701
+ A typical usage might be ::
702
+
703
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
704
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
705
+ # submit to model hub or save the model to share with others
706
+
707
+ Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context
708
+ of the same application. i.e. you will need to re-initialize the deepspeed engine, since
709
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
710
+
711
+ """
712
+ logger.info(f"Extracting fp32 weights")
713
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
714
+
715
+ logger.info(f"Overwriting model with fp32 weights")
716
+ model = model.cpu()
717
+ model.load_state_dict(state_dict, strict=False)
718
+
719
+ return model
720
+
721
+
722
+ if __name__ == "__main__":
723
+ parser = argparse.ArgumentParser()
724
+ parser.add_argument("checkpoint_dir",
725
+ type=str,
726
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
727
+ parser.add_argument("output_dir",
728
+ type=str,
729
+ help="directory to the pytorch fp32 state_dict output files"
730
+ "(e.g. path/checkpoint-12-output/)")
731
+ parser.add_argument(
732
+ "--max_shard_size",
733
+ type=str,
734
+ default="5GB",
735
+ help="The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size"
736
+ "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`"
737
+ "We default it to 5GB in order for models to be able to run easily on free-tier google colab instances"
738
+ "without CPU OOM issues.")
739
+ parser.add_argument(
740
+ "--safe_serialization",
741
+ default=False,
742
+ action='store_true',
743
+ help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
744
+ parser.add_argument("-t",
745
+ "--tag",
746
+ type=str,
747
+ default=None,
748
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
749
+ parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
750
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
751
+ args = parser.parse_args()
752
+
753
+ debug = args.debug
754
+
755
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
756
+ args.output_dir,
757
+ max_shard_size=args.max_shard_size,
758
+ safe_serialization=args.safe_serialization,
759
+ tag=args.tag,
760
+ exclude_frozen_parameters=args.exclude_frozen_parameters)
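
Beyond the CLI entry point above, the same conversion can be driven programmatically. A minimal sketch, assuming this script sits next to a DeepSpeed checkpoint folder named checkpoint-100 (the folder names are illustrative assumptions, not directories from this commit):

# Hypothetical programmatic use of zero_to_fp32.py; paths are assumptions.
from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict

convert_zero_checkpoint_to_fp32_state_dict(
    "checkpoint-100",         # folder containing the 'latest' file and the global_stepN/ subfolder
    "checkpoint-100-fp32",    # output folder for the consolidated fp32 weights
    safe_serialization=True,  # write model.safetensors shards instead of pytorch_model.bin
)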