chitanda committed · Commit 9582185 · verified · 1 parent: d9f71f4

Upload folder using huggingface_hub

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full set.
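The commit message says the folder was pushed with `huggingface_hub`. As a minimal sketch (not the author's actual script), an upload like this is typically produced by `HfApi.upload_folder`; the repo id and local path below are hypothetical placeholders:

```python
# Minimal sketch of an upload that yields a commit like this one.
# Repo id and folder path are hypothetical; the real ones are not shown here.
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="experiments/my-dpo-run",   # hypothetical local folder
    repo_id="chitanda/deepseek-math-dpo",   # hypothetical repo id
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```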
Files changed (50)
  1. .gitattributes +9 -0
  2. checkpoint-100/config.json +29 -0
  3. checkpoint-100/generation_config.json +7 -0
  4. checkpoint-100/math500.test.v1.1.0shot.jsonl +0 -0
  5. checkpoint-100/math500.test.wrong_correct.v1.0.0shot.n5.tem1.0.json +0 -0
  6. checkpoint-100/math500.test.wrong_correct.v1.0.0shot.n5.tem1.0.jsonl +0 -0
  7. checkpoint-100/math500.test.wrong_correct.v1.0.0shot.n5.tem1.0.metrics.json +6 -0
  8. checkpoint-100/pytorch_model.bin +3 -0
  9. checkpoint-100/special_tokens_map.json +30 -0
  10. checkpoint-100/tokenizer.json +0 -0
  11. checkpoint-100/tokenizer_config.json +42 -0
  12. checkpoint-100/training_config.yaml +144 -0
  13. checkpoint-200/config.json +29 -0
  14. checkpoint-200/generation_config.json +7 -0
  15. checkpoint-200/math500.test.wrong_correct.v1.0.0shot.n5.tem1.0.json +0 -0
  16. checkpoint-200/math500.test.wrong_correct.v1.0.0shot.n5.tem1.0.jsonl +0 -0
  17. checkpoint-200/math500.test.wrong_correct.v1.0.0shot.n5.tem1.0.metrics.json +6 -0
  18. checkpoint-200/pytorch_model.bin +3 -0
  19. checkpoint-200/special_tokens_map.json +30 -0
  20. checkpoint-200/tokenizer.json +0 -0
  21. checkpoint-200/tokenizer_config.json +42 -0
  22. checkpoint-200/training_config.yaml +144 -0
  23. checkpoint-300/config.json +29 -0
  24. checkpoint-300/generation_config.json +7 -0
  25. checkpoint-300/math500.test.wrong_correct.v1.0.0shot.n5.tem1.0.json +0 -0
  26. checkpoint-300/math500.test.wrong_correct.v1.0.0shot.n5.tem1.0.jsonl +0 -0
  27. checkpoint-300/math500.test.wrong_correct.v1.0.0shot.n5.tem1.0.metrics.json +6 -0
  28. checkpoint-300/meta_math_sub_math.55k.cot.n5.tem0.8.p0.9.v1.0_dsk_clean_v2.wrong_correct.v1.0.0shot.n5.tem1.0.0-of-4.json +3 -0
  29. checkpoint-300/meta_math_sub_math.55k.cot.n5.tem0.8.p0.9.v1.0_dsk_clean_v2.wrong_correct.v1.0.0shot.n5.tem1.0.0-of-4.jsonl +3 -0
  30. checkpoint-300/meta_math_sub_math.55k.cot.n5.tem0.8.p0.9.v1.0_dsk_clean_v2.wrong_correct.v1.0.0shot.n5.tem1.0.0-of-4.metrics.json +6 -0
  31. checkpoint-300/meta_math_sub_math.55k.cot.n5.tem0.8.p0.9.v1.0_dsk_clean_v2.wrong_correct.v1.0.0shot.n5.tem1.0.1-of-4.json +3 -0
  32. checkpoint-300/meta_math_sub_math.55k.cot.n5.tem0.8.p0.9.v1.0_dsk_clean_v2.wrong_correct.v1.0.0shot.n5.tem1.0.1-of-4.jsonl +3 -0
  33. checkpoint-300/meta_math_sub_math.55k.cot.n5.tem0.8.p0.9.v1.0_dsk_clean_v2.wrong_correct.v1.0.0shot.n5.tem1.0.1-of-4.metrics.json +6 -0
  34. checkpoint-300/meta_math_sub_math.55k.cot.n5.tem0.8.p0.9.v1.0_dsk_clean_v2.wrong_correct.v1.0.0shot.n5.tem1.0.2-of-4.json +3 -0
  35. checkpoint-300/meta_math_sub_math.55k.cot.n5.tem0.8.p0.9.v1.0_dsk_clean_v2.wrong_correct.v1.0.0shot.n5.tem1.0.2-of-4.jsonl +3 -0
  36. checkpoint-300/meta_math_sub_math.55k.cot.n5.tem0.8.p0.9.v1.0_dsk_clean_v2.wrong_correct.v1.0.0shot.n5.tem1.0.2-of-4.metrics.json +6 -0
  37. checkpoint-300/meta_math_sub_math.55k.cot.n5.tem0.8.p0.9.v1.0_dsk_clean_v2.wrong_correct.v1.0.0shot.n5.tem1.0.3-of-4.json +3 -0
  38. checkpoint-300/meta_math_sub_math.55k.cot.n5.tem0.8.p0.9.v1.0_dsk_clean_v2.wrong_correct.v1.0.0shot.n5.tem1.0.3-of-4.jsonl +3 -0
  39. checkpoint-300/meta_math_sub_math.55k.cot.n5.tem0.8.p0.9.v1.0_dsk_clean_v2.wrong_correct.v1.0.0shot.n5.tem1.0.3-of-4.metrics.json +6 -0
  40. checkpoint-300/meta_math_sub_math.55k.cot.n5.tem0.8.p0.9.v1.0_dsk_clean_v2.wrong_correct.v1.0.0shot.n5.tem1.0.decomposed.dpo.json +3 -0
  41. checkpoint-300/pytorch_model.bin +3 -0
  42. checkpoint-300/special_tokens_map.json +30 -0
  43. checkpoint-300/tokenizer.json +0 -0
  44. checkpoint-300/tokenizer_config.json +42 -0
  45. checkpoint-300/training_config.yaml +144 -0
  46. checkpoint-400/config.json +29 -0
  47. checkpoint-400/generation_config.json +7 -0
  48. checkpoint-400/math500.test.wrong_correct.v1.0.0shot.n5.tem1.0.json +0 -0
  49. checkpoint-400/math500.test.wrong_correct.v1.0.0shot.n5.tem1.0.jsonl +0 -0
  50. checkpoint-400/math500.test.wrong_correct.v1.0.0shot.n5.tem1.0.metrics.json +6 -0
.gitattributes CHANGED
@@ -33,3 +33,12 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+checkpoint-300/meta_math_sub_math.55k.cot.n5.tem0.8.p0.9.v1.0_dsk_clean_v2.wrong_correct.v1.0.0shot.n5.tem1.0.0-of-4.json filter=lfs diff=lfs merge=lfs -text
+checkpoint-300/meta_math_sub_math.55k.cot.n5.tem0.8.p0.9.v1.0_dsk_clean_v2.wrong_correct.v1.0.0shot.n5.tem1.0.0-of-4.jsonl filter=lfs diff=lfs merge=lfs -text
+checkpoint-300/meta_math_sub_math.55k.cot.n5.tem0.8.p0.9.v1.0_dsk_clean_v2.wrong_correct.v1.0.0shot.n5.tem1.0.1-of-4.json filter=lfs diff=lfs merge=lfs -text
+checkpoint-300/meta_math_sub_math.55k.cot.n5.tem0.8.p0.9.v1.0_dsk_clean_v2.wrong_correct.v1.0.0shot.n5.tem1.0.1-of-4.jsonl filter=lfs diff=lfs merge=lfs -text
+checkpoint-300/meta_math_sub_math.55k.cot.n5.tem0.8.p0.9.v1.0_dsk_clean_v2.wrong_correct.v1.0.0shot.n5.tem1.0.2-of-4.json filter=lfs diff=lfs merge=lfs -text
+checkpoint-300/meta_math_sub_math.55k.cot.n5.tem0.8.p0.9.v1.0_dsk_clean_v2.wrong_correct.v1.0.0shot.n5.tem1.0.2-of-4.jsonl filter=lfs diff=lfs merge=lfs -text
+checkpoint-300/meta_math_sub_math.55k.cot.n5.tem0.8.p0.9.v1.0_dsk_clean_v2.wrong_correct.v1.0.0shot.n5.tem1.0.3-of-4.json filter=lfs diff=lfs merge=lfs -text
+checkpoint-300/meta_math_sub_math.55k.cot.n5.tem0.8.p0.9.v1.0_dsk_clean_v2.wrong_correct.v1.0.0shot.n5.tem1.0.3-of-4.jsonl filter=lfs diff=lfs merge=lfs -text
+checkpoint-300/meta_math_sub_math.55k.cot.n5.tem0.8.p0.9.v1.0_dsk_clean_v2.wrong_correct.v1.0.0shot.n5.tem1.0.decomposed.dpo.json filter=lfs diff=lfs merge=lfs -text
checkpoint-100/config.json ADDED
@@ -0,0 +1,29 @@
+{
+  "_name_or_path": "experiments/deepseek-math.ins.7b.assess.sft.H100.w2.v2.2/checkpoint-200",
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 100000,
+  "eos_token_id": 100001,
+  "hidden_act": "silu",
+  "hidden_size": 4096,
+  "initializer_range": 0.02,
+  "intermediate_size": 11008,
+  "max_position_embeddings": 4096,
+  "model_type": "llama",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 30,
+  "num_key_value_heads": 32,
+  "pad_token_id": 100001,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": null,
+  "rope_theta": 10000.0,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.40.1",
+  "use_cache": false,
+  "vocab_size": 102400
+}
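Since the config declares a stock `LlamaForCausalLM` in bfloat16, each checkpoint should load with plain transformers. A minimal loading sketch, assuming the checkpoint folder has been downloaded locally:

```python
# Load a checkpoint whose config.json is shown above.
# "checkpoint-100" is assumed to be a local copy of that folder.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("checkpoint-100")
model = AutoModelForCausalLM.from_pretrained(
    "checkpoint-100",
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16"
)
```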
checkpoint-100/generation_config.json ADDED
@@ -0,0 +1,7 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 100000,
+  "eos_token_id": 100001,
+  "pad_token_id": 100001,
+  "transformers_version": "4.40.1"
+}
checkpoint-100/math500.test.v1.1.0shot.jsonl ADDED
File without changes
checkpoint-100/math500.test.wrong_correct.v1.0.0shot.n5.tem1.0.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-100/math500.test.wrong_correct.v1.0.0shot.n5.tem1.0.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-100/math500.test.wrong_correct.v1.0.0shot.n5.tem1.0.metrics.json ADDED
@@ -0,0 +1,6 @@
+{
+  "acc": 0.10869565217391304,
+  "pass@k": 0.32608695652173914,
+  "correct": 30,
+  "total": 276
+}
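Each `*.metrics.json` pairs an accuracy with a pass@k over n=5 samples per problem (here 30/276 and 90/276). The exact definitions are not included in this commit; a plausible sketch, assuming `acc` scores the first sample and `pass@k` any of the n samples:

```python
# Plausible computation of the fields in *.metrics.json; the per-problem input
# format below is an assumption, not code from this repository.
def compute_metrics(results):
    # results: one list of n=5 correctness booleans per problem
    total = len(results)
    correct = sum(flags[0] for flags in results)   # first-sample accuracy
    passed = sum(any(flags) for flags in results)  # pass@k: any sample correct
    return {"acc": correct / total, "pass@k": passed / total,
            "correct": correct, "total": total}
```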
checkpoint-100/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b4fc4fd23ef6e7b66a83899b97362d5132b53e85184416c98e7d335eec19f0e7
+size 13820769582
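The weights themselves are not in the git history; the diff shows only the three-line Git LFS v1 pointer (spec version, SHA-256 of the blob, and its byte size, here about 13.8 GB). A small sketch that parses such a pointer:

```python
# Parse a Git LFS v1 pointer like the one above.
def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    assert fields["version"] == "https://git-lfs.github.com/spec/v1"
    algo, digest = fields["oid"].split(":", 1)
    return {"algo": algo, "digest": digest, "size": int(fields["size"])}

ptr = """version https://git-lfs.github.com/spec/v1
oid sha256:b4fc4fd23ef6e7b66a83899b97362d5132b53e85184416c98e7d335eec19f0e7
size 13820769582"""
print(parse_lfs_pointer(ptr)["size"] / 1e9)  # ~13.82 GB
```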
checkpoint-100/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+{
+  "bos_token": {
+    "content": "<|begin▁of▁sentence|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|end▁of▁sentence|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<|end▁of▁sentence|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
checkpoint-100/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-100/tokenizer_config.json ADDED
@@ -0,0 +1,42 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "added_tokens_decoder": {
+    "100000": {
+      "content": "<|begin▁of▁sentence|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100001": {
+      "content": "<|end▁of▁sentence|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100002": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<|begin▁of▁sentence|>",
+  "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{% if message['role'] == 'user' %}{{ 'User: ' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ 'Assistant: ' + message['content'] + eos_token }}{% elif message['role'] == 'system' %}{{ message['content'] + '\n\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|end▁of▁sentence|>",
+  "legacy": true,
+  "model_max_length": 4096,
+  "pad_token": "<|end▁of▁sentence|>",
+  "padding_side": "left",
+  "sp_model_kwargs": {},
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": false
+}
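The `chat_template` above renders DeepSeek-style `User:` / `Assistant:` turns separated by blank lines. transformers can apply it directly; a minimal sketch, again assuming a local copy of the checkpoint:

```python
# Render a one-turn conversation with the chat template shown above.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("checkpoint-100")
prompt = tokenizer.apply_chat_template(
    [{"role": "user", "content": "What is 2 + 2?"}],
    tokenize=False,
    add_generation_prompt=True,
)
print(prompt)
# "<|begin▁of▁sentence|>User: What is 2 + 2?\n\nAssistant:"
```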
checkpoint-100/training_config.yaml ADDED
@@ -0,0 +1,144 @@
+ds_cfg:
+  train_micro_batch_size_per_gpu: ${per_gpu_train_batch_size}
+  gradient_accumulation_steps: ${gradient_accumulation_steps}
+  scheduler:
+    type: WarmupDecayLR
+    params:
+      total_num_steps: 404
+      warmup_max_lr: ${learning_rate}
+      warmup_num_steps: 24
+      warmup_type: linear
+  optimizer:
+    type: AdamW
+    params:
+      lr: ${learning_rate}
+      betas:
+      - 0.9
+      - 0.95
+      eps: 1.0e-06
+      weight_decay: ${weight_decay}
+  bf16:
+    enabled: true
+  zero_optimization:
+    stage: 1
+    offload_optimizer:
+      device: cpu
+      pin_memory: true
+    stage3_param_persistence_threshold: 100000.0
+    stage3_max_live_parameters: 100000000.0
+    stage3_prefetch_bucket_size: 100000000.0
+    memory_efficient_linear: false
+  steps_per_print: 25
+  gradient_clipping: 1.0
+  prescale_gradients: false
+sft_model_dir: experiments/deepseek-math.ins.7b.assess.sft.H100.w2.v2.2/checkpoint-200
+train_file: ${sft_model_dir}/meta_math_sub_math.55k.cot.n5.tem0.8.p0.9.v1.0_dsk_clean_v2.wrong_correct.v1.0.0shot.n5.tem1.0.decomposed.json
+dev_file: null
+test_file: null
+torch_dtype:
+  _target_: general_util.training_utils.return_torch_dtype
+  dtype: bfloat16
+tokenizer_init:
+  _target_: general_util.tokenization_utils.init_tokenizer
+  tokenizer_path: experiments/deepseek-math.ins.7b.assess.sft.H100.w2.v2.2/checkpoint-200
+  padding_side: left
+device_map:
+  _target_: models.utils.return_single_device_map
+model:
+  _target_: models.llama.LlamaForCausalLMDPO.from_pretrained_with_ref_model
+  beta: 0.5
+  gradient_checkpointing: true
+  attn_implementation: flash_attention_2
+  torch_dtype: ${torch_dtype}
+  device_map: ${device_map}
+  ref_model:
+    _target_: models.llama.LlamaForCausalLMDPO.from_pretrained
+    pretrained_model_name_or_path: ${model_name_or_path}
+    torch_dtype: ${torch_dtype}
+    attn_implementation: flash_attention_2
+    device_map: ${device_map}
+read_tensor:
+  _target_: data.logic_combine.MultiMappingDataset
+  aligner:
+    _target_: data.input_aligner.concat_aligner
+    aligners:
+    - _target_: data.input_aligner.dpo_pair_aligner
+      pos_field: chosen_response
+      neg_field: reject_response
+  template:
+    chosen: '{text}{pos}<|end▁of▁sentence|>'
+    reject: '{text}{neg}<|end▁of▁sentence|>'
+    prompt: '{text}'
+  kv_mapping:
+    chosen: chosen
+    reject: reject
+    id: index
+    prompt: prompt
+dist_load_data_barrier: false
+extended_vocab: null
+collator:
+  _target_: data.dpo.DPOCollator
+  tokenizer: ${tokenizer_init}
+  max_seq_length: 1024
+num_workers: 8
+prefetch_factor: 2
+model_name_or_path: ${sft_model_dir}
+pretrain: null
+dp_size: 4
+tp_size: 1
+pp_size: 1
+exp_name: deepseek-math.7b.ins.meta_math_cot.math55k.n5.critic_correct.dpo.H100.w4.v2.0.s${seed}
+exp_notes: null
+output_dir: experiments/${exp_name}
+do_train: true
+evaluate_during_training: false
+do_eval: false
+eval_sub_path: checkpoint-100
+per_gpu_train_batch_size: 4
+per_gpu_eval_batch_size: 4
+learning_rate: 1.0e-06
+gradient_accumulation_steps: 4
+weight_decay: 0.1
+adam_epsilon: 1.0e-06
+adam_betas: (0.9, 0.98)
+total_dataset_len: 25864
+max_grad_norm: 1.0
+num_train_epochs: 1
+max_steps: 0
+warmup_proportion: 0.06
+warmup_steps: 0
+optimizer: null
+use_nvlamb: null
+bit_training: null
+logging_steps: 5
+save_ds_state: false
+save_steps: 100
+save_best: false
+eval_steps: 400
+ddp_eval: true
+no_cuda: false
+seed: 42
+local_rank: 0
+fp16: true
+fp16_opt_level: O1
+fp16_bfloat16: true
+prediction_cfg:
+  metric: loss
+  measure: -1
+  best_checkpoint: null
+  best_result: null
+eval_forward_fn:
+  _target_: general_util.evaluator.DefaultForwardFn
+post_process:
+  _target_: post_processors.dpo.DPOEvalPostProcessor
+summary_helper:
+  _target_: general_util.tensorboard_helper.WandbWriter
+  batch_index_or_keys: null
+  outputs_index_or_keys:
+    train/chosen_reward: chosen_reward
+    train/rejected_reward: rejected_reward
+n_gpu: 1
+device: cuda:0
+train_batch_size: 4
+eval_batch_size: null
+world_size: 4
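The config instantiates `LlamaForCausalLMDPO` with `beta: 0.5` plus a frozen reference model, i.e. DPO training over chosen/rejected pairs; the logged `train/chosen_reward` and `train/rejected_reward` keys are the usual DPO implicit rewards. `LlamaForCausalLMDPO` comes from the author's own repo and is not shown here, so the following is only a sketch of the standard DPO objective it presumably implements:

```python
# Standard DPO loss with beta = 0.5, as suggested by the config above.
# Each argument is the summed log-probability of a completion under the
# policy (logp_*) or the frozen reference model (ref_logp_*).
import torch
import torch.nn.functional as F

def dpo_loss(logp_chosen, logp_rejected, ref_logp_chosen, ref_logp_rejected, beta=0.5):
    chosen_reward = beta * (logp_chosen - ref_logp_chosen)
    rejected_reward = beta * (logp_rejected - ref_logp_rejected)
    loss = -F.logsigmoid(chosen_reward - rejected_reward).mean()
    return loss, chosen_reward.mean(), rejected_reward.mean()
```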
checkpoint-200/config.json ADDED
(Identical to checkpoint-100/config.json above; 29 added lines.)
checkpoint-200/generation_config.json ADDED
(Identical to checkpoint-100/generation_config.json above; 7 added lines.)
checkpoint-200/math500.test.wrong_correct.v1.0.0shot.n5.tem1.0.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-200/math500.test.wrong_correct.v1.0.0shot.n5.tem1.0.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-200/math500.test.wrong_correct.v1.0.0shot.n5.tem1.0.metrics.json ADDED
@@ -0,0 +1,6 @@
+{
+  "acc": 0.10144927536231885,
+  "pass@k": 0.2898550724637681,
+  "correct": 28,
+  "total": 276
+}
checkpoint-200/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:99a1e19619c6c86bc6ca9d86c031d1ab5dce94f9229e1227d65992f88c2217a3
+size 13820769582
checkpoint-200/special_tokens_map.json ADDED
(Identical to checkpoint-100/special_tokens_map.json above; 30 added lines.)
checkpoint-200/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-200/tokenizer_config.json ADDED
(Identical to checkpoint-100/tokenizer_config.json above; 42 added lines.)
checkpoint-200/training_config.yaml ADDED
(Identical to checkpoint-100/training_config.yaml above; 144 added lines.)
checkpoint-300/config.json ADDED
(Identical to checkpoint-100/config.json above; 29 added lines.)
checkpoint-300/generation_config.json ADDED
(Identical to checkpoint-100/generation_config.json above; 7 added lines.)
checkpoint-300/math500.test.wrong_correct.v1.0.0shot.n5.tem1.0.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-300/math500.test.wrong_correct.v1.0.0shot.n5.tem1.0.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-300/math500.test.wrong_correct.v1.0.0shot.n5.tem1.0.metrics.json ADDED
@@ -0,0 +1,6 @@
+{
+  "acc": 0.14130434782608695,
+  "pass@k": 0.3115942028985507,
+  "correct": 39,
+  "total": 276
+}
checkpoint-300/meta_math_sub_math.55k.cot.n5.tem0.8.p0.9.v1.0_dsk_clean_v2.wrong_correct.v1.0.0shot.n5.tem1.0.0-of-4.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b0f3b7706cdd3548a4654f8b2dda9f4e0969cd3ba11887e48d2db359219f2ed3
+size 24658738
checkpoint-300/meta_math_sub_math.55k.cot.n5.tem0.8.p0.9.v1.0_dsk_clean_v2.wrong_correct.v1.0.0shot.n5.tem1.0.0-of-4.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:49bae6b021243e6c8293a5c9f8613a6ad3e9ee6a9f46cd59825bc4fb895ffcfe
+size 24261798
checkpoint-300/meta_math_sub_math.55k.cot.n5.tem0.8.p0.9.v1.0_dsk_clean_v2.wrong_correct.v1.0.0shot.n5.tem1.0.0-of-4.metrics.json ADDED
@@ -0,0 +1,6 @@
+{
+  "acc": 0.3133608815426997,
+  "pass@k": 0.5650826446280992,
+  "correct": 910,
+  "total": 2904
+}
checkpoint-300/meta_math_sub_math.55k.cot.n5.tem0.8.p0.9.v1.0_dsk_clean_v2.wrong_correct.v1.0.0shot.n5.tem1.0.1-of-4.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f771fbf612c6827707a8377ed614d316e43e360d1c29abffacc3281ec2843979
+size 25073186
checkpoint-300/meta_math_sub_math.55k.cot.n5.tem0.8.p0.9.v1.0_dsk_clean_v2.wrong_correct.v1.0.0shot.n5.tem1.0.1-of-4.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9655dfa50808353edfd8956044acaf85ab0a18849131513aa8cfc450fe3fecbe
+size 24676309
checkpoint-300/meta_math_sub_math.55k.cot.n5.tem0.8.p0.9.v1.0_dsk_clean_v2.wrong_correct.v1.0.0shot.n5.tem1.0.1-of-4.metrics.json ADDED
@@ -0,0 +1,6 @@
+{
+  "acc": 0.33505509641873277,
+  "pass@k": 0.5702479338842975,
+  "correct": 973,
+  "total": 2904
+}
checkpoint-300/meta_math_sub_math.55k.cot.n5.tem0.8.p0.9.v1.0_dsk_clean_v2.wrong_correct.v1.0.0shot.n5.tem1.0.2-of-4.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5b697655ea6a42ec440866406701502d81a02d49a46473d5bae69f2b1d83ec34
+size 25072252
checkpoint-300/meta_math_sub_math.55k.cot.n5.tem0.8.p0.9.v1.0_dsk_clean_v2.wrong_correct.v1.0.0shot.n5.tem1.0.2-of-4.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fffb79729b9d5c108904d900326760713c87ad7042c822d9bbb8e8522be948b9
+size 24675394
checkpoint-300/meta_math_sub_math.55k.cot.n5.tem0.8.p0.9.v1.0_dsk_clean_v2.wrong_correct.v1.0.0shot.n5.tem1.0.2-of-4.metrics.json ADDED
@@ -0,0 +1,6 @@
+{
+  "acc": 0.3415977961432507,
+  "pass@k": 0.581267217630854,
+  "correct": 992,
+  "total": 2904
+}
checkpoint-300/meta_math_sub_math.55k.cot.n5.tem0.8.p0.9.v1.0_dsk_clean_v2.wrong_correct.v1.0.0shot.n5.tem1.0.3-of-4.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:87a0b5b90a2152dfbb524cdb5af0e48e7e8100ca11a37d06e27768ecc55e8a7a
+size 24848726
checkpoint-300/meta_math_sub_math.55k.cot.n5.tem0.8.p0.9.v1.0_dsk_clean_v2.wrong_correct.v1.0.0shot.n5.tem1.0.3-of-4.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5ff23a705afc98563b8b64354c06df79493628c8c5fb45439db9ca83f3dd7cf1
+size 24452130
checkpoint-300/meta_math_sub_math.55k.cot.n5.tem0.8.p0.9.v1.0_dsk_clean_v2.wrong_correct.v1.0.0shot.n5.tem1.0.3-of-4.metrics.json ADDED
@@ -0,0 +1,6 @@
+{
+  "acc": 0.33769813921433495,
+  "pass@k": 0.5678842177808407,
+  "correct": 980,
+  "total": 2902
+}
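The four shard files above split checkpoint-300's evaluation of the 55k training-set generations into quarters; their counts can be pooled into overall numbers. A small sketch, assuming summing correct/total across shards is the intended aggregation (per-shard pass counts are recoverable as round(pass@k * total)):

```python
# Pool the 0-of-4 .. 3-of-4 shard metrics shown above.
shards = [
    {"pass@k": 0.5650826446280992, "correct": 910, "total": 2904},
    {"pass@k": 0.5702479338842975, "correct": 973, "total": 2904},
    {"pass@k": 0.581267217630854,  "correct": 992, "total": 2904},
    {"pass@k": 0.5678842177808407, "correct": 980, "total": 2902},
]
total = sum(s["total"] for s in shards)                        # 11614
correct = sum(s["correct"] for s in shards)                    # 3855
passed = sum(round(s["pass@k"] * s["total"]) for s in shards)  # 6633
print(correct / total, passed / total)  # overall acc ~ 0.332, pass@k ~ 0.571
```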
checkpoint-300/meta_math_sub_math.55k.cot.n5.tem0.8.p0.9.v1.0_dsk_clean_v2.wrong_correct.v1.0.0shot.n5.tem1.0.decomposed.dpo.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:00c2c8efe212cb48172dbabec60545eb301ea7cd0dde45a50e2e786830f0d8ad
+size 264188903
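This decomposed.dpo.json is the preference file consumed by training_config.yaml above (dpo_pair_aligner with pos_field: chosen_response and neg_field: reject_response, filled into the chosen/reject templates). The blob lives in LFS, so its records are not visible in this diff; a hypothetical example of the record shape that config implies:

```python
# Hypothetical record shape implied by dpo_pair_aligner + template in
# training_config.yaml; the real file contents are an LFS blob not shown here.
record = {
    "text": "User: Compute 2 + 2.\n\nAssistant:",  # fills the '{text}' prompt slot
    "chosen_response": " 2 + 2 = 4.",              # fills '{pos}'
    "reject_response": " 2 + 2 = 5.",              # fills '{neg}'
}
chosen = record["text"] + record["chosen_response"] + "<|end▁of▁sentence|>"
reject = record["text"] + record["reject_response"] + "<|end▁of▁sentence|>"
```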
checkpoint-300/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7238d3011e15d2c96e43e1b3bca3014cc87788eb8be5fb27b22cdc0a8a7274ba
+size 13820769582
checkpoint-300/special_tokens_map.json ADDED
(Identical to checkpoint-100/special_tokens_map.json above; 30 added lines.)
checkpoint-300/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-300/tokenizer_config.json ADDED
(Identical to checkpoint-100/tokenizer_config.json above; 42 added lines.)
checkpoint-300/training_config.yaml ADDED
(Identical to checkpoint-100/training_config.yaml above; 144 added lines.)
checkpoint-400/config.json ADDED
(Identical to checkpoint-100/config.json above; 29 added lines.)
checkpoint-400/generation_config.json ADDED
(Identical to checkpoint-100/generation_config.json above; 7 added lines.)
checkpoint-400/math500.test.wrong_correct.v1.0.0shot.n5.tem1.0.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-400/math500.test.wrong_correct.v1.0.0shot.n5.tem1.0.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-400/math500.test.wrong_correct.v1.0.0shot.n5.tem1.0.metrics.json ADDED
@@ -0,0 +1,6 @@
+{
+  "acc": 0.12681159420289856,
+  "pass@k": 0.2898550724637681,
+  "correct": 35,
+  "total": 276
+}