bowphs committed (verified)
Commit c526050 · Parent(s): 1f4e40a

Training in progress, step 4096, checkpoint
.gitattributes CHANGED
@@ -47,3 +47,4 @@ checkpoint-2500/logdata.jsonl filter=lfs diff=lfs merge=lfs -text
 checkpoint-3000/logdata.jsonl filter=lfs diff=lfs merge=lfs -text
 checkpoint-3500/logdata.jsonl filter=lfs diff=lfs merge=lfs -text
 checkpoint-4000/logdata.jsonl filter=lfs diff=lfs merge=lfs -text
+checkpoint-4096/logdata.jsonl filter=lfs diff=lfs merge=lfs -text
checkpoint-4096/config.json ADDED
@@ -0,0 +1,32 @@
+{
+  "_name_or_path": "bowphs/pythia-70m-multi",
+  "architectures": [
+    "GPTNeoXForCausalLM"
+  ],
+  "attention_bias": true,
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "classifier_dropout": 0.1,
+  "eos_token_id": 0,
+  "hidden_act": "gelu",
+  "hidden_dropout": 0.0,
+  "hidden_size": 512,
+  "initializer_range": 0.02,
+  "intermediate_size": 2048,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 2048,
+  "model_type": "gpt_neox",
+  "num_attention_heads": 8,
+  "num_hidden_layers": 6,
+  "partial_rotary_factor": 0.25,
+  "rope_scaling": null,
+  "rope_theta": 10000,
+  "rotary_emb_base": 10000,
+  "rotary_pct": 0.25,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float32",
+  "transformers_version": "4.48.0.dev0",
+  "use_cache": true,
+  "use_parallel_residual": true,
+  "vocab_size": 50304
+}
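
For reference, a minimal sketch of loading this checkpoint with transformers. The local path `checkpoint-4096` is an assumption; the directory must first be downloaded from the repository.

```python
# A minimal sketch, assuming the checkpoint-4096/ directory has been
# downloaded locally; the path is illustrative, not part of this commit.
from transformers import AutoConfig, GPTNeoXForCausalLM

config = AutoConfig.from_pretrained("checkpoint-4096")
model = GPTNeoXForCausalLM.from_pretrained("checkpoint-4096")

# config.json above describes a small GPT-NeoX variant:
# 6 layers, hidden size 512, 8 attention heads, 2048-token context.
print(model.num_parameters())
```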
checkpoint-4096/generation_config.json ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 0,
+  "eos_token_id": 0,
+  "transformers_version": "4.48.0.dev0"
+}
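
The saved generation defaults can be inspected directly; a minimal sketch, again assuming a local `checkpoint-4096/` directory:

```python
# A minimal sketch, assuming a local checkpoint-4096/ directory.
from transformers import GenerationConfig

gen = GenerationConfig.from_pretrained("checkpoint-4096")
print(gen.bos_token_id, gen.eos_token_id)  # both 0, per the file above
```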
checkpoint-4096/logdata.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:72bb49b3bbe223e3d6b7462b722ebb89ffb692cfd12122302ec7b603fec4f57e
+size 742003551
checkpoint-4096/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:635cc5ec12c2ed84c71963eea31a74903f1048f4bc8132aeaa0a5e806cad9b1f
+size 281715176
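
These files are git-lfs pointers, not the payloads themselves; a minimal sketch of fetching the actual weights with huggingface_hub. The `repo_id` is an assumption — substitute the repository this commit lives in.

```python
# A minimal sketch using huggingface_hub to resolve the LFS pointer above.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="bowphs/pythia-70m-multi",            # assumed repo id
    filename="checkpoint-4096/model.safetensors",
)
print(path)  # local cache path; the file is ~282 MB per the pointer's size
```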
checkpoint-4096/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3f85c9c280466c378390d7296392a34f6b3ebe5ae91235a4d29dcd615651d339
+size 563476858
checkpoint-4096/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:695a751857db641dd41fefce683c88bc6bcb8f21c221a48580f6a593752993f6
+size 14244
checkpoint-4096/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3f509d1b72f3ba479772e87c45dc04b025014051f410bea0fdfee306adc4e4c0
+size 1064
checkpoint-4096/special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
+{
+  "bos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
checkpoint-4096/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-4096/tokenizer_config.json ADDED
@@ -0,0 +1,31 @@
+{
+  "add_bos_token": false,
+  "add_eos_token": false,
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<|padding|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<|endoftext|>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|endoftext|>",
+  "extra_special_tokens": {},
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": null,
+  "tokenizer_class": "GPTNeoXTokenizer",
+  "unk_token": "<|endoftext|>"
+}
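
A minimal sketch of loading the tokenizer saved alongside the checkpoint (local path assumed, as above):

```python
# A minimal sketch, assuming a local checkpoint-4096/ directory.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("checkpoint-4096")
ids = tokenizer("Hello world")["input_ids"]
print(ids, "->", tokenizer.decode(ids))
```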
checkpoint-4096/trainer_state.json ADDED
@@ -0,0 +1,182 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 0.13653333333333334,
+  "eval_steps": 2000,
+  "global_step": 4096,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 3.3333333333333335e-05,
+      "eval_accuracy": 0.016437927663734114,
+      "eval_loss": 10.702861785888672,
+      "eval_runtime": 53.5239,
+      "eval_samples_per_second": 93.416,
+      "eval_steps_per_second": 2.933,
+      "step": 1
+    },
+    {
+      "epoch": 6.666666666666667e-05,
+      "eval_accuracy": 0.049564027370478984,
+      "eval_loss": 10.533119201660156,
+      "eval_runtime": 53.3666,
+      "eval_samples_per_second": 93.692,
+      "eval_steps_per_second": 2.942,
+      "step": 2
+    },
+    {
+      "epoch": 0.00013333333333333334,
+      "eval_accuracy": 0.05325317693059629,
+      "eval_loss": 10.302159309387207,
+      "eval_runtime": 53.3789,
+      "eval_samples_per_second": 93.67,
+      "eval_steps_per_second": 2.941,
+      "step": 4
+    },
+    {
+      "epoch": 0.0002666666666666667,
+      "eval_accuracy": 0.053565591397849464,
+      "eval_loss": 10.023477554321289,
+      "eval_runtime": 53.6667,
+      "eval_samples_per_second": 93.168,
+      "eval_steps_per_second": 2.925,
+      "step": 8
+    },
+    {
+      "epoch": 0.0005333333333333334,
+      "eval_accuracy": 0.06345063538611925,
+      "eval_loss": 9.653569221496582,
+      "eval_runtime": 53.2342,
+      "eval_samples_per_second": 93.925,
+      "eval_steps_per_second": 2.949,
+      "step": 16
+    },
+    {
+      "epoch": 0.0010666666666666667,
+      "eval_accuracy": 0.07593020527859237,
+      "eval_loss": 9.028414726257324,
+      "eval_runtime": 53.4229,
+      "eval_samples_per_second": 93.593,
+      "eval_steps_per_second": 2.939,
+      "step": 32
+    },
+    {
+      "epoch": 0.0021333333333333334,
+      "eval_accuracy": 0.08323401759530792,
+      "eval_loss": 8.02489185333252,
+      "eval_runtime": 53.5928,
+      "eval_samples_per_second": 93.296,
+      "eval_steps_per_second": 2.929,
+      "step": 64
+    },
+    {
+      "epoch": 0.004266666666666667,
+      "eval_accuracy": 0.11289071358748778,
+      "eval_loss": 6.917232990264893,
+      "eval_runtime": 53.9308,
+      "eval_samples_per_second": 92.711,
+      "eval_steps_per_second": 2.911,
+      "step": 128
+    },
+    {
+      "epoch": 0.008533333333333334,
+      "eval_accuracy": 0.15581857282502443,
+      "eval_loss": 6.162940502166748,
+      "eval_runtime": 53.873,
+      "eval_samples_per_second": 92.811,
+      "eval_steps_per_second": 2.914,
+      "step": 256
+    },
+    {
+      "epoch": 0.017066666666666667,
+      "eval_accuracy": 0.18169071358748778,
+      "eval_loss": 5.580474376678467,
+      "eval_runtime": 53.993,
+      "eval_samples_per_second": 92.605,
+      "eval_steps_per_second": 2.908,
+      "step": 512
+    },
+    {
+      "epoch": 0.034133333333333335,
+      "eval_accuracy": 0.20284437927663734,
+      "eval_loss": 5.123527526855469,
+      "eval_runtime": 53.733,
+      "eval_samples_per_second": 93.053,
+      "eval_steps_per_second": 2.922,
+      "step": 1024
+    },
+    {
+      "epoch": 0.06666666666666667,
+      "grad_norm": 0.9611704349517822,
+      "learning_rate": 4.666666666666667e-05,
+      "loss": 5.4529,
+      "step": 2000
+    },
+    {
+      "epoch": 0.06666666666666667,
+      "eval_accuracy": 0.2264314760508309,
+      "eval_loss": 4.761301517486572,
+      "eval_runtime": 55.2965,
+      "eval_samples_per_second": 90.422,
+      "eval_steps_per_second": 2.839,
+      "step": 2000
+    },
+    {
+      "epoch": 0.06826666666666667,
+      "eval_accuracy": 0.22808172043010752,
+      "eval_loss": 4.7480597496032715,
+      "eval_runtime": 54.5009,
+      "eval_samples_per_second": 91.742,
+      "eval_steps_per_second": 2.881,
+      "step": 2048
+    },
+    {
+      "epoch": 0.13333333333333333,
+      "grad_norm": 0.9782880544662476,
+      "learning_rate": 4.3333333333333334e-05,
+      "loss": 4.5765,
+      "step": 4000
+    },
+    {
+      "epoch": 0.13333333333333333,
+      "eval_accuracy": 0.2610080156402737,
+      "eval_loss": 4.412333011627197,
+      "eval_runtime": 55.0769,
+      "eval_samples_per_second": 90.782,
+      "eval_steps_per_second": 2.851,
+      "step": 4000
+    },
+    {
+      "epoch": 0.13653333333333334,
+      "eval_accuracy": 0.2625071358748778,
+      "eval_loss": 4.404272556304932,
+      "eval_runtime": 54.9058,
+      "eval_samples_per_second": 91.065,
+      "eval_steps_per_second": 2.859,
+      "step": 4096
+    }
+  ],
+  "logging_steps": 2000,
+  "max_steps": 30000,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 9223372036854775807,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": false
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 3.597382143757517e+16,
+  "train_batch_size": 32,
+  "trial_name": null,
+  "trial_params": null
+}
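
A minimal sketch that summarizes the evaluation curve recorded in `trainer_state.json` above (local checkpoint path assumed):

```python
# A minimal sketch: print the eval-loss trajectory from trainer_state.json.
import json

with open("checkpoint-4096/trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_loss" in entry:  # skip the pure training-log entries
        print(f"step {entry['step']:>5}: "
              f"eval_loss={entry['eval_loss']:.3f}  "
              f"eval_accuracy={entry['eval_accuracy']:.4f}")
# Per the data above, eval_loss falls from ~10.70 at step 1
# to ~4.40 at step 4096.
```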
checkpoint-4096/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7bfb964096776c793ae319346657629d176228270098861d014f2f3c248437f7
+size 5304