Training in progress, step 16, checkpoint
- checkpoint-16/config.json +32 -0
- checkpoint-16/generation_config.json +6 -0
- checkpoint-16/logdata.jsonl +0 -0
- checkpoint-16/model.safetensors +3 -0
- checkpoint-16/optimizer.pt +3 -0
- checkpoint-16/rng_state.pth +3 -0
- checkpoint-16/scheduler.pt +3 -0
- checkpoint-16/special_tokens_map.json +23 -0
- checkpoint-16/tokenizer.json +0 -0
- checkpoint-16/tokenizer_config.json +31 -0
- checkpoint-16/trainer_state.json +78 -0
- checkpoint-16/training_args.bin +3 -0
checkpoint-16/config.json
ADDED
@@ -0,0 +1,32 @@
+{
+  "_name_or_path": "bowphs/pythia-70m-multi",
+  "architectures": [
+    "GPTNeoXForCausalLM"
+  ],
+  "attention_bias": true,
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "classifier_dropout": 0.1,
+  "eos_token_id": 0,
+  "hidden_act": "gelu",
+  "hidden_dropout": 0.0,
+  "hidden_size": 512,
+  "initializer_range": 0.02,
+  "intermediate_size": 2048,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 2048,
+  "model_type": "gpt_neox",
+  "num_attention_heads": 8,
+  "num_hidden_layers": 6,
+  "partial_rotary_factor": 0.25,
+  "rope_scaling": null,
+  "rope_theta": 10000,
+  "rotary_emb_base": 10000,
+  "rotary_pct": 0.25,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float32",
+  "transformers_version": "4.48.0.dev0",
+  "use_cache": true,
+  "use_parallel_residual": true,
+  "vocab_size": 50304
+}
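For reference, a checkpoint directory containing this config.json plus the model.safetensors and tokenizer files below can typically be loaded with the transformers auto classes. A minimal sketch, assuming the checkpoint has been downloaded to a local ./checkpoint-16 directory (the path is illustrative, not part of this commit):

from transformers import AutoConfig, AutoModelForCausalLM

# Hypothetical local path to this checkpoint; adjust as needed.
ckpt_dir = "./checkpoint-16"

# Reads config.json: a GPT-NeoX causal LM with hidden_size=512, 6 layers, 8 heads.
config = AutoConfig.from_pretrained(ckpt_dir)

# Loads the weights from model.safetensors into GPTNeoXForCausalLM.
model = AutoModelForCausalLM.from_pretrained(ckpt_dir)
print(config.model_type, sum(p.numel() for p in model.parameters()))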
checkpoint-16/generation_config.json
ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 0,
+  "eos_token_id": 0,
+  "transformers_version": "4.48.0.dev0"
+}
checkpoint-16/logdata.jsonl
ADDED
The diff for this file is too large to render. See raw diff.
checkpoint-16/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a2d5f6e28bb6abef7c9625a752808dc2b4bd5bb435ca6a3730eef0a486d25104
+size 281715176
checkpoint-16/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1796f13097573fb7d53958e9f56f1fc7e0c42d6f7fa586820cd4fc50c8852faa
+size 563476858
checkpoint-16/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9429263b0385dbb24b4099d2ec0c42dbf51c21b878a978e5e334af1a8a21e62f
+size 14244
checkpoint-16/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e396a512e00c412da3cea173aaefbed5a9acca0afabe05d5d52cdf6c81f108b2
+size 1064
checkpoint-16/special_tokens_map.json
ADDED
@@ -0,0 +1,23 @@
+{
+  "bos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
checkpoint-16/tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.
checkpoint-16/tokenizer_config.json
ADDED
@@ -0,0 +1,31 @@
+{
+  "add_bos_token": false,
+  "add_eos_token": false,
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<|padding|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<|endoftext|>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|endoftext|>",
+  "extra_special_tokens": {},
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": null,
+  "tokenizer_class": "GPTNeoXTokenizer",
+  "unk_token": "<|endoftext|>"
+}
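The tokenizer saved alongside the model can be loaded the same way. A minimal sketch, again assuming a local ./checkpoint-16 path; tokenizer_config.json declares a GPT-NeoX tokenizer backed by tokenizer.json, with <|endoftext|> (id 0) as bos/eos/unk and no pad token set:

from transformers import AutoTokenizer

# Hypothetical local path to this checkpoint; adjust as needed.
tokenizer = AutoTokenizer.from_pretrained("./checkpoint-16")

# pad_token is null in tokenizer_config.json, so set one before padded batching.
tokenizer.pad_token = tokenizer.eos_token

ids = tokenizer("mater artium necessitas").input_ids
print(ids, tokenizer.decode(ids))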
checkpoint-16/trainer_state.json
ADDED
@@ -0,0 +1,78 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 0.0005333333333333334,
+  "eval_steps": 2000,
+  "global_step": 16,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 3.3333333333333335e-05,
+      "eval_accuracy": 0.016437927663734114,
+      "eval_loss": 10.702861785888672,
+      "eval_runtime": 53.5239,
+      "eval_samples_per_second": 93.416,
+      "eval_steps_per_second": 2.933,
+      "step": 1
+    },
+    {
+      "epoch": 6.666666666666667e-05,
+      "eval_accuracy": 0.049564027370478984,
+      "eval_loss": 10.533119201660156,
+      "eval_runtime": 53.3666,
+      "eval_samples_per_second": 93.692,
+      "eval_steps_per_second": 2.942,
+      "step": 2
+    },
+    {
+      "epoch": 0.00013333333333333334,
+      "eval_accuracy": 0.05325317693059629,
+      "eval_loss": 10.302159309387207,
+      "eval_runtime": 53.3789,
+      "eval_samples_per_second": 93.67,
+      "eval_steps_per_second": 2.941,
+      "step": 4
+    },
+    {
+      "epoch": 0.0002666666666666667,
+      "eval_accuracy": 0.053565591397849464,
+      "eval_loss": 10.023477554321289,
+      "eval_runtime": 53.6667,
+      "eval_samples_per_second": 93.168,
+      "eval_steps_per_second": 2.925,
+      "step": 8
+    },
+    {
+      "epoch": 0.0005333333333333334,
+      "eval_accuracy": 0.06345063538611925,
+      "eval_loss": 9.653569221496582,
+      "eval_runtime": 53.2342,
+      "eval_samples_per_second": 93.925,
+      "eval_steps_per_second": 2.949,
+      "step": 16
+    }
+  ],
+  "logging_steps": 2000,
+  "max_steps": 30000,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 9223372036854775807,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": false
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 140522739990528.0,
+  "train_batch_size": 32,
+  "trial_name": null,
+  "trial_params": null
+}
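trainer_state.json (global_step 16 of max_steps 30000) together with optimizer.pt, scheduler.pt and rng_state.pth is what lets the Hugging Face Trainer resume this run mid-training. A minimal sketch, assuming a Trainer instance has already been built elsewhere with the same training arguments, model and datasets (trainer below is a placeholder, not defined in this commit):

# Restores optimizer, LR scheduler and RNG state from the checkpoint and
# continues from global_step 16 toward max_steps 30000.
trainer.train(resume_from_checkpoint="checkpoint-16")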
checkpoint-16/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7bfb964096776c793ae319346657629d176228270098861d014f2f3c248437f7
+size 5304
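training_args.bin is a pickled TrainingArguments object saved by Trainer. A hedged sketch of inspecting it after downloading the file locally (the path and the weights_only flag depend on your torch version and are assumptions, not part of this commit):

import torch
from transformers import TrainingArguments  # class of the pickled object

# Full Python object, not tensors, so weights_only must be False on recent torch.
args = torch.load("checkpoint-16/training_args.bin", weights_only=False)
print(args.per_device_train_batch_size, args.max_steps)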