Upload folder using huggingface_hub
- ablations/half/best.pt +3 -0
- ablations/half/config.yaml +24 -0
- ablations/half/debug.txt +0 -0
- ablations/half/last.pt +3 -0
- ablations/half/log.txt +0 -0
- ablations/large/best.pt +3 -0
- ablations/large/config.yaml +23 -0
- ablations/large/debug.txt +0 -0
- ablations/large/last.pt +3 -0
- ablations/large/log.txt +0 -0
- ablations/short/best.pt +3 -0
- ablations/short/config.yaml +23 -0
- ablations/short/debug.txt +0 -0
- ablations/short/last.pt +3 -0
- ablations/short/log.txt +0 -0
- ablations/small/best.pt +3 -0
- ablations/small/config.yaml +23 -0
- ablations/small/debug.txt +0 -0
- ablations/small/last.pt +3 -0
- ablations/small/log.txt +0 -0
- medium/best.pt +3 -0
- medium/config.yaml +23 -0
- medium/debug.txt +0 -0
- medium/last.pt +3 -0
- medium/log.txt +0 -0
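For context, a single-commit folder upload like this is typically produced with the huggingface_hub Python client. A minimal sketch, with a placeholder repo_id (the actual repository name is not shown in this diff):

from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="models",                 # local directory holding the checkpoints
    repo_id="your-username/chess-model",  # placeholder; real repo id not shown here
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)

Large binaries such as the .pt checkpoints are stored through Git LFS, which is why their diffs below contain only three-line LFS pointer files rather than binary content.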
ablations/half/best.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2adde01507aa606af47691ef052fd3f226247c3cdccf2316fe69de975e99bbaa
+size 3665220334
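The pointer file above records the LFS spec version, the SHA-256 of the real payload, and its size (here about 3.7 GB). A minimal sketch of fetching and opening one of these checkpoints, again with a placeholder repo_id:

import torch
from huggingface_hub import hf_hub_download

# hf_hub_download resolves the LFS pointer and downloads the actual payload.
path = hf_hub_download(
    repo_id="your-username/chess-model",  # placeholder repo id
    filename="ablations/half/best.pt",
)
ckpt = torch.load(path, map_location="cpu")
# Whether this is a bare state_dict or a full training-state dict is an
# assumption; inspect the keys before use.
print(list(ckpt) if isinstance(ckpt, dict) else type(ckpt))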
ablations/half/config.yaml
ADDED
@@ -0,0 +1,24 @@
+trainer_config:
+  output_dir: models/ablations/half
+  wandb_run_name: half-medium
+  wandb_run_id: half-medium
+  dtype: bfloat16
+  torch_compile: true
+  batch_size: 32
+  gradient_accumulation_steps: 1
+  max_iters: 2000000
+  decay_iters: 2000000
+  eval_interval: 10000
+  log_interval: 50
+  save_interval: 10000
+  min_lr: 1.0e-05
+  weight_decay: 0.01
+data_config:
+  tokenizer_config:
+    max_length: 512
+  train_file: data/lichess-2022-blitz-train/2022-half.bin
+model_config:
+  base_model: gpt2-medium
+  use_control_token: true
+  use_regression_head: true
+  use_pretrained: true
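The nesting of the reconstructed YAML above (trainer_config, data_config, and model_config sections) is inferred from the key names. Under that assumption, a short sketch of reading one of these configs with PyYAML:

import yaml

with open("ablations/half/config.yaml") as f:
    cfg = yaml.safe_load(f)

# Keys follow the inferred three-section layout.
print(cfg["trainer_config"]["batch_size"])                   # 32
print(cfg["data_config"]["tokenizer_config"]["max_length"])  # 512
print(cfg["model_config"]["base_model"])                     # gpt2-medium

Note that this half config is the only one in the commit that sets train_file; the other configs omit that key.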
ablations/half/debug.txt
ADDED
The diff for this file is too large to render.
ablations/half/last.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e5b2214db047864da4c3e50edca126079e85545b21962207ab0165b50a176ac4
+size 3665220334
ablations/half/log.txt
ADDED
The diff for this file is too large to render.
ablations/large/best.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4805761d8652ca91080232d7ee0b662d4940122aafe6b6f4d2757a9e15111a20
+size 8547639802
ablations/large/config.yaml
ADDED
@@ -0,0 +1,23 @@
+trainer_config:
+  output_dir: models/ablations/large
+  wandb_run_name: full-large
+  wandb_run_id: full-large
+  dtype: bfloat16
+  torch_compile: true
+  batch_size: 32
+  gradient_accumulation_steps: 1
+  max_iters: 2000000
+  decay_iters: 2000000
+  eval_interval: 10000
+  log_interval: 50
+  save_interval: 10000
+  min_lr: 1.0e-05
+  weight_decay: 0.01
+data_config:
+  tokenizer_config:
+    max_length: 512
+model_config:
+  base_model: gpt2-large
+  use_control_token: true
+  use_regression_head: true
+  use_pretrained: true
ablations/large/debug.txt
ADDED
The diff for this file is too large to render.
ablations/large/last.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0450a902d271a57dfef21d062c3aba7f624fef87f04c58b9e77acc17ee049d95
+size 8547639802
ablations/large/log.txt
ADDED
The diff for this file is too large to render.
ablations/short/best.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c015f5bb36e5ddc70e23cb0688a74a5e0dd27c6646d137b2fd3493561202a806
+size 3665220334
ablations/short/config.yaml
ADDED
@@ -0,0 +1,23 @@
+trainer_config:
+  output_dir: models/ablations/short
+  wandb_run_name: short-medium
+  wandb_run_id: short-medium
+  dtype: bfloat16
+  torch_compile: true
+  batch_size: 32
+  gradient_accumulation_steps: 1
+  max_iters: 1000000
+  decay_iters: 1000000
+  eval_interval: 10000
+  log_interval: 50
+  save_interval: 10000
+  min_lr: 1.0e-05
+  weight_decay: 0.01
+data_config:
+  tokenizer_config:
+    max_length: 512
+model_config:
+  base_model: gpt2-medium
+  use_control_token: true
+  use_regression_head: true
+  use_pretrained: true
ablations/short/debug.txt
ADDED
The diff for this file is too large to render.
ablations/short/last.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e1514d264dc1e4e1daad95f890567ab84c49bf7f395c789ff5202f520c72c32f
+size 3665220334
ablations/short/log.txt
ADDED
The diff for this file is too large to render.
ablations/small/best.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ed031288282b6a83cd7290ff4afcf1ad0b8c3c3d90e009208d14732a03a08d4
+size 1048711282
ablations/small/config.yaml
ADDED
@@ -0,0 +1,23 @@
+trainer_config:
+  output_dir: models/ablations/small
+  wandb_run_name: full-small
+  wandb_run_id: full-small
+  dtype: bfloat16
+  torch_compile: true
+  batch_size: 32
+  gradient_accumulation_steps: 1
+  max_iters: 2000000
+  decay_iters: 2000000
+  eval_interval: 10000
+  log_interval: 50
+  save_interval: 10000
+  min_lr: 1.0e-05
+  weight_decay: 0.01
+data_config:
+  tokenizer_config:
+    max_length: 512
+model_config:
+  base_model: gpt2
+  use_control_token: true
+  use_regression_head: true
+  use_pretrained: true
ablations/small/debug.txt
ADDED
The diff for this file is too large to render.
ablations/small/last.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b991d3b85830365a1bd7dff01a70be0e54b6854d40dadf19b97194888dde1b72
+size 1048711282
ablations/small/log.txt
ADDED
The diff for this file is too large to render.
medium/best.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e64e8862ca630dd1b8cc0b3ff9d6dfa78014ea2c93247192b8f6e835ecf4f523
+size 3665220334
medium/config.yaml
ADDED
@@ -0,0 +1,23 @@
+trainer_config:
+  output_dir: models/medium
+  wandb_run_name: full-medium
+  wandb_run_id: full-medium
+  dtype: bfloat16
+  torch_compile: true
+  batch_size: 32
+  gradient_accumulation_steps: 1
+  max_iters: 2000000
+  decay_iters: 2000000
+  eval_interval: 10000
+  log_interval: 50
+  save_interval: 10000
+  min_lr: 1.0e-05
+  weight_decay: 0.01
+data_config:
+  tokenizer_config:
+    max_length: 512
+model_config:
+  base_model: gpt2-medium
+  use_control_token: true
+  use_regression_head: true
+  use_pretrained: true
medium/debug.txt
ADDED
The diff for this file is too large to render.
medium/last.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bc29750d3392fb46f0f11af8d66b4be2dbe86592cae2a0712b23ca84d0342203
+size 3665220334
medium/log.txt
ADDED
The diff for this file is too large to render.