Upload configs.yaml with huggingface_hub
configs.yaml CHANGED (+1, -3)
@@ -8,15 +8,13 @@ ddp_timeout: '180000000'
 deepspeed: /opt/ml/code/zero3.json
 do_train: 'True'
 enable_liger_kernel: 'True'
-finetuning_type:
+finetuning_type: full
 formatting: sharegpt
 global_batch_size: '96'
 gradient_accumulation_steps: '3'
 hub_model_id: mlfoundations-dev/ds_no_offload_liger_packing_torchcompile
 learning_rate: 1e-05
 logging_steps: '1'
-lora_rank: '64'
-lora_target: all
 lr_scheduler_type: cosine
 max_samples: '1000000'
 messages: conversations
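
The keys in this hunk (finetuning_type, lora_rank, lora_target, formatting: sharegpt) look like a LLaMA-Factory-style training config: the commit drops the LoRA-specific keys and sets an explicit finetuning_type of full, i.e. it switches the run from LoRA to full-parameter fine-tuning. As a minimal sketch, assuming PyYAML is installed and the updated configs.yaml is in the working directory, the change can be sanity-checked like this:

# Minimal sketch: confirm the edited config now requests full fine-tuning.
# Assumes PyYAML is installed and configs.yaml is in the working directory.
import yaml

with open("configs.yaml") as f:
    config = yaml.safe_load(f)

# The commit replaces the empty `finetuning_type:` with `finetuning_type: full`.
assert config["finetuning_type"] == "full"

# The LoRA-specific keys were removed, consistent with full fine-tuning.
for key in ("lora_rank", "lora_target"):
    assert key not in config, f"{key} should no longer be present"

print("config OK:", config["finetuning_type"])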