Upload configs.yaml with huggingface_hub
configs.yaml
ADDED
@@ -0,0 +1,42 @@
+adam_beta1: 0.9
+adam_beta2: 0.999
+bf16: true
+cutoff_len: 2048
+dataset: mlfoundations-dev/stackexchange_reverseengineering
+dataset_dir: ONLINE
+ddp_timeout: 180000000
+deepspeed: dcft/train/zero3.json
+do_train: true
+enable_liger_kernel: false
+eval_strategy: epoch
+finetuning_type: full
+formatting: sharegpt
+global_batch_size: 512
+gradient_accumulation_steps: 16
+gradient_checkpointing: true
+hub_model_id: mlfoundations-dev/llama3-1_8b_mlfoundations-dev-stackexchange_reverseengineering
+include_hp: dcft/train/hp_settings/hritik.yaml
+learning_rate: 5.0e-06
+logging_steps: 10
+lr_scheduler_type: constant
+max_grad_norm: 1
+messages: conversations
+model_name_or_path: meta-llama/Meta-Llama-3.1-8B
+neat_packing: true
+num_train_epochs: 3.0
+output_dir: trained_models/llama3-1_8b_mlfoundations-dev-stackexchange_reverseengineering
+overwrite_cache: true
+overwrite_output_dir: true
+packing: true
+per_device_train_batch_size: 8
+plot_loss: true
+preprocessing_num_workers: 16
+push_to_db: true
+push_to_hub: true
+report_to: wandb
+run_name: llama3-1_8b_mlfoundations-dev-stackexchange_reverseengineering
+save_strategy: epoch
+stage: sft
+template: llama3
+val_size: 0.05
+weight_decay: 0.1
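
For reference, a minimal sketch of how a file like this can be uploaded with huggingface_hub, as the commit title describes. The repo_id below is an assumption taken from the hub_model_id in the config above; the actual upload may have been done by a training script rather than by hand.

# Minimal sketch: uploading configs.yaml with huggingface_hub.
# repo_id is assumed to match hub_model_id in the config above.
from huggingface_hub import HfApi

api = HfApi()  # authenticates via `huggingface-cli login` or the HF_TOKEN env var
api.upload_file(
    path_or_fileobj="configs.yaml",   # local file to push
    path_in_repo="configs.yaml",      # destination path inside the repo
    repo_id="mlfoundations-dev/llama3-1_8b_mlfoundations-dev-stackexchange_reverseengineering",
    repo_type="model",
    commit_message="Upload configs.yaml with huggingface_hub",
)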