gsmyrnis committed
Commit 378f4d4 · verified · 1 Parent(s): 4efbb76

Upload configs.yaml with huggingface_hub
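
The commit message indicates the file was pushed with the huggingface_hub Python client. A minimal sketch of how such an upload is typically done with `HfApi.upload_file` (the local file path and `repo_type` here are assumptions, not recorded in this commit):

```python
from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login` by default

# Upload a single file to the repo in one commit.
# The local path and repo_type="model" are assumed for illustration.
api.upload_file(
    path_or_fileobj="configs.yaml",
    path_in_repo="configs.yaml",
    repo_id="mlfoundations-dev/qwen2-5_sky_t1_2-5k_rewrite_r1_distill_llama70b",
    repo_type="model",
    commit_message="Upload configs.yaml with huggingface_hub",
)
```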

Files changed (1): configs.yaml (+34, -0)
configs.yaml ADDED
@@ -0,0 +1,34 @@
+ bf16: true
+ cutoff_len: 16384
+ dataset: mlfoundations-dev/sky_t1_2-5k_rewrite_r1_distill_llama70b
+ dataset_dir: ONLINE
+ ddp_timeout: 180000000
+ deepspeed: dcft/train/zero3.json
+ do_train: true
+ eval_strategy: 'no'
+ finetuning_type: full
+ formatting: sharegpt
+ global_batch_size: 96
+ gradient_accumulation_steps: 3
+ hub_model_id: mlfoundations-dev/qwen2-5_sky_t1_2-5k_rewrite_r1_distill_llama70b
+ include_hp: dcft/train/hp_settings/reasoning.yaml
+ learning_rate: 1.0e-05
+ logging_steps: 1
+ lr_scheduler_type: cosine
+ max_samples: 1000000
+ messages: conversations
+ model_name_or_path: Qwen/Qwen2.5-7B-Instruct
+ num_train_epochs: 3.0
+ output_dir: /tmp/dcft_checkpoints/train/checkpoints/qwen2-5_sky_t1_2-5k_rewrite_r1_distill_llama70b
+ overwrite_cache: true
+ per_device_train_batch_size: 1
+ plot_loss: true
+ preprocessing_num_workers: 16
+ push_to_db: true
+ push_to_hub: true
+ report_to: wandb
+ run_name: qwen2-5_sky_t1_2-5k_rewrite_r1_distill_llama70b
+ save_strategy: epoch
+ stage: sft
+ template: qwen25
+ warmup_ratio: 0.1
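
The keys (`stage: sft`, `finetuning_type`, `cutoff_len`, `template`) resemble a LLaMA-Factory-style SFT config, though the framework is not named in the commit. One internal relationship worth checking when editing such a config is that `global_batch_size` is consistent with the per-device batch size and gradient accumulation; a small sketch of that check (the filename and the LLaMA-Factory assumption are mine):

```python
import yaml

# Load the training config and derive the implied data-parallel world size.
with open("configs.yaml") as f:
    cfg = yaml.safe_load(f)

per_device = cfg["per_device_train_batch_size"]   # 1
grad_accum = cfg["gradient_accumulation_steps"]   # 3
global_bs = cfg["global_batch_size"]              # 96

# global batch = per-device batch * grad-accum steps * number of GPUs,
# so this config implies 96 / (1 * 3) = 32 data-parallel ranks.
world_size = global_bs // (per_device * grad_accum)
assert world_size * per_device * grad_accum == global_bs, "batch sizes inconsistent"
print(f"Implied data-parallel world size: {world_size}")
```

Assuming LLaMA-Factory is indeed the framework, a run would typically be launched with `llamafactory-cli train configs.yaml`, with DeepSpeed ZeRO-3 enabled via the referenced `dcft/train/zero3.json`.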