assistant_tag: assistant
bf16: true
content_tag: value
cutoff_len: 4096
dataset: mlfoundations-dev/s1_ablation_diversity_sampling_27k
dataset_dir: ONLINE
ddp_timeout: 180000000
deepspeed: dcft/train/zero3.json
do_train: true
enable_liger_kernel: true
finetuning_type: full
formatting: sharegpt
global_batch_size: 96
gradient_accumulation_steps: 6
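# Effective batch size check: 2 samples per device x 6 accumulation steps x 8
# data-parallel ranks = 96, matching global_batch_size above. The 8-GPU count is an
# assumption inferred from these values, not something stated in this config.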
hub_model_id: mlfoundations-dev/qwen_s1ablation_diversity_sampling_27k
learning_rate: 1.0e-05
logging_steps: 1
lr_scheduler_type: cosine
max_samples: 1000000
messages: conversations
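# ShareGPT column mapping (assuming standard LLaMA-Factory semantics): each example's
# conversation list is read from the "conversations" field; within each turn, the role
# comes from the "from" key (role_tag) and the text from the "value" key (content_tag),
# with role values "user"/"assistant" per user_tag and assistant_tag.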
model_name_or_path: Qwen/Qwen2.5-7B-Instruct
neat_packing: true
num_train_epochs: 3.0
output_dir: /data/horse/ws/rehe951g-p_finetuning/checkpoints/qwen_s1ablation_diversity_sampling_27k
overwrite_cache: true
overwrite_output_dir: false
packing: true
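# packing concatenates training samples into sequences up to cutoff_len (4096 tokens);
# with neat_packing enabled, LLaMA-Factory additionally masks attention so that packed
# samples do not attend to each other (assumes the usual semantics of these two flags).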
per_device_eval_batch_size: 2
per_device_train_batch_size: 2
plot_loss: true
preprocessing_num_workers: 16
push_to_db: true
push_to_hub: true
report_to: wandb
role_tag: from
run_name: qwen_s1ablation_diversity_sampling_27k
save_steps: 10
stage: sft
template: qwen25
user_tag: user
warmup_ratio: 0.1
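# Launch sketch (assumption: this file follows the LLaMA-Factory config schema and is
# saved as qwen_s1ablation_diversity_sampling_27k.yaml):
#   llamafactory-cli train qwen_s1ablation_diversity_sampling_27k.yaml
# Keys such as global_batch_size and push_to_db are not standard LLaMA-Factory options
# and are assumed to be consumed by the surrounding dcft training pipeline.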