```yaml
assistant_tag: gpt
bf16: true
content_tag: value
cutoff_len: 16384
dataloader_num_workers: 4
dataloader_persistent_workers: true
dataloader_pin_memory: true
dataset: neginr/multisubject_compsci_mc
dataset_dir: ONLINE
ddp_timeout: 180000000
deepspeed: dcft/train/zero3.json
do_train: true
enable_liger_kernel: true
finetuning_type: full
formatting: sharegpt
global_batch_size: 96
gradient_accumulation_steps: 3
hub_model_id: neginr/multisubject_compsci_mc
include_hp: dcft/train/hp_settings/paper/reasoning_small.yaml
learning_rate: 2.0e-05
logging_steps: 1
lr_scheduler_type: cosine
messages: conversations
model_name_or_path: Qwen/Qwen2.5-7B-Instruct
num_train_epochs: 7.0
output_dir: /scratch/08134/negin/dcft_checkpoints/r1_annotated_5k_compsci
overwrite_cache: true
per_device_train_batch_size: 1
plot_loss: true
preprocessing_num_workers: 16
push_to_db: true
push_to_hub: true
report_to: wandb
role_tag: from
run_name: r1_annotated_5k_compsci
save_strategy: epoch
stage: sft
template: qwen25
user_tag: human
warmup_ratio: 0.1
```
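
The `formatting: sharegpt` keys (`messages`, `role_tag`, `content_tag`, `user_tag`, `assistant_tag`) describe how each dataset record is parsed. A minimal sketch of what one conforming record would look like under this config; the question and answer text here is placeholder content, not taken from the dataset:

```python
# One ShareGPT-style record matching the config above
# (messages: conversations, role_tag: from, content_tag: value,
#  user_tag: human, assistant_tag: gpt).
record = {
    "conversations": [
        {"from": "human", "value": "Which data structure gives O(1) amortized append?"},
        {"from": "gpt", "value": "A dynamic array, since occasional resizes amortize out."},
    ]
}

# Turns are expected to alternate human -> gpt; a quick sanity check:
roles = [turn["from"] for turn in record["conversations"]]
assert all(r == ("human" if i % 2 == 0 else "gpt") for i, r in enumerate(roles))
```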
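The batch settings also pin down an implied world size: with `per_device_train_batch_size: 1` and `gradient_accumulation_steps: 3`, a `global_batch_size` of 96 works out to 32 GPUs. A short check, assuming the usual effective-batch formula (how the DCFT pipeline actually derives this is not stated in the file):

```python
# Effective batch = per-device batch * grad accumulation steps * number of GPUs.
# Solving for the GPU count implied by this config:
per_device_train_batch_size = 1
gradient_accumulation_steps = 3
global_batch_size = 96

num_gpus = global_batch_size // (per_device_train_batch_size * gradient_accumulation_steps)
assert num_gpus * per_device_train_batch_size * gradient_accumulation_steps == global_batch_size
print(num_gpus)  # 32
```

Configs in this shape are typically consumed by LLaMA-Factory's `llamafactory-cli train` entry point, though keys such as `global_batch_size`, `include_hp`, and `push_to_db` suggest a pipeline-specific wrapper around it. With `save_strategy: epoch` and `num_train_epochs: 7.0`, the run writes one checkpoint per epoch under `output_dir`.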