{
"data_config": {
"meta_data": "",
"data_dir": "",
"meta_data_test": "",
"max_frame_pixels": 200704,
"num_frames": null,
"fps": 2.0,
"p_shuffle_frames": 0.0,
"p_color_jitter": 0.0,
"eval_dim": [
"VQ",
"MQ",
"TA"
],
"prompt_template_type": "detailed_special",
"add_noise": false,
"sample_type": "uniform",
"use_tied_data": true
},
"training_args": {
"output_dir": "",
"overwrite_output_dir": false,
"do_train": false,
"do_eval": true,
"do_predict": false,
"eval_strategy": "steps",
"prediction_loss_only": false,
"per_device_train_batch_size": 1,
"per_device_eval_batch_size": 4,
"per_gpu_train_batch_size": null,
"per_gpu_eval_batch_size": null,
"gradient_accumulation_steps": 4,
"eval_accumulation_steps": null,
"eval_delay": 0,
"torch_empty_cache_steps": null,
"learning_rate": 2e-06,
"weight_decay": 0.0,
"adam_beta1": 0.9,
"adam_beta2": 0.999,
"adam_epsilon": 1e-08,
"max_grad_norm": 1.0,
"num_train_epochs": 3.0,
"max_steps": -1,
"lr_scheduler_type": "constant_with_warmup",
"lr_scheduler_kwargs": {},
"warmup_ratio": 0.05,
"warmup_steps": 0,
"log_level": "passive",
"log_level_replica": "warning",
"log_on_each_node": true,
"logging_dir": "",
"logging_strategy": "steps",
"logging_first_step": false,
"logging_steps": 57,
"logging_nan_inf_filter": true,
"save_strategy": "steps",
"save_steps": 1419,
"save_total_limit": null,
"save_safetensors": true,
"save_on_each_node": false,
"save_only_model": true,
"restore_callback_states_from_checkpoint": false,
"no_cuda": false,
"use_cpu": false,
"use_mps_device": false,
"seed": 42,
"data_seed": null,
"jit_mode_eval": false,
"use_ipex": false,
"bf16": true,
"fp16": false,
"fp16_opt_level": "O1",
"half_precision_backend": "auto",
"bf16_full_eval": false,
"fp16_full_eval": false,
"tf32": null,
"ddp_backend": null,
"tpu_num_cores": null,
"tpu_metrics_debug": false,
"debug": [],
"dataloader_drop_last": false,
"eval_steps": 568,
"dataloader_num_workers": 8,
"dataloader_prefetch_factor": null,
"past_index": -1,
"run_name": "",
"disable_tqdm": false,
"remove_unused_columns": false,
"label_names": null,
"load_best_model_at_end": false,
"metric_for_best_model": null,
"greater_is_better": null,
"ignore_data_skip": false,
"fsdp": [],
"fsdp_min_num_params": 0,
"fsdp_config": {
"min_num_params": 0,
"xla": false,
"xla_fsdp_v2": false,
"xla_fsdp_grad_ckpt": false
},
"fsdp_transformer_layer_cls_to_wrap": null,
"accelerator_config": {
"split_batches": false,
"dispatch_batches": null,
"even_batches": true,
"use_seedable_sampler": true,
"non_blocking": false,
"gradient_accumulation_kwargs": null,
"use_configured_state": false
},
"deepspeed": "ds_config/zero0.json",
"label_smoothing_factor": 0.0,
"optim": "adamw_torch",
"optim_args": null,
"adafactor": false,
"group_by_length": false,
"length_column_name": "length",
"report_to": [
"tensorboard"
],
"ddp_find_unused_parameters": null,
"ddp_bucket_cap_mb": null,
"ddp_broadcast_buffers": null,
"dataloader_pin_memory": true,
"dataloader_persistent_workers": false,
"skip_memory_metrics": true,
"use_legacy_prediction_loop": false,
"push_to_hub": false,
"resume_from_checkpoint": null,
"hub_model_id": null,
"hub_strategy": "every_save",
"hub_token": null,
"hub_private_repo": false,
"hub_always_push": false,
"gradient_checkpointing": false,
"gradient_checkpointing_kwargs": null,
"include_inputs_for_metrics": false,
"eval_do_concat_batches": true,
"fp16_backend": "auto",
"evaluation_strategy": null,
"push_to_hub_model_id": null,
"push_to_hub_organization": null,
"push_to_hub_token": null,
"mp_parameters": "",
"auto_find_batch_size": false,
"full_determinism": false,
"torchdynamo": null,
"ray_scope": "last",
"ddp_timeout": 1800,
"torch_compile": false,
"torch_compile_backend": null,
"torch_compile_mode": null,
"dispatch_batches": null,
"split_batches": null,
"include_tokens_per_second": false,
"include_num_input_tokens_seen": false,
"neftune_noise_alpha": null,
"optim_target_modules": null,
"batch_eval_metrics": false,
"eval_on_start": false,
"use_liger_kernel": false,
"eval_use_gather_object": false,
"max_length": 6144,
"dataset_num_proc": null,
"center_rewards_coefficient": null,
"disable_flash_attn2": false,
"vision_lr": 2e-06,
"merger_lr": 2e-06,
"special_token_lr": 2e-06,
"conduct_eval": true,
"load_from_pretrained": null,
"load_from_pretrained_step": null,
"logging_epochs": 0.01,
"eval_epochs": 0.1,
"save_epochs": 0.25,
"save_full_model": false
},
"model_config": {
"model_name_or_path": "Qwen/Qwen2-VL-2B-Instruct",
"model_revision": "main",
"output_dim": 1,
"use_special_tokens": true,
"freeze_vision_tower": false,
"freeze_llm": false,
"tune_merger": true,
"torch_dtype": "bfloat16",
"trust_remote_code": false,
"attn_implementation": null,
"load_in_8bit": false,
"load_in_4bit": false,
"bnb_4bit_quant_type": "nf4",
"use_bnb_nested_quant": false,
"reward_token": "special",
"loss_type": "btt"
},
"peft_lora_config": {
"lora_enable": true,
"vision_lora": false,
"lora_r": 64,
"lora_alpha": 128,
"lora_dropout": 0.05,
"lora_target_modules": null,
"lora_namespan_exclude": [
"lm_head",
"rm_head",
"embed_tokens",
"visual"
],
"lora_modules_to_save": null,
"lora_task_type": "CAUSAL_LM",
"use_rslora": false,
"num_lora_modules": -1
},
"inference_config": {
"VQ_mean": 3.6757,
"VQ_std": 2.2476,
"MQ_mean": 1.1646,
"MQ_std": 1.3811,
"TA_mean": 2.8105,
"TA_std": 2.5121
}
}
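
Usage notes (not part of the JSON above). First, the model_config block names the Qwen/Qwen2-VL-2B-Instruct backbone, its revision, and its dtype. The sketch below (Python, assuming a recent transformers release that ships the Qwen2-VL classes) shows how those fields would be consumed to load the backbone only; the released reward model additionally wraps this backbone with LoRA adapters and a scalar reward head (output_dim = 1) read at special reward tokens, which needs the project's own model class.

import json

import torch
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration

# Path to the file shown above (assumed local checkout of the repository).
with open("model_config.json") as f:
    model_cfg = json.load(f)["model_config"]

# Loads only the plain Qwen2-VL backbone named in model_config; the reward
# head and LoRA weights shipped with VideoReward are not attached here.
backbone = Qwen2VLForConditionalGeneration.from_pretrained(
    model_cfg["model_name_or_path"],                       # "Qwen/Qwen2-VL-2B-Instruct"
    revision=model_cfg["model_revision"],                  # "main"
    torch_dtype=getattr(torch, model_cfg["torch_dtype"]),  # torch.bfloat16
    trust_remote_code=model_cfg["trust_remote_code"],      # False
)
processor = AutoProcessor.from_pretrained(model_cfg["model_name_or_path"])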
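
The peft_lora_config block records the LoRA hyperparameters used when fine-tuning the reward model. Below is a minimal sketch of how they could map onto a peft.LoraConfig. Since lora_target_modules is null, the real training code presumably enumerates target layers at runtime and skips names matching lora_namespan_exclude; the target_modules list below is a hypothetical stand-in, not taken from the config.

import json

from peft import LoraConfig  # assumes the peft package is installed

with open("model_config.json") as f:
    lora_cfg = json.load(f)["peft_lora_config"]

# Hypothetical mapping of the JSON fields onto a PEFT LoraConfig. The project
# likely selects the backbone's linear layers itself and excludes any module
# whose name contains one of the strings in lora_namespan_exclude
# ("lm_head", "rm_head", "embed_tokens", "visual").
peft_config = LoraConfig(
    r=lora_cfg["lora_r"],                   # 64
    lora_alpha=lora_cfg["lora_alpha"],      # 128
    lora_dropout=lora_cfg["lora_dropout"],  # 0.05
    use_rslora=lora_cfg["use_rslora"],      # False
    task_type=lora_cfg["lora_task_type"],   # "CAUSAL_LM"
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],  # placeholder, not from the config
    modules_to_save=lora_cfg["lora_modules_to_save"],         # null -> None
)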
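
Finally, inference_config gives per-dimension score statistics for the three evaluation dimensions listed in data_config.eval_dim: VQ (visual quality), MQ (motion quality), and TA (text alignment). A small sketch follows, assuming these mean/std pairs are intended for z-score normalization of the raw reward-head outputs; how the project combines the normalized dimensions into a single reward is not specified in this file.

import json

with open("model_config.json") as f:
    cfg = json.load(f)

stats = cfg["inference_config"]
dims = cfg["data_config"]["eval_dim"]  # ["VQ", "MQ", "TA"]

def normalize_rewards(raw_scores: dict[str, float]) -> dict[str, float]:
    """Z-score each evaluation dimension with the released mean/std.

    Assumes one raw scalar per dimension from the reward head; whether the
    normalized scores are averaged or weighted downstream is not defined here.
    """
    return {d: (raw_scores[d] - stats[f"{d}_mean"]) / stats[f"{d}_std"] for d in dims}

# Hypothetical raw scores for one generated video.
print(normalize_rewards({"VQ": 4.2, "MQ": 1.0, "TA": 3.1}))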