#!/usr/bin/env bash
#
# Copyright 2024 PKU-Alignment Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Initialize variables
MODEL_NAME_OR_PATH="/data/align-anything/hantao/models/0916_ti_to_ti_sft"
TRAIN_DATASETS="/data/align-anything/hantao/align-anything/projects/text_image_to_text_image/outputs"
OUTPUT_DIR="../outputs/dpo_ti2ti_llf_1111_6.75k_400"

# For wandb online logging
export WANDB_API_KEY="7e2dcc0c310ebcb7cdcafd5e9320d6be55cf1a33"
export WANDB_MODE="online"
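# NOTE: replace WANDB_API_KEY with your own key; set WANDB_MODE="offline" to skip cloud logging.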

# Source the setup script
source ./setup.sh
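# (setup.sh is assumed to export MASTER_PORT and any other environment settings used below)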

# Execute deepspeed command
deepspeed \
     --master_port ${MASTER_PORT} \
     --module align_anything.trainers.text_image_to_text_image.dpo \
     --model_name_or_path ${MODEL_NAME_OR_PATH} \
     --train_datasets ${TRAIN_DATASETS} \
     --output_dir ${OUTPUT_DIR} \
     --per_device_train_batch_size 2 \
     --per_device_eval_batch_size 2 \
     --gradient_accumulation_steps 2 \
     --train_template Chameleon_preference \
     --train_split train \
     --train_data_files ti2ti_llf_1111_6.75k_400.pt \
     --learning_rate 5e-7 \
     --epochs 3 \
     --lr_scheduler_type cosine \
     --save_interval 400
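
# Usage (assumed layout): run this script from the directory containing setup.sh so that
# ./setup.sh and the relative ../outputs path resolve correctly.
# The commented-out block below is an earlier run configuration (different OUTPUT_DIR and
# --train_data_files), kept for reference.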
# # Initialize variables
# MODEL_NAME_OR_PATH="/data/align-anything/hantao/models/0916_ti_to_ti_sft"
# TRAIN_DATASETS="/data/align-anything/hantao/align-anything/projects/text_image_to_text_image/outputs"
# OUTPUT_DIR="../outputs/dpo_ti2ti_llf_1029_washed_re_train"
# # For wandb online logging
# export WANDB_API_KEY="7e2dcc0c310ebcb7cdcafd5e9320d6be55cf1a33"
# export WANDB_MODE="online"
# # Source the setup script
# source ./setup.sh
# # Execute deepspeed command
# deepspeed \
#      --master_port ${MASTER_PORT} \
#      --module align_anything.trainers.text_image_to_text_image.dpo \
#      --model_name_or_path ${MODEL_NAME_OR_PATH} \
#      --train_datasets ${TRAIN_DATASETS} \
#      --output_dir ${OUTPUT_DIR} \
#      --per_device_train_batch_size 2 \
#      --per_device_eval_batch_size 2 \
#      --gradient_accumulation_steps 2 \
#      --train_template Chameleon_preference \
#      --train_split train \
#      --train_data_files ti2ti_llf_preference_filtered_tokenize.pt \
#      --learning_rate 5e-7 \
#      --epochs 3 \
#      --lr_scheduler_type cosine \
#      --save_interval 400