#!/usr/bin/env bash
#
# Copyright 2024 PKU-Alignment Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

export TRITON_CACHE_DIR="/home/align-anything/cache/triton"

# You can replace this with a local model path
MODEL_NAME_OR_PATH="/data/align-anything/hantao/models/0916_ti_to_ti_sft/"
# You can replace this with a local dataset path
TRAIN_DATASETS="/data/align-anything/hantao/align-anything/projects/text_image_to_text_image/outputs"
TRAIN_DATA_FILES="llf_ti2ti_13.5k_tokenized.pt"
# You can replace this with a new output path
OUTPUT_DIR="../outputs/sft_tf_cham_1111_13.5k_ti2ti"
# For wandb online logging
export WANDB_API_KEY="7e2dcc0c310ebcb7cdcafd5e9320d6be55cf1a33"

# Source the setup script
source ./setup.sh

# sleep 30m

# First run: SFT on the 13.5k text-image-to-text-image dataset
deepspeed \
  --master_port ${MASTER_PORT} \
  --module align_anything.trainers.text_image_to_text_image.sft \
  --model_name_or_path ${MODEL_NAME_OR_PATH} \
  --train_datasets ${TRAIN_DATASETS} \
  --train_data_files ${TRAIN_DATA_FILES} \
  --output_dir ${OUTPUT_DIR} \
  --train_template AA_textfeedback \
  --train_split train \
  --per_device_train_batch_size 4 \
  --per_device_eval_batch_size 4 \
  --gradient_accumulation_steps 2 \
  --save_interval 400 \
  --learning_rate 1e-6 \
  --epochs 3 \
  --lr_scheduler_type cosine

export TRITON_CACHE_DIR="/home/align-anything/cache/triton"

# You can replace this with a local model path
MODEL_NAME_OR_PATH="/data/align-anything/hantao/models/0916_ti_to_ti_sft/"
# You can replace this with a local dataset path
TRAIN_DATASETS="/data/align-anything/hantao/align-anything/projects/text_image_to_text_image/outputs"
TRAIN_DATA_FILES="llf_ti2ti_6.75k_tokenized.pt"
# You can replace this with a new output path
OUTPUT_DIR="../outputs/sft_tf_cham_1111_6.75k_ti2ti"
# For wandb online logging
export WANDB_API_KEY="7e2dcc0c310ebcb7cdcafd5e9320d6be55cf1a33"

# Source the setup script
source ./setup.sh

# sleep 30m

# Second run: same configuration, trained on the 6.75k dataset
deepspeed \
  --master_port ${MASTER_PORT} \
  --module align_anything.trainers.text_image_to_text_image.sft \
  --model_name_or_path ${MODEL_NAME_OR_PATH} \
  --train_datasets ${TRAIN_DATASETS} \
  --train_data_files ${TRAIN_DATA_FILES} \
  --output_dir ${OUTPUT_DIR} \
  --train_template AA_textfeedback \
  --train_split train \
  --per_device_train_batch_size 4 \
  --per_device_eval_batch_size 4 \
  --gradient_accumulation_steps 2 \
  --save_interval 400 \
  --learning_rate 1e-6 \
  --epochs 3 \
  --lr_scheduler_type cosine
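
# ------------------------------------------------------------------------------
# Note: the two invocations above differ only in TRAIN_DATA_FILES and OUTPUT_DIR.
# Below is a minimal, commented-out sketch of an equivalent loop, assuming the
# same setup.sh and environment variables as above; the run_sft helper name is
# hypothetical and not part of align-anything itself.
#
# run_sft() {
#   local data_file="$1" output_dir="$2"
#   deepspeed \
#     --master_port ${MASTER_PORT} \
#     --module align_anything.trainers.text_image_to_text_image.sft \
#     --model_name_or_path ${MODEL_NAME_OR_PATH} \
#     --train_datasets ${TRAIN_DATASETS} \
#     --train_data_files "${data_file}" \
#     --output_dir "${output_dir}" \
#     --train_template AA_textfeedback \
#     --train_split train \
#     --per_device_train_batch_size 4 \
#     --per_device_eval_batch_size 4 \
#     --gradient_accumulation_steps 2 \
#     --save_interval 400 \
#     --learning_rate 1e-6 \
#     --epochs 3 \
#     --lr_scheduler_type cosine
# }
#
# run_sft "llf_ti2ti_13.5k_tokenized.pt" "../outputs/sft_tf_cham_1111_13.5k_ti2ti"
# run_sft "llf_ti2ti_6.75k_tokenized.pt" "../outputs/sft_tf_cham_1111_6.75k_ti2ti"
# ------------------------------------------------------------------------------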