#!/usr/bin/env bash
#
# Copyright 2024 PKU-Alignment Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================


# Initialize variables for the DPO run.
MODEL_NAME_OR_PATH="/data/align-anything/hantao/models/0916_ti_to_ti_sft"
TRAIN_DATASETS="/data/align-anything/hantao/align-anything/projects/text_image_to_text_image/outputs"
OUTPUT_DIR="../outputs/dpo_ti2ti_llf_1111_6.75k_400"

# For wandb online logging.
# SECURITY: a plaintext API key was previously committed here. Never hardcode
# secrets in scripts — export WANDB_API_KEY in the environment before running.
# The ':?' expansion aborts with a clear message if the variable is missing.
: "${WANDB_API_KEY:?WANDB_API_KEY must be set in the environment (do not hardcode it)}"
export WANDB_API_KEY
export WANDB_MODE="online"

# Source the setup script (expected to define MASTER_PORT, among others).
source ./setup.sh

# Fail fast if setup.sh did not provide a master port for deepspeed.
: "${MASTER_PORT:?MASTER_PORT must be set (expected from setup.sh)}"

# Execute deepspeed command. All variable expansions are quoted so paths
# containing spaces or glob characters cannot be word-split (SC2086).
deepspeed \
  --master_port "${MASTER_PORT}" \
  --module align_anything.trainers.text_image_to_text_image.dpo \
  --model_name_or_path "${MODEL_NAME_OR_PATH}" \
  --train_datasets "${TRAIN_DATASETS}" \
  --output_dir "${OUTPUT_DIR}" \
  --per_device_train_batch_size 2 \
  --per_device_eval_batch_size 2 \
  --gradient_accumulation_steps 2 \
  --train_template Chameleon_preference \
  --train_split train \
  --train_data_files ti2ti_llf_1111_6.75k_400.pt \
  --learning_rate 5e-7 \
  --epochs 3 \
  --lr_scheduler_type cosine \
  --save_interval 400

# --- Previous run configuration (disabled; kept for reference only) ---
# # Initialize variables
# MODEL_NAME_OR_PATH="/data/align-anything/hantao/models/0916_ti_to_ti_sft"
# TRAIN_DATASETS="/data/align-anything/hantao/align-anything/projects/text_image_to_text_image/outputs"
# OUTPUT_DIR="../outputs/dpo_ti2ti_llf_1029_washed_re_train"
# # For wandb online logging
# export WANDB_API_KEY="<redacted — leaked key removed; set via environment instead>"
# export WANDB_MODE="online"
# # Source the setup script
# source ./setup.sh

# # Execute deepspeed command
# deepspeed \
# 	--master_port ${MASTER_PORT} \
# 	--module align_anything.trainers.text_image_to_text_image.dpo \
# 	--model_name_or_path ${MODEL_NAME_OR_PATH} \
# 	--train_datasets ${TRAIN_DATASETS} \
# 	--output_dir ${OUTPUT_DIR} \
# 	--per_device_train_batch_size 2 \
#     --per_device_eval_batch_size 2 \
#     --gradient_accumulation_steps 2 \
#     --train_template Chameleon_preference \
#     --train_split train \
# 	--train_data_files ti2ti_llf_preference_filtered_tokenize.pt \
# 	--learning_rate 5e-7 \
# 	--epochs 3 \
# 	--lr_scheduler_type cosine \
# 	--save_interval 400