Gresham committed on
Commit aecf6f1 · 1 Parent(s): 720b6ae

feat: add llama fine tuning

Files changed (2)
  1. .gitignore +1 -0
  2. llama-fine-tuning-QLoRA.py +193 -0
.gitignore ADDED
@@ -0,0 +1 @@
+ results/
llama-fine-tuning-QLoRA.py ADDED
@@ -0,0 +1,193 @@
+ import os
+
+ # Change to the directory containing this file
+ os.chdir(os.path.dirname(os.path.abspath(__file__)))
+
+ # Import the required libraries
+ import torch
+ from datasets import load_dataset, Dataset
+ from transformers import (
+     AutoModelForCausalLM,   # loads the pretrained causal language model
+     AutoTokenizer,          # loads the tokenizer matching the model
+     BitsAndBytesConfig,     # configures 4-bit quantization
+     HfArgumentParser,       # parses command-line arguments
+     TrainingArguments,      # holds the training hyperparameters
+     pipeline,               # builds inference pipelines
+     logging,                # transformers logging utilities
+ )
+ from peft import LoraConfig, PeftModel  # configure and load the QLoRA adapters
+ from trl import SFTTrainer  # Trainer for supervised fine-tuning
+
+ # Name of the pretrained model
+ model_name = "meta-llama/Llama-3.1-8B-Instruct"
+
+ # Name of the fine-tuned model
+ new_model = "Llama-3.1-8b-Instruct-fine-tuned"
+
+ # LoRA attention dimension (rank)
+ lora_r = 64
+
+ # Alpha parameter for LoRA scaling
+ lora_alpha = 16
+
+ # Dropout probability for the LoRA layers
+ lora_dropout = 0.1
+
+ # Load the base model in 4-bit precision
+ use_4bit = True
+
+ # Compute dtype for the 4-bit base model
+ bnb_4bit_compute_dtype = "float16"
+
+ # 4-bit quantization type (fp4 or nf4)
+ bnb_4bit_quant_type = "nf4"
+
+ # Enable nested quantization (double quantization) for the 4-bit base model
+ use_nested_quant = False
+
+ # Output directory for model predictions and checkpoints
+ output_dir = "./results"
+
+ # Number of training epochs
+ num_train_epochs = 1
+
+ # Enable fp16/bf16 training (set bf16 to True on an A100)
+ fp16 = False
+ bf16 = True
+
+ # Training batch size per GPU
+ per_device_train_batch_size = 4
+
+ # Evaluation batch size per GPU
+ per_device_eval_batch_size = 4
+
+ # Number of update steps over which to accumulate gradients
+ gradient_accumulation_steps = 1
+
+ # Enable gradient checkpointing
+ gradient_checkpointing = True
+
+ # Maximum gradient norm (gradient clipping)
+ max_grad_norm = 0.3
+
+ # Initial learning rate (AdamW optimizer)
+ learning_rate = 2e-4
+
+ # Weight decay applied to all layers except bias/LayerNorm weights
+ weight_decay = 0.001
+
+ # Optimizer
+ optim = "paged_adamw_32bit"
+
+ # Learning rate schedule
+ lr_scheduler_type = "cosine"
+
+ # Number of training steps (overrides num_train_epochs)
+ max_steps = -1
+
+ # Ratio of steps used for linear warmup (from 0 to the learning rate)
+ warmup_ratio = 0.03
+
+ # Group sequences of similar length into the same batch
+ group_by_length = True
+
+ # Save a checkpoint every X update steps
+ save_steps = 0
+
+ # Log every X update steps
+ logging_steps = 25
+
+ # SFT parameters
+ # Maximum sequence length
+ max_seq_length = None
+
+ # Pack multiple short examples into the same input sequence for efficiency
+ packing = False
+
+ # Load the entire model on GPU 0
+ device_map = {"": 0}
+
+ # Load the dataset
+ dataset = load_dataset(path="json", data_dir="./num_list", data_files="num_list_500_per_sample_100_length.json")
+ fine_tune_dataset = []
+ print("Loading dataset...")
+ for instance in dataset["train"]:
+     prompt = instance["system_prompt"] + "\n\n" + instance["description"] + "\nQuestion: " + instance["data"]["question"] + "\nData: " + instance["data"]["struct_data"]
+     answer = instance["data"]["answer"]
+     completion = f"The answer is {answer}."
+     # Also store the full prompt + completion under "text", since SFTTrainer
+     # below reads dataset_text_field="text"
+     fine_tune_dataset.append({"prompt": prompt, "completion": completion, "text": prompt + "\n" + completion})
+
+ fine_tune_dataset = Dataset.from_list(fine_tune_dataset)
+
+ compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
+
+ bnb_config = BitsAndBytesConfig(
+     load_in_4bit=use_4bit,
+     bnb_4bit_quant_type=bnb_4bit_quant_type,
+     bnb_4bit_compute_dtype=compute_dtype,
+     bnb_4bit_use_double_quant=use_nested_quant,
+ )
+
+ if compute_dtype == torch.float16 and use_4bit:
+     major, _ = torch.cuda.get_device_capability()
+     if major >= 8:
+         print("This GPU supports bfloat16")
+
+ # Load the base model
+ model = AutoModelForCausalLM.from_pretrained(
+     model_name,
+     quantization_config=bnb_config,
+     device_map=device_map
+ )
+ model.config.use_cache = False
+ model.config.pretraining_tp = 1
+
+ # Load the tokenizer
+ tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
+ tokenizer.pad_token = tokenizer.eos_token
+ tokenizer.padding_side = "right"  # avoids overflow issues during fp16 training
+
+ peft_config = LoraConfig(
+     lora_alpha=lora_alpha,
+     lora_dropout=lora_dropout,
+     r=lora_r,
+     bias="none",
+     task_type="CAUSAL_LM",
+ )
+
+ training_arguments = TrainingArguments(
+     output_dir=output_dir,
+     num_train_epochs=num_train_epochs,
+     per_device_train_batch_size=per_device_train_batch_size,
+     gradient_accumulation_steps=gradient_accumulation_steps,
+     optim=optim,
+     save_steps=save_steps,
+     logging_steps=logging_steps,
+     learning_rate=learning_rate,
+     weight_decay=weight_decay,
+     fp16=fp16,
+     bf16=bf16,
+     max_grad_norm=max_grad_norm,
+     max_steps=max_steps,
+     warmup_ratio=warmup_ratio,
+     group_by_length=group_by_length,
+     lr_scheduler_type=lr_scheduler_type,
+     report_to="tensorboard",
+ )
+
+ # Set up the supervised fine-tuning trainer
+ trainer = SFTTrainer(
+     model=model,
+     train_dataset=fine_tune_dataset,
+     peft_config=peft_config,
+     dataset_text_field="text",
+     max_seq_length=max_seq_length,
+     tokenizer=tokenizer,
+     args=training_arguments,
+     packing=packing,
+ )
+
+ # Train the model
+ trainer.train()
+
+ # Save the fine-tuned LoRA adapter
+ trainer.model.save_pretrained(new_model)
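
The script saves only the LoRA adapter via trainer.model.save_pretrained(new_model); the imported but otherwise unused PeftModel and pipeline suggest an inference step is intended to follow. A minimal sketch of that step, assuming the adapter directory name used above, that the base model is reloaded in fp16 on GPU 0, and a purely placeholder prompt:

    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
    from peft import PeftModel

    # Reload the base model in half precision (assumption: enough GPU memory for fp16)
    base_model = AutoModelForCausalLM.from_pretrained(
        "meta-llama/Llama-3.1-8B-Instruct",
        torch_dtype=torch.float16,
        device_map={"": 0},
    )

    # Attach the trained LoRA adapter and merge it into the base weights
    model = PeftModel.from_pretrained(base_model, "Llama-3.1-8b-Instruct-fine-tuned")
    model = model.merge_and_unload()

    tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.1-8B-Instruct")
    generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

    # Placeholder prompt; a real query would follow the prompt format built above
    print(generator("Question: ...", max_new_tokens=32)[0]["generated_text"])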