import os
# Change the working directory to the directory containing this file
os.chdir(os.path.dirname(__file__))
# Import the required libraries
import torch
from datasets import load_dataset, Dataset
from transformers import (
    AutoModelForCausalLM,  # loads a pretrained causal language model
    AutoTokenizer,  # loads the tokenizer that matches the model
    BitsAndBytesConfig,  # configures 4-bit quantization
    HfArgumentParser,  # parses command-line arguments
    TrainingArguments,  # holds the training hyperparameters
    pipeline,  # builds an inference pipeline for the model
    logging,  # logging utilities
)
from peft import LoraConfig, PeftModel  # configure and load LoRA/QLoRA adapters
from trl import SFTTrainer  # Trainer for supervised fine-tuning
# Name of the pretrained base model
model_name = "meta-llama/Llama-3.1-8B-Instruct"
# Name under which the fine-tuned model (LoRA adapter) is saved
new_model = "Llama-3.1-8b-Instruct-fine-tuned"

# LoRA attention dimension (rank)
lora_r = 64
# Alpha parameter for LoRA scaling
lora_alpha = 16
# Dropout probability for LoRA layers
lora_dropout = 0.1
# Load the base model in 4-bit precision
use_4bit = True
# Compute dtype for the 4-bit base model
bnb_4bit_compute_dtype = "float16"
# 4-bit quantization type (fp4 or nf4)
bnb_4bit_quant_type = "nf4"
# Enable nested quantization (double quantization) for the 4-bit base model
use_nested_quant = False
# Output directory for model predictions and checkpoints
output_dir = "./results"
# Number of training epochs
num_train_epochs = 1
# Enable fp16/bf16 training (set bf16 to True on an A100)
fp16 = False
bf16 = True
# Training batch size per GPU
per_device_train_batch_size = 4
# Evaluation batch size per GPU
per_device_eval_batch_size = 4
# Number of update steps over which to accumulate gradients
gradient_accumulation_steps = 1
# Enable gradient checkpointing
gradient_checkpointing = True
# Maximum gradient norm (gradient clipping)
max_grad_norm = 0.3
# Initial learning rate (AdamW optimizer)
learning_rate = 2e-4
# Weight decay applied to all layers except bias/LayerNorm weights
weight_decay = 0.001
# Optimizer
optim = "paged_adamw_32bit"
# Learning rate schedule
lr_scheduler_type = "cosine"
# Number of training steps (overrides num_train_epochs; -1 to disable)
max_steps = -1
# Ratio of steps used for linear warmup (from 0 up to the learning rate)
warmup_ratio = 0.03
# Group sequences of similar length into the same batch
group_by_length = True
# Save a checkpoint every X update steps
save_steps = 0
# Log every X update steps
logging_steps = 25
# SFT parameters
# Maximum sequence length
max_seq_length = None
# Pack multiple short examples into the same input sequence for efficiency
packing = False
# Load the entire model on GPU 0
device_map = {"": 0}

# Load the dataset
dataset = load_dataset(path="json", data_dir="./num_list", data_files="num_list_500_per_sample_100_length.json")
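# Each record in the JSON file is expected to look roughly like the following
# (shape inferred from the field accesses below; the values are placeholders):
# {
#     "system_prompt": "...",
#     "description": "...",
#     "data": {"question": "...", "struct_data": "...", "answer": "..."}
# }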
fine_tune_dataset = []
print("Loading dataset...")
for instance in dataset["train"]:
    prompt = (instance["system_prompt"] + "\n\n" + instance["description"]
              + "\nQuestion: " + instance["data"]["question"]
              + "\nData: " + instance["data"]["struct_data"])
    answer = instance["data"]["answer"]
    completion = f"The answer is {answer}."
    # Also build a combined "text" column, since SFTTrainer below is configured
    # with dataset_text_field="text"
    fine_tune_dataset.append({"prompt": prompt, "completion": completion,
                              "text": prompt + "\n" + completion})
fine_tune_dataset = Dataset.from_list(fine_tune_dataset)
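# Configure 4-bit quantization (the QLoRA setup) used when loading the base model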
compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
bnb_config = BitsAndBytesConfig(
load_in_4bit=use_4bit,
bnb_4bit_quant_type=bnb_4bit_quant_type,
bnb_4bit_compute_dtype=compute_dtype,
bnb_4bit_use_double_quant=use_nested_quant,
)
if compute_dtype == torch.float16 and use_4bit:
    major, _ = torch.cuda.get_device_capability()
    if major >= 8:
        print("Your GPU supports bfloat16: consider setting bf16=True to speed up training")

# Load the base model
model = AutoModelForCausalLM.from_pretrained(
model_name,
quantization_config=bnb_config,
device_map=device_map
)
model.config.use_cache = False
model.config.pretraining_tp = 1
# Load the tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "right"  # fix overflow issues with fp16 training
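# LoRA configuration; target_modules is left unset, so peft falls back to its
# built-in default for the model architecture (typically the attention
# q_proj/v_proj projections for Llama-family models)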
peft_config = LoraConfig(
lora_alpha=lora_alpha,
lora_dropout=lora_dropout,
r=lora_r,
bias="none",
task_type="CAUSAL_LM",
)
training_arguments = TrainingArguments(
output_dir=output_dir,
num_train_epochs=num_train_epochs,
per_device_train_batch_size=per_device_train_batch_size,
gradient_accumulation_steps=gradient_accumulation_steps,
optim=optim,
save_steps=save_steps,
logging_steps=logging_steps,
learning_rate=learning_rate,
weight_decay=weight_decay,
fp16=fp16,
bf16=bf16,
max_grad_norm=max_grad_norm,
max_steps=max_steps,
warmup_ratio=warmup_ratio,
group_by_length=group_by_length,
lr_scheduler_type=lr_scheduler_type,
report_to="tensorboard",
)
# Set up the supervised fine-tuning trainer
trainer = SFTTrainer(
model=model,
train_dataset=fine_tune_dataset,
peft_config=peft_config,
dataset_text_field="text",
max_seq_length=max_seq_length,
tokenizer=tokenizer,
args=training_arguments,
packing=packing,
)
# Train the model and save the resulting LoRA adapter
trainer.train()
trainer.model.save_pretrained(new_model)
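# --- Optional: post-training inference sketch (not part of the original script) ---
# A minimal sketch, assuming you want to reload the saved LoRA adapter and merge it
# into a fresh fp16 copy of the base model for inference. It reuses the already-
# imported PeftModel and pipeline; the prompt below is purely illustrative, and
# loading a second 8B model may require freeing the training model or a fresh process.
base_model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,
    device_map=device_map,
)
merged_model = PeftModel.from_pretrained(base_model, new_model).merge_and_unload()
generator = pipeline("text-generation", model=merged_model, tokenizer=tokenizer)
test_prompt = "Question: What is the maximum value?\nData: 3, 1, 4, 1, 5"
print(generator(test_prompt, max_new_tokens=32)[0]["generated_text"])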