Gresham committed on
Commit 69331b5 · 1 Parent(s): 18308c4

delete: remove qlora finetuning script

Files changed (2)
  1. .gitignore +0 -1
  2. llama-fine-tuning-QLoRA.py +0 -199
.gitignore DELETED
@@ -1 +0,0 @@
- result/
 
 
llama-fine-tuning-QLoRA.py DELETED
@@ -1,199 +0,0 @@
- import os
-
- # Change the working directory to the directory containing this file
- os.chdir(os.path.dirname(__file__))
-
- # Import the required libraries
- import torch
- import json
- from datasets import Dataset
- from transformers import (
-     AutoModelForCausalLM,  # loads the pretrained language model
-     AutoTokenizer,         # loads the tokenizer that matches the model
-     BitsAndBytesConfig,    # configures 4-bit quantization
-     HfArgumentParser,      # parses command-line arguments
-     TrainingArguments,     # sets the training hyperparameters
-     pipeline,              # builds inference pipelines
-     logging,               # logging utilities
- )
- from huggingface_hub import hf_hub_download
- from peft import LoraConfig  # configures the QLoRA adapter
- from trl import SFTTrainer   # Trainer for supervised fine-tuning
-
- # Name of the pretrained base model
- model_name = "meta-llama/Llama-3.1-8B-Instruct"
-
- # Name of the fine-tuned model
- new_model = "Llama-3.1-8b-Instruct-fine-tuned"
-
- # LoRA attention dimension
- lora_r = 64
-
- # Alpha parameter for LoRA scaling
- lora_alpha = 16
-
- # Dropout probability for the LoRA layers
- lora_dropout = 0.1
-
- # Activate 4-bit precision base model loading
- use_4bit = True
-
- # Compute dtype for the 4-bit base model
- bnb_4bit_compute_dtype = "float16"
-
- # 4-bit quantization type (fp4 or nf4)
- bnb_4bit_quant_type = "nf4"
-
- # Activate nested quantization (double quantization) for the 4-bit base model
- use_nested_quant = False
-
- # Output directory for model predictions and checkpoints
- output_dir = "./results"
-
- # Number of training epochs
- num_train_epochs = 1
-
- # Enable fp16/bf16 training (set bf16 to True on an A100)
- fp16 = False
- bf16 = True
-
- # Training batch size per GPU
- per_device_train_batch_size = 4
-
- # Evaluation batch size per GPU
- per_device_eval_batch_size = 4
-
- # Number of update steps over which to accumulate gradients
- gradient_accumulation_steps = 1
-
- # Enable gradient checkpointing
- gradient_checkpointing = True
-
- # Maximum gradient norm (gradient clipping)
- max_grad_norm = 0.3
-
- # Initial learning rate (AdamW optimizer)
- learning_rate = 2e-4
-
- # Weight decay applied to all layers except bias/LayerNorm weights
- weight_decay = 0.001
-
- # Optimizer
- optim = "paged_adamw_32bit"
-
- # Learning rate schedule
- lr_scheduler_type = "cosine"
-
- # Number of training steps (overrides num_train_epochs)
- max_steps = -1
-
- # Fraction of steps used for linear warmup (from 0 to the learning rate)
- warmup_ratio = 0.03
-
- # Group sequences by length
- group_by_length = True
-
- # Save a checkpoint every X update steps
- save_steps = 0
-
- # Log every X update steps
- logging_steps = 25
-
- # SFT parameter configuration
- # Maximum sequence length
- max_seq_length = None
-
- # Pack multiple short examples into the same input sequence for efficiency
- packing = False
-
- # Load the entire model on GPU 0
- device_map = {"": 0}
-
- # Load the dataset
- print("Loading dataset...")
- REPO_ID = "TreeAILab/NumericBench"
- dataset_name = 'num_list/num_list_500_per_sample_100_length.json'
- with open(hf_hub_download(repo_id=REPO_ID, filename=dataset_name, repo_type="dataset")) as f:
-     dataset = json.load(f)
- fine_tune_dataset = []
-
- for instance in dataset["data"]:
-     prompt = dataset["system_prompt"] + "\n\n" + dataset["description"] + "\nQuestion: " + instance["question"] + "\nData: " + instance["struct_data"]
-     answer = instance["answer"]
-     completion = f"The answer is {answer}."
-     fine_tune_dataset.append({"prompt": prompt, "completion": completion})
-
- fine_tune_dataset = Dataset.from_list(fine_tune_dataset)
-
- compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
-
- bnb_config = BitsAndBytesConfig(
-     load_in_4bit=use_4bit,
-     bnb_4bit_quant_type=bnb_4bit_quant_type,
-     bnb_4bit_compute_dtype=compute_dtype,
-     bnb_4bit_use_double_quant=use_nested_quant,
- )
-
- if compute_dtype == torch.float16 and use_4bit:
-     major, _ = torch.cuda.get_device_capability()
-     if major >= 8:
-         print("The GPU supports bfloat16")
-
- # Load the model
- model = AutoModelForCausalLM.from_pretrained(
-     model_name,
-     quantization_config=bnb_config,
-     device_map=device_map
- )
- model.config.use_cache = False
- model.config.pretraining_tp = 1
-
- # Load the tokenizer
- tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
- tokenizer.pad_token = tokenizer.eos_token
- tokenizer.padding_side = "right"  # avoids overflow issues during fp16 training
-
- peft_config = LoraConfig(
-     lora_alpha=lora_alpha,
-     lora_dropout=lora_dropout,
-     r=lora_r,
-     bias="none",
-     task_type="CAUSAL_LM",
- )
-
- training_arguments = TrainingArguments(
-     output_dir=output_dir,
-     num_train_epochs=num_train_epochs,
-     per_device_train_batch_size=per_device_train_batch_size,
-     gradient_accumulation_steps=gradient_accumulation_steps,
-     optim=optim,
-     save_steps=save_steps,
-     logging_steps=logging_steps,
-     learning_rate=learning_rate,
-     weight_decay=weight_decay,
-     fp16=fp16,
-     bf16=bf16,
-     max_grad_norm=max_grad_norm,
-     max_steps=max_steps,
-     warmup_ratio=warmup_ratio,
-     group_by_length=group_by_length,
-     lr_scheduler_type=lr_scheduler_type,
-     report_to="tensorboard",
- )
-
- # Set the supervised fine-tuning parameters
- trainer = SFTTrainer(
-     model=model,
-     train_dataset=fine_tune_dataset,
-     peft_config=peft_config,
-     dataset_text_field="text",
-     max_seq_length=max_seq_length,
-     tokenizer=tokenizer,
-     args=training_arguments,
-     packing=packing,
- )
-
- # Train the model
- trainer.train()
-
- trainer.model.save_pretrained(new_model)
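
For reference, a minimal sketch (not part of the removed file) of how the adapter directory written by trainer.model.save_pretrained(new_model) could be loaded back onto the 4-bit base model for inference with peft's PeftModel. The base model name and adapter directory match the script above; the example prompt and generation settings are illustrative assumptions.

# Sketch: load the saved LoRA adapter and run a quick generation.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

base_model_name = "meta-llama/Llama-3.1-8B-Instruct"
adapter_path = "Llama-3.1-8b-Instruct-fine-tuned"  # directory saved by the removed script

# 4-bit quantization config mirroring the training-time settings
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)

base_model = AutoModelForCausalLM.from_pretrained(
    base_model_name,
    quantization_config=bnb_config,
    device_map={"": 0},
)
tokenizer = AutoTokenizer.from_pretrained(base_model_name)

# Attach the fine-tuned LoRA weights on top of the quantized base model
model = PeftModel.from_pretrained(base_model, adapter_path)
model.eval()

# Illustrative prompt only; a real prompt would follow the NumericBench format above
prompt = "Question: What is the maximum value in the list?\nData: [3, 1, 4, 1, 5]"
inputs = tokenizer(prompt, return_tensors="pt").to("cuda:0")
with torch.no_grad():
    outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))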