Delete mplug_docowl/train
mplug_docowl/train/llama_flash_attn_monkey_patch.py
DELETED
@@ -1,117 +0,0 @@
from typing import Optional, Tuple
import warnings

import torch

import transformers
from transformers.models.llama.modeling_llama import apply_rotary_pos_emb, repeat_kv

try:
    from flash_attn.flash_attn_interface import flash_attn_unpadded_qkvpacked_func
except ImportError:
    from flash_attn.flash_attn_interface import flash_attn_varlen_qkvpacked_func as flash_attn_unpadded_qkvpacked_func
from flash_attn.bert_padding import unpad_input, pad_input


def forward(
    self,
    hidden_states: torch.Tensor,
    modality_indicators: torch.Tensor,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.Tensor] = None,
    past_key_value: Optional[Tuple[torch.Tensor]] = None,
    output_attentions: bool = False,
    use_cache: bool = False,
    padding_mask: bool = None,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
    if output_attentions:
        warnings.warn(
            "Output attentions is not supported for patched `LlamaAttention`, returning `None` instead."
        )

    bsz, q_len, _ = hidden_states.size()

    query_states = (
        self.q_proj(hidden_states)
        .view(bsz, q_len, self.num_heads, self.head_dim)
        .transpose(1, 2)
    )
    key_states = (
        self.k_proj(hidden_states, modality_indicators)
        .view(bsz, q_len, self.num_key_value_heads, self.head_dim)
        .transpose(1, 2)
    )
    value_states = (
        self.v_proj(hidden_states, modality_indicators)
        .view(bsz, q_len, self.num_key_value_heads, self.head_dim)
        .transpose(1, 2)
    )  # shape: (b, num_heads, s, head_dim)

    kv_seq_len = key_states.shape[-2]
    if past_key_value is not None:
        kv_seq_len += past_key_value[0].shape[-2]

    cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
    query_states, key_states = apply_rotary_pos_emb(
        query_states, key_states, cos, sin, position_ids
    )

    if past_key_value is not None:
        # reuse k, v
        key_states = torch.cat([past_key_value[0], key_states], dim=2)
        value_states = torch.cat([past_key_value[1], value_states], dim=2)

    past_key_value = (key_states, value_states) if use_cache else None

    # repeat k/v heads if n_kv_heads < n_heads
    key_states = repeat_kv(key_states, self.num_key_value_groups)
    value_states = repeat_kv(value_states, self.num_key_value_groups)

    # Transform the data into the format required by flash attention
    qkv = torch.stack([query_states, key_states, value_states], dim=2)
    qkv = qkv.transpose(1, 3)  # shape: [b, s, 3, num_heads, head_dim]
    key_padding_mask = attention_mask

    if key_padding_mask is None:
        qkv = qkv.reshape(-1, 3, self.num_heads, self.head_dim)
        cu_q_lens = torch.arange(
            0, (bsz + 1) * q_len, step=q_len, dtype=torch.int32, device=qkv.device
        )
        max_s = q_len
        output = flash_attn_unpadded_qkvpacked_func(
            qkv, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True
        )
        output = output.view(bsz, q_len, -1)
    else:
        qkv = qkv.reshape(bsz, q_len, -1)
        qkv, indices, cu_q_lens, max_s = unpad_input(qkv, key_padding_mask)
        qkv = qkv.view(-1, 3, self.num_heads, self.head_dim)
        output_unpad = flash_attn_unpadded_qkvpacked_func(
            qkv, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True
        )
        output_unpad = output_unpad.reshape(-1, self.num_heads * self.head_dim)
        output = pad_input(output_unpad, indices, bsz, q_len)

    return self.o_proj(output), None, past_key_value


# Disable the transformation of the attention mask in LlamaModel as the flash attention
# requires the attention mask to be the same as the key_padding_mask
def _prepare_decoder_attention_mask(
    self, attention_mask, input_shape, inputs_embeds, past_key_values_length
):
    # [bsz, seq_len]
    return attention_mask


def replace_llama_attn_with_flash_attn():
    cuda_major, cuda_minor = torch.cuda.get_device_capability()
    if cuda_major < 8:
        warnings.warn(
            "Flash attention is only supported on A100 or H100 GPU during training due to head dim > 64 backward."
            "ref: https://github.com/HazyResearch/flash-attention/issues/190#issuecomment-1523359593"
        )
    transformers.models.llama.modeling_llama.LlamaModel._prepare_decoder_attention_mask = (
        _prepare_decoder_attention_mask
    )
    transformers.models.llama.modeling_llama.LlamaAttention.forward = forward
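The patch above works by reassigning attributes on transformers' Llama classes, so it has to run before any LlamaAttention modules are instantiated. A minimal sketch of the intended call order, mirroring train_mem.py at the bottom of this diff (the import path is copied from that file and assumes the package is installed):

# Apply the flash-attention monkey patch before constructing any LLaMA model,
# so the patched forward and _prepare_decoder_attention_mask are picked up.
from mplug_owl2.train.llama_flash_attn_monkey_patch import replace_llama_attn_with_flash_attn

replace_llama_attn_with_flash_attn()

# Any LlamaForCausalLM / mPLUG-Owl2 model built after this point uses the patched attention.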
mplug_docowl/train/mplug_owl2_trainer.py
DELETED
@@ -1,243 +0,0 @@
import os
import torch

from torch.utils.data import Sampler

from transformers import Trainer
from transformers.trainer import (
    is_sagemaker_mp_enabled,
    get_parameter_names,
    has_length,
    ALL_LAYERNORM_LAYERS,
    ShardedDDPOption,
    logger,
)
from typing import List, Optional
from icecream import ic

def maybe_zero_3(param, ignore_status=False, name=None):
    from deepspeed import zero
    from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
    if hasattr(param, "ds_id"):
        if param.ds_status == ZeroParamStatus.NOT_AVAILABLE:
            if not ignore_status:
                print(name, 'no ignore status')
        with zero.GatheredParameters([param]):
            param = param.data.detach().cpu().clone()
    else:
        param = param.detach().cpu().clone()
    return param


def get_mm_adapter_state_maybe_zero_3(named_params, keys_to_match):
    to_return = {k: t for k, t in named_params if any(key_match in k for key_match in keys_to_match)}
    to_return = {k: maybe_zero_3(v, ignore_status=True, name=k).cpu() for k, v in to_return.items()}
    return to_return


def split_to_even_chunks(indices, lengths, num_chunks):
    """
    Split a list of indices into `chunks` chunks of roughly equal lengths.
    """

    if len(indices) % num_chunks != 0:
        return [indices[i::num_chunks] for i in range(num_chunks)]

    num_indices_per_chunk = len(indices) // num_chunks

    chunks = [[] for _ in range(num_chunks)]
    chunks_lengths = [0 for _ in range(num_chunks)]
    for index in indices:
        shortest_chunk = chunks_lengths.index(min(chunks_lengths))
        chunks[shortest_chunk].append(index)
        chunks_lengths[shortest_chunk] += lengths[index]
        if len(chunks[shortest_chunk]) == num_indices_per_chunk:
            chunks_lengths[shortest_chunk] = float("inf")

    return chunks


def get_modality_length_grouped_indices(lengths, batch_size, world_size, generator=None):
    # We need to use torch for the random part as a distributed sampler will set the random seed for torch.
    assert all(l != 0 for l in lengths), "Should not have zero length."
    if all(l > 0 for l in lengths) or all(l < 0 for l in lengths):
        # all samples are in the same modality
        return get_length_grouped_indices(lengths, batch_size, world_size, generator=generator)
    mm_indices, mm_lengths = zip(*[(i, l) for i, l in enumerate(lengths) if l > 0])
    lang_indices, lang_lengths = zip(*[(i, -l) for i, l in enumerate(lengths) if l < 0])

    mm_shuffle = [mm_indices[i] for i in get_length_grouped_indices(mm_lengths, batch_size, world_size, generator=None)]
    lang_shuffle = [lang_indices[i] for i in get_length_grouped_indices(lang_lengths, batch_size, world_size, generator=None)]
    megabatch_size = world_size * batch_size
    mm_megabatches = [mm_shuffle[i : i + megabatch_size] for i in range(0, len(mm_shuffle), megabatch_size)]
    lang_megabatches = [lang_shuffle[i : i + megabatch_size] for i in range(0, len(lang_shuffle), megabatch_size)]

    last_mm = mm_megabatches[-1]
    last_lang = lang_megabatches[-1]
    additional_batch = last_mm + last_lang
    megabatches = mm_megabatches[:-1] + lang_megabatches[:-1]
    megabatch_indices = torch.randperm(len(megabatches), generator=generator)
    megabatches = [megabatches[i] for i in megabatch_indices]

    if len(additional_batch) > 0:
        megabatches.append(sorted(additional_batch))

    return [i for megabatch in megabatches for i in megabatch]


def get_length_grouped_indices(lengths, batch_size, world_size, generator=None, merge=True):
    # We need to use torch for the random part as a distributed sampler will set the random seed for torch.
    indices = torch.randperm(len(lengths), generator=generator)
    megabatch_size = world_size * batch_size
    megabatches = [indices[i : i + megabatch_size].tolist() for i in range(0, len(lengths), megabatch_size)]
    megabatches = [sorted(megabatch, key=lambda i: lengths[i], reverse=True) for megabatch in megabatches]
    megabatches = [split_to_even_chunks(megabatch, lengths, world_size) for megabatch in megabatches]

    return [i for megabatch in megabatches for batch in megabatch for i in batch]


class LengthGroupedSampler(Sampler):
    r"""
    Sampler that samples indices in a way that groups together features of the dataset of roughly the same length while
    keeping a bit of randomness.
    """

    def __init__(
        self,
        batch_size: int,
        world_size: int,
        lengths: Optional[List[int]] = None,
        generator=None,
        group_by_modality: bool = False,
    ):
        if lengths is None:
            raise ValueError("Lengths must be provided.")

        self.batch_size = batch_size
        self.world_size = world_size
        self.lengths = lengths
        self.generator = generator
        self.group_by_modality = group_by_modality

    def __len__(self):
        return len(self.lengths)

    def __iter__(self):
        if self.group_by_modality:
            indices = get_modality_length_grouped_indices(self.lengths, self.batch_size, self.world_size, generator=self.generator)
        else:
            indices = get_length_grouped_indices(self.lengths, self.batch_size, self.world_size, generator=self.generator)
        return iter(indices)


class MPLUGOwl2Trainer(Trainer):

    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        if self.train_dataset is None or not has_length(self.train_dataset):
            return None

        if self.args.group_by_modality_length:
            lengths = self.train_dataset.modality_lengths
            return LengthGroupedSampler(
                self.args.train_batch_size,
                world_size=self.args.world_size * self.args.gradient_accumulation_steps,
                lengths=lengths,
                group_by_modality=True,
            )
        else:
            return super()._get_train_sampler()

    def create_optimizer(self):
        """
        Setup the optimizer.

        We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
        Trainer's init through `optimizers`, or subclass and override this method in a subclass.
        """
        if is_sagemaker_mp_enabled():
            return super().create_optimizer()
        if self.sharded_ddp == ShardedDDPOption.SIMPLE:
            return super().create_optimizer()

        opt_model = self.model

        if self.optimizer is None:
            decay_parameters = get_parameter_names(opt_model, ALL_LAYERNORM_LAYERS)
            decay_parameters = [name for name in decay_parameters if "bias" not in name]
            if self.args.visual_abstractor_lr is not None:
                projector_parameters = [name for name, _ in opt_model.named_parameters() if "visual_abstractor_lr" in name]
                optimizer_grouped_parameters = [
                    {
                        "params": [
                            p for n, p in opt_model.named_parameters() if (n in decay_parameters and n not in projector_parameters and p.requires_grad)
                        ],
                        "weight_decay": self.args.weight_decay,
                    },
                    {
                        "params": [
                            p for n, p in opt_model.named_parameters() if (n not in decay_parameters and n not in projector_parameters and p.requires_grad)
                        ],
                        "weight_decay": 0.0,
                    },
                    {
                        "params": [
                            p for n, p in opt_model.named_parameters() if (n in decay_parameters and n in projector_parameters and p.requires_grad)
                        ],
                        "weight_decay": self.args.weight_decay,
                        "lr": self.args.visual_abstractor_lr,
                    },
                    {
                        "params": [
                            p for n, p in opt_model.named_parameters() if (n not in decay_parameters and n in projector_parameters and p.requires_grad)
                        ],
                        "weight_decay": 0.0,
                        "lr": self.args.visual_abstractor_lr,
                    },
                ]
            else:
                optimizer_grouped_parameters = [
                    {
                        "params": [
                            p for n, p in opt_model.named_parameters() if (n in decay_parameters and p.requires_grad)
                        ],
                        "weight_decay": self.args.weight_decay,
                    },
                    {
                        "params": [
                            p for n, p in opt_model.named_parameters() if (n not in decay_parameters and p.requires_grad)
                        ],
                        "weight_decay": 0.0,
                    },
                ]
            ic(len(optimizer_grouped_parameters[0]['params']), len(optimizer_grouped_parameters[1]['params']))
            optimizer_cls, optimizer_kwargs = Trainer.get_optimizer_cls_and_kwargs(self.args)

            if self.sharded_ddp == ShardedDDPOption.SIMPLE:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
                if optimizer_cls.__name__ == "Adam8bit":
                    import bitsandbytes

                    manager = bitsandbytes.optim.GlobalOptimManager.get_instance()

                    skipped = 0
                    for module in opt_model.modules():
                        if isinstance(module, nn.Embedding):
                            skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values())
                            logger.info(f"skipped {module}: {skipped/2**20}M params")
                            manager.register_module_override(module, "weight", {"optim_bits": 32})
                            logger.debug(f"bitsandbytes: will optimize {module} in fp32")
                    logger.info(f"skipped: {skipped/2**20}M params")

        return self.optimizer

    def _save_checkpoint(self, model, trial, metrics=None):
        super(MPLUGOwl2Trainer, self)._save_checkpoint(model, trial, metrics)

    def _save(self, output_dir: Optional[str] = None, state_dict=None):
        super(MPLUGOwl2Trainer, self)._save(output_dir, state_dict)
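For context on the sampler being removed here, a small hedged example of how LengthGroupedSampler behaves with group_by_modality=True. The lengths below are invented (positive means the sample has an image, negative means text-only, matching modality_lengths in train.py further down), and the import path assumes this trainer module were still present:

from mplug_owl2.train.mplug_owl2_trainer import LengthGroupedSampler

# 8 samples: five multimodal (positive lengths), three text-only (negative lengths)
lengths = [120, 95, -40, 310, -25, 80, -60, 200]
sampler = LengthGroupedSampler(batch_size=2, world_size=2, lengths=lengths, group_by_modality=True)
# Indices come back permuted so each world_size * batch_size mega-batch stays within a single
# modality and is roughly length-balanced across ranks.
print(list(iter(sampler)))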
mplug_docowl/train/train.py
DELETED
@@ -1,801 +0,0 @@
# Adopted from https://github.com/lm-sys/FastChat. Below is the original copyright:
# Adopted from tatsu-lab@stanford_alpaca. Below is the original copyright:
# Copyright 2023 Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import copy
from dataclasses import dataclass, field
import json
import logging
import pathlib
from typing import Dict, Optional, Sequence, List

import torch

import transformers
from transformers.models.clip.image_processing_clip import CLIPImageProcessor

from torch.utils.data import Dataset
from mplug_owl2.train.mplug_owl2_trainer import MPLUGOwl2Trainer
from mplug_owl2.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN

from mplug_owl2 import conversation as conversation_lib
from mplug_owl2.model import *
from mplug_owl2.mm_utils import tokenizer_image_token

from PIL import Image
from icecream import ic

local_rank = None


def rank0_print(*args):
    if local_rank == 0:
        print(*args)


@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(default="facebook/opt-125m")
    version: Optional[str] = field(default="v0")
    freeze_backbone: bool = field(default=False)

@dataclass
class DataArguments:
    data_path: str = field(default=None,
                           metadata={"help": "Path to the training data."})
    lazy_preprocess: bool = False
    is_multimodal: bool = False
    image_folder: Optional[str] = field(default=None)
    image_aspect_ratio: str = 'square'
    image_grid_pinpoints: Optional[str] = field(default=None)


@dataclass
class TrainingArguments(transformers.TrainingArguments):
    cache_dir: Optional[str] = field(default=None)
    optim: str = field(default="adamw_torch")
    remove_unused_columns: bool = field(default=False)

    tune_visual_abstractor: bool = field(default=True)
    freeze_vision_model: bool = field(default=True)

    model_max_length: int = field(
        default=512,
        metadata={
            "help":
            "Maximum sequence length. Sequences will be right padded (and possibly truncated)."
        },
    )
    double_quant: bool = field(
        default=True,
        metadata={"help": "Compress the quantization statistics through double quantization."}
    )
    quant_type: str = field(
        default="nf4",
        metadata={"help": "Quantization data type to use. Should be one of `fp4` or `nf4`."}
    )
    bits: int = field(
        default=16,
        metadata={"help": "How many bits to use."}
    )
    lora_enable: bool = False
    lora_r: int = 64
    lora_alpha: int = 16
    lora_dropout: float = 0.05
    lora_weight_path: str = ""
    lora_bias: str = "none"
    visual_abstractor_lr: Optional[float] = None
    group_by_modality_length: bool = field(default=False)


def maybe_zero_3(param, ignore_status=False, name=None):
    from deepspeed import zero
    from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
    if hasattr(param, "ds_id"):
        if param.ds_status == ZeroParamStatus.NOT_AVAILABLE:
            if not ignore_status:
                logging.warning(f"{name}: param.ds_status != ZeroParamStatus.NOT_AVAILABLE: {param.ds_status}")
        with zero.GatheredParameters([param]):
            param = param.data.detach().cpu().clone()
    else:
        param = param.detach().cpu().clone()
    return param


# Borrowed from peft.utils.get_peft_model_state_dict
def get_peft_state_maybe_zero_3(named_params, bias):
    if bias == "none":
        to_return = {k: t for k, t in named_params if "lora_" in k}
    elif bias == "all":
        to_return = {k: t for k, t in named_params if "lora_" in k or "bias" in k}
    elif bias == "lora_only":
        to_return = {}
        maybe_lora_bias = {}
        lora_bias_names = set()
        for k, t in named_params:
            if "lora_" in k:
                to_return[k] = t
                bias_name = k.split("lora_")[0] + "bias"
                lora_bias_names.add(bias_name)
            elif "bias" in k:
                maybe_lora_bias[k] = t
        for k, t in maybe_lora_bias:
            if bias_name in lora_bias_names:
                to_return[bias_name] = t
    else:
        raise NotImplementedError
    to_return = {k: maybe_zero_3(v, ignore_status=True) for k, v in to_return.items()}
    return to_return


def get_peft_state_non_lora_maybe_zero_3(named_params, require_grad_only=True):
    to_return = {k: t for k, t in named_params if "lora_" not in k}
    if require_grad_only:
        to_return = {k: t for k, t in to_return.items() if t.requires_grad}
    to_return = {k: maybe_zero_3(v, ignore_status=True).cpu() for k, v in to_return.items()}
    return to_return


def get_mm_adapter_state_maybe_zero_3(named_params, keys_to_match):
    to_return = {k: t for k, t in named_params if any(key_match in k for key_match in keys_to_match)}
    to_return = {k: maybe_zero_3(v, ignore_status=True).cpu() for k, v in to_return.items()}
    return to_return


def find_all_linear_names(model):
    cls = torch.nn.Linear
    lora_module_names = set()
    multimodal_keywords = ['vision_model', 'visual_abstractor']
    for name, module in model.named_modules():
        if any(mm_keyword in name for mm_keyword in multimodal_keywords):
            continue
        if isinstance(module, cls):
            lora_module_names.add(name)

    if 'lm_head' in lora_module_names:  # needed for 16-bit
        lora_module_names.remove('lm_head')
    return list(lora_module_names)


def safe_save_model_for_hf_trainer(trainer: transformers.Trainer,
                                   output_dir: str):
    """Collects the state dict and dump to disk."""

    if trainer.deepspeed:
        torch.cuda.synchronize()
        trainer.save_model(output_dir)
        return

    state_dict = trainer.model.state_dict()
    if trainer.args.should_save:
        cpu_state_dict = {
            key: value.cpu()
            for key, value in state_dict.items()
        }
        del state_dict
        trainer._save(output_dir, state_dict=cpu_state_dict)  # noqa


def smart_tokenizer_and_embedding_resize(
    special_tokens_dict: Dict,
    tokenizer: transformers.PreTrainedTokenizer,
    model: transformers.PreTrainedModel,
):
    """Resize tokenizer and embedding.

    Note: This is the unoptimized version that may make your embedding size not be divisible by 64.
    """
    num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict)
    model.resize_token_embeddings(len(tokenizer))

    if num_new_tokens > 0:
        input_embeddings = model.get_input_embeddings().weight.data
        output_embeddings = model.get_output_embeddings().weight.data

        input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(
            dim=0, keepdim=True)
        output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(
            dim=0, keepdim=True)

        input_embeddings[-num_new_tokens:] = input_embeddings_avg
        output_embeddings[-num_new_tokens:] = output_embeddings_avg


def _tokenize_fn(strings: Sequence[str],
                 tokenizer: transformers.PreTrainedTokenizer) -> Dict:
    """Tokenize a list of strings."""
    tokenized_list = [
        tokenizer(
            text,
            return_tensors="pt",
            padding="longest",
            max_length=tokenizer.model_max_length,
            truncation=True,
        ) for text in strings
    ]
    input_ids = labels = [
        tokenized.input_ids[0] for tokenized in tokenized_list
    ]
    input_ids_lens = labels_lens = [
        tokenized.input_ids.ne(tokenizer.pad_token_id).sum().item()
        for tokenized in tokenized_list
    ]
    return dict(
        input_ids=input_ids,
        labels=labels,
        input_ids_lens=input_ids_lens,
        labels_lens=labels_lens,
    )


def _mask_targets(target, tokenized_lens, speakers):
    # cur_idx = 0
    cur_idx = tokenized_lens[0]
    tokenized_lens = tokenized_lens[1:]
    target[:cur_idx] = IGNORE_INDEX
    for tokenized_len, speaker in zip(tokenized_lens, speakers):
        if speaker == "human":
            target[cur_idx+2:cur_idx + tokenized_len] = IGNORE_INDEX
        cur_idx += tokenized_len


def _add_speaker_and_signal(header, source, get_conversation=True):
    """Add speaker and start/end signal on each round."""
    BEGIN_SIGNAL = "### "
    END_SIGNAL = "\n"
    conversation = header
    for sentence in source:
        from_str = sentence["from"]
        if from_str.lower() == "human":
            from_str = conversation_lib.default_conversation.roles[0]
        elif from_str.lower() == "gpt":
            from_str = conversation_lib.default_conversation.roles[1]
        else:
            from_str = 'unknown'
        sentence["value"] = (BEGIN_SIGNAL + from_str + ": " +
                             sentence["value"] + END_SIGNAL)
        if get_conversation:
            conversation += sentence["value"]
    conversation += BEGIN_SIGNAL
    return conversation


def preprocess_multimodal(
    sources: Sequence[str],
    data_args: DataArguments
) -> Dict:
    is_multimodal = data_args.is_multimodal
    if not is_multimodal:
        return sources

    for source in sources:
        for sentence in source:
            if DEFAULT_IMAGE_TOKEN in sentence['value']:
                sentence['value'] = sentence['value'].replace(DEFAULT_IMAGE_TOKEN, '').strip()
                sentence['value'] = DEFAULT_IMAGE_TOKEN + '\n' + sentence['value']
                sentence['value'] = sentence['value'].strip()

            replace_token = DEFAULT_IMAGE_TOKEN
            sentence["value"] = sentence["value"].replace(DEFAULT_IMAGE_TOKEN, replace_token)

    return sources


def preprocess_v1(
    sources,
    tokenizer: transformers.PreTrainedTokenizer,
    has_image: bool = False
) -> Dict:
    conv = conversation_lib.default_conversation.copy()
    roles = {"human": conv.roles[0], "gpt": conv.roles[1]}

    # Apply prompt templates
    conversations = []
    for i, source in enumerate(sources):
        if roles[source[0]["from"]] != conv.roles[0]:
            # Skip the first one if it is not from human
            source = source[1:]

        conv.messages = []
        for j, sentence in enumerate(source):
            role = roles[sentence["from"]]
            assert role == conv.roles[j % 2], f"{i}"
            conv.append_message(role, sentence["value"])
        conversations.append(conv.get_prompt())

    # Tokenize conversations

    if has_image:
        input_ids = torch.stack([tokenizer_image_token(prompt, tokenizer, return_tensors='pt') for prompt in conversations], dim=0)
    else:
        input_ids = tokenizer(
            conversations,
            return_tensors="pt",
            padding="longest",
            max_length=tokenizer.model_max_length,
            truncation=True,
        ).input_ids

    targets = input_ids.clone()

    assert conv.sep_style == conversation_lib.SeparatorStyle.TWO or conv.sep_style == conversation_lib.SeparatorStyle.TWO_NO_SYS

    # Mask targets
    sep = conv.sep + conv.roles[1] + ": "
    for conversation, target in zip(conversations, targets):
        total_len = int(target.ne(tokenizer.pad_token_id).sum())

        rounds = conversation.split(conv.sep2)
        cur_len = 1
        target[:cur_len] = IGNORE_INDEX
        for i, rou in enumerate(rounds):
            if rou == "":
                break

            parts = rou.split(sep)
            if len(parts) != 2:
                break
            parts[0] += sep

            if has_image:
                round_len = len(tokenizer_image_token(rou, tokenizer))
                instruction_len = len(tokenizer_image_token(parts[0], tokenizer)) - 2
            else:
                round_len = len(tokenizer(rou).input_ids)
                instruction_len = len(tokenizer(parts[0]).input_ids) - 2

            target[cur_len : cur_len + instruction_len] = IGNORE_INDEX

            cur_len += round_len
        target[cur_len:] = IGNORE_INDEX

        if cur_len < tokenizer.model_max_length:
            if cur_len != total_len:
                target[:] = IGNORE_INDEX
                print(
                    f"WARNING: tokenization mismatch: {cur_len} vs. {total_len}."
                    f" (ignored)"
                )

    return dict(
        input_ids=input_ids,
        labels=targets,
    )


def preprocess_plain(
    sources: Sequence[str],
    tokenizer: transformers.PreTrainedTokenizer,
) -> Dict:
    # add end signal and concatenate together
    conversations = []
    for source in sources:
        assert len(source) == 2
        assert DEFAULT_IMAGE_TOKEN in source[0]['value']
        source[0]['value'] = DEFAULT_IMAGE_TOKEN
        conversation = source[0]['value'] + source[1]['value'] + conversation_lib.default_conversation.sep
        conversations.append(conversation)
    # tokenize conversations
    input_ids = [tokenizer_image_token(prompt, tokenizer, return_tensors='pt') for prompt in conversations]
    targets = copy.deepcopy(input_ids)
    for target, source in zip(targets, sources):
        tokenized_len = len(tokenizer_image_token(source[0]['value'], tokenizer))
        target[:tokenized_len] = IGNORE_INDEX

    return dict(input_ids=input_ids, labels=targets)


def preprocess(
    sources: Sequence[str],
    tokenizer: transformers.PreTrainedTokenizer,
    has_image: bool = False
) -> Dict:
    """
    Given a list of sources, each is a conversation list. This transform:
    1. Add signal '### ' at the beginning each sentence, with end signal '\n';
    2. Concatenate conversations together;
    3. Tokenize the concatenated conversation;
    4. Make a deepcopy as the target. Mask human words with IGNORE_INDEX.
    """
    if conversation_lib.default_conversation.sep_style == conversation_lib.SeparatorStyle.PLAIN:
        return preprocess_plain(sources, tokenizer)
    if conversation_lib.default_conversation.version.startswith("v1"):
        return preprocess_v1(sources, tokenizer, has_image=has_image)
    # add end signal and concatenate together
    conversations = []
    for source in sources:
        header = f"{conversation_lib.default_conversation.system}\n\n"
        conversation = _add_speaker_and_signal(header, source)
        conversations.append(conversation)
    # tokenize conversations
    def get_tokenize_len(prompts):
        return [len(tokenizer_image_token(prompt, tokenizer)) for prompt in prompts]
    if has_image:
        input_ids = [tokenizer_image_token(prompt, tokenizer, return_tensors='pt') for prompt in conversations]
    else:
        conversations_tokenized = _tokenize_fn(conversations, tokenizer)
        input_ids = conversations_tokenized["input_ids"]

    targets = copy.deepcopy(input_ids)
    for target, source in zip(targets, sources):
        if has_image:
            tokenized_lens = get_tokenize_len([header] + [s["value"] for s in source])
        else:
            tokenized_lens = _tokenize_fn([header] + [s["value"] for s in source], tokenizer)["input_ids_lens"]
        speakers = [sentence["from"] for sentence in source]
        _mask_targets(target, tokenized_lens, speakers)

    return dict(input_ids=input_ids, labels=targets)


class LazySupervisedDataset(Dataset):
    """Dataset for supervised fine-tuning."""

    def __init__(self, data_path: str,
                 tokenizer: transformers.PreTrainedTokenizer,
                 data_args: DataArguments):
        super(LazySupervisedDataset, self).__init__()
        list_data_dict = json.load(open(data_path, "r"))

        rank0_print("Formatting inputs...Skip in lazy mode")
        self.tokenizer = tokenizer
        self.list_data_dict = list_data_dict
        self.data_args = data_args

    def __len__(self):
        return len(self.list_data_dict)

    @property
    def lengths(self):
        length_list = []
        for sample in self.list_data_dict:
            img_tokens = 128 if 'image' in sample else 0
            length_list.append(sum(len(conv['value'].split()) for conv in sample['conversations']) + img_tokens)
        return length_list


    @property
    def modality_lengths(self):
        length_list = []
        for sample in self.list_data_dict:
            cur_len = sum(len(conv['value'].split()) for conv in sample['conversations'])
            cur_len = cur_len if 'image' in sample else -cur_len
            length_list.append(cur_len)
        return length_list

    # def __getitem__(self, i) -> Dict[str, torch.Tensor]:
    #     sources = self.list_data_dict[i]
    #     if isinstance(i, int):
    #         sources = [sources]
    #     assert len(sources) == 1, "Don't know why it is wrapped to a list"  # FIXME
    #     if 'image' in sources[0]:
    #         image_file = self.list_data_dict[i]['image']
    #         image_folder = self.data_args.image_folder
    #         processor = self.data_args.image_processor
    #         image = Image.open(os.path.join(image_folder, image_file)).convert('RGB')
    #         if self.data_args.image_aspect_ratio == 'pad':
    #             def expand2square(pil_img, background_color):
    #                 width, height = pil_img.size
    #                 if width == height:
    #                     return pil_img
    #                 elif width > height:
    #                     result = Image.new(pil_img.mode, (width, width), background_color)
    #                     result.paste(pil_img, (0, (width - height) // 2))
    #                     return result
    #                 else:
    #                     result = Image.new(pil_img.mode, (height, height), background_color)
    #                     result.paste(pil_img, ((height - width) // 2, 0))
    #                     return result
    #             image = expand2square(image, tuple(int(x*255) for x in processor.image_mean))
    #             image = processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
    #         else:
    #             image = processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
    #         sources = preprocess_multimodal(
    #             copy.deepcopy([e["conversations"] for e in sources]),
    #             self.data_args)
    #     else:
    #         sources = copy.deepcopy([e["conversations"] for e in sources])
    #     data_dict = preprocess(
    #         sources,
    #         self.tokenizer,
    #         has_image=('image' in self.list_data_dict[i]))
    #     if isinstance(i, int):
    #         data_dict = dict(input_ids=data_dict["input_ids"][0],
    #                          labels=data_dict["labels"][0])

    #     # image exist in the data
    #     if 'image' in self.list_data_dict[i]:
    #         data_dict['image'] = image
    #     elif self.data_args.is_multimodal:
    #         # image does not exist in the data, but the model is multimodal
    #         crop_size = self.data_args.image_processor.crop_size
    #         data_dict['image'] = torch.zeros(3, crop_size['height'], crop_size['width'])
    #     return data_dict

    def next_rand(self):
        import random
        return random.randint(0, len(self)-1)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        while True:
            sources = self.list_data_dict[i]
            if isinstance(i, int):
                sources = [sources]
            assert len(sources) == 1, "Don't know why it is wrapped to a list"  # FIXME
            if 'image' in sources[0]:

                image_file = self.list_data_dict[i]['image']
                image_folder = self.data_args.image_folder
                processor = self.data_args.image_processor
                from pathlib import Path
                if not Path(os.path.join(image_folder, image_file)).exists():
                    i = self.next_rand()
                    continue
                image = Image.open(os.path.join(image_folder, image_file)).convert('RGB')
                if self.data_args.image_aspect_ratio == 'pad':
                    def expand2square(pil_img, background_color):
                        width, height = pil_img.size
                        if width == height:
                            return pil_img
                        elif width > height:
                            result = Image.new(pil_img.mode, (width, width), background_color)
                            result.paste(pil_img, (0, (width - height) // 2))
                            return result
                        else:
                            result = Image.new(pil_img.mode, (height, height), background_color)
                            result.paste(pil_img, ((height - width) // 2, 0))
                            return result
                    image = expand2square(image, tuple(int(x*255) for x in processor.image_mean))
                    image = processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
                else:
                    image = processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
                sources = preprocess_multimodal(
                    copy.deepcopy([e["conversations"] for e in sources]),
                    self.data_args)
            else:

                sources = copy.deepcopy([e["conversations"] for e in sources])
            data_dict = preprocess(
                sources,
                self.tokenizer,
                has_image=('image' in self.list_data_dict[i]))
            if isinstance(i, int):
                data_dict = dict(input_ids=data_dict["input_ids"][0],
                                 labels=data_dict["labels"][0])

            # image exist in the data
            if 'image' in self.list_data_dict[i]:
                data_dict['image'] = image
            elif self.data_args.is_multimodal:
                # image does not exist in the data, but the model is multimodal
                crop_size = self.data_args.image_processor.crop_size
                data_dict['image'] = torch.zeros(3, crop_size['height'], crop_size['width'])
            return data_dict


@dataclass
class DataCollatorForSupervisedDataset(object):
    """Collate examples for supervised fine-tuning."""

    tokenizer: transformers.PreTrainedTokenizer

    def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]:
        input_ids, labels = tuple([instance[key] for instance in instances]
                                  for key in ("input_ids", "labels"))
        input_ids = torch.nn.utils.rnn.pad_sequence(
            input_ids,
            batch_first=True,
            padding_value=self.tokenizer.pad_token_id)
        labels = torch.nn.utils.rnn.pad_sequence(labels,
                                                 batch_first=True,
                                                 padding_value=IGNORE_INDEX)
        input_ids = input_ids[:, :self.tokenizer.model_max_length]
        labels = labels[:, :self.tokenizer.model_max_length]
        batch = dict(
            input_ids=input_ids,
            labels=labels,
            attention_mask=input_ids.ne(self.tokenizer.pad_token_id),
        )

        if 'image' in instances[0]:
            images = [instance['image'] for instance in instances]
            if all(x is not None and x.shape == images[0].shape for x in images):
                batch['images'] = torch.stack(images)
            else:
                batch['images'] = images

        return batch


def make_supervised_data_module(tokenizer: transformers.PreTrainedTokenizer,
                                data_args) -> Dict:
    """Make dataset and collator for supervised fine-tuning."""
    train_dataset = LazySupervisedDataset(tokenizer=tokenizer,
                                          data_path=data_args.data_path,
                                          data_args=data_args)
    data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer)
    return dict(train_dataset=train_dataset,
                eval_dataset=None,
                data_collator=data_collator)


def train():
    global local_rank

    parser = transformers.HfArgumentParser(
        (ModelArguments, DataArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    local_rank = training_args.local_rank
    compute_dtype = (torch.float16 if training_args.fp16 else (torch.bfloat16 if training_args.bf16 else torch.float32))

    bnb_model_from_pretrained_args = {}
    if training_args.bits in [4, 8]:
        from transformers import BitsAndBytesConfig
        bnb_model_from_pretrained_args.update(dict(
            device_map={"": training_args.device},
            load_in_4bit=training_args.bits == 4,
            load_in_8bit=training_args.bits == 8,
            quantization_config=BitsAndBytesConfig(
                load_in_4bit=training_args.bits == 4,
                load_in_8bit=training_args.bits == 8,
                llm_int8_threshold=6.0,
                llm_int8_has_fp16_weight=False,
                bnb_4bit_compute_dtype=compute_dtype,
                bnb_4bit_use_double_quant=training_args.double_quant,
                bnb_4bit_quant_type=training_args.quant_type  # {'fp4', 'nf4'}
            )
        ))

    model = MPLUGOwl2LlamaForCausalLM.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=training_args.cache_dir,
        **bnb_model_from_pretrained_args
    )
    model.config.use_cache = False

    if model_args.freeze_backbone:
        model.model.requires_grad_(False)

    if training_args.bits in [4, 8]:
        from peft import prepare_model_for_kbit_training
        model.config.torch_dtype = (torch.float32 if training_args.fp16 else (torch.bfloat16 if training_args.bf16 else torch.float32))
        model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=training_args.gradient_checkpointing)

    if training_args.gradient_checkpointing:
        if hasattr(model, "enable_input_require_grads"):
            model.enable_input_require_grads()
        else:
            def make_inputs_require_grad(module, input, output):
                output.requires_grad_(True)
            model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)

    if training_args.lora_enable:
        from peft import LoraConfig, get_peft_model
        lora_config = LoraConfig(
            r=training_args.lora_r,
            lora_alpha=training_args.lora_alpha,
            target_modules=find_all_linear_names(model),
            lora_dropout=training_args.lora_dropout,
            bias=training_args.lora_bias,
            task_type="CAUSAL_LM",
        )
        if training_args.bits == 16:
            if training_args.bf16:
                model.to(torch.bfloat16)
            if training_args.fp16:
                model.to(torch.float16)
        rank0_print("Adding LoRA adapters...")
        model = get_peft_model(model, lora_config)

    tokenizer = transformers.AutoTokenizer.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=training_args.cache_dir,
        model_max_length=training_args.model_max_length,
        padding_side="right",
        use_fast=False,
    )


    tokenizer.pad_token = tokenizer.unk_token
    if model_args.version in conversation_lib.conv_templates:
        conversation_lib.default_conversation = conversation_lib.conv_templates[model_args.version]
    else:
        conversation_lib.default_conversation = conversation_lib.conv_templates["vicuna_v1"]

    if not training_args.freeze_vision_model and training_args.bits in [4, 8]:
        model.get_model().vision_model.to(dtype=compute_dtype, device=training_args.device)
    else:
        vision_tower = model.get_model().vision_model
        vision_tower.to(dtype=torch.bfloat16 if training_args.bf16 else torch.float16, device=training_args.device)

    if training_args.tune_visual_abstractor and training_args.bits in [4, 8]:
        model.get_model().visual_abstractor.to(dtype=compute_dtype, device=training_args.device)
    else:
        visual_abstractor = model.get_model().visual_abstractor
        visual_abstractor.to(dtype=torch.bfloat16 if training_args.bf16 else torch.float16, device=training_args.device)

    data_args.image_processor = CLIPImageProcessor.from_pretrained(model_args.model_name_or_path)
    data_args.is_multimodal = True

    model.config.image_aspect_ratio = data_args.image_aspect_ratio
    model.config.image_grid_pinpoints = data_args.image_grid_pinpoints
    model.config.tune_visual_abstractor = model_args.tune_visual_abstractor = training_args.tune_visual_abstractor
    ic(training_args.tune_visual_abstractor)
    model.requires_grad_(True)
    if training_args.tune_visual_abstractor:
        # model.requires_grad_(False)
        for p in model.get_model().visual_abstractor.parameters():
            p.requires_grad = True

    model.config.freeze_vision_model = training_args.freeze_vision_model
    ic(training_args.freeze_vision_model)
    if training_args.freeze_vision_model:
        for p in model.get_model().vision_model.parameters():
            p.requires_grad = False

    model.config.visual_abstractor_lr = training_args.visual_abstractor_lr


    if training_args.bits in [4, 8]:
        from peft.tuners.lora import LoraLayer
        for name, module in model.named_modules():
            if isinstance(module, LoraLayer):
                if training_args.bf16:
                    module = module.to(torch.bfloat16)
            if 'norm' in name:
                module = module.to(torch.float32)
            if 'lm_head' in name or 'embed_tokens' in name:
                if hasattr(module, 'weight'):
                    if training_args.bf16 and module.weight.dtype == torch.float32:
                        module = module.to(torch.bfloat16)

    data_module = make_supervised_data_module(tokenizer=tokenizer,
                                              data_args=data_args)
    trainer = MPLUGOwl2Trainer(model=model,
                               tokenizer=tokenizer,
                               args=training_args,
                               **data_module)

    # if list(pathlib.Path(training_args.output_dir).glob("checkpoint-*")):
    #     trainer.train(resume_from_checkpoint=True)
    # else:
    #     trainer.train()

    # TODO I dont like auto resume << REMOVE IT AND UNCOMMENT THE ABOVE CODE
    trainer.train()

    trainer.save_state()

    model.config.use_cache = True

    if training_args.lora_enable:
        state_dict = get_peft_state_maybe_zero_3(
            model.named_parameters(), training_args.lora_bias
        )
        non_lora_state_dict = get_peft_state_non_lora_maybe_zero_3(
            model.named_parameters()
        )
        if training_args.local_rank == 0 or training_args.local_rank == -1:
            model.config.save_pretrained(training_args.output_dir)
            model.save_pretrained(training_args.output_dir, state_dict=state_dict)
            torch.save(non_lora_state_dict, os.path.join(training_args.output_dir, 'non_lora_trainables.bin'))
    else:
        safe_save_model_for_hf_trainer(trainer=trainer,
                                       output_dir=training_args.output_dir)


if __name__ == "__main__":
    train()
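Because train() feeds ModelArguments, DataArguments, and TrainingArguments into transformers.HfArgumentParser, every dataclass field above doubles as a command-line flag of the same name. A hedged sketch of that parsing step (all paths and values are placeholders rather than settings from this repo, and the dataclasses are assumed to come from the deleted train.py above):

import transformers

parser = transformers.HfArgumentParser((ModelArguments, DataArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses(args=[
    "--model_name_or_path", "/path/to/mplug-owl2-base",  # placeholder
    "--data_path", "/path/to/train.json",                # placeholder
    "--image_folder", "/path/to/images",                 # placeholder
    "--output_dir", "./checkpoints/docowl-sft",          # placeholder
    "--bf16", "True",
    "--group_by_modality_length", "True",
    "--model_max_length", "2048",
])
# The resulting dataclasses are exactly what train() consumes before building the model and trainer.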
mplug_docowl/train/train_mem.py
DELETED
@@ -1,13 +0,0 @@
# Adopted from https://github.com/lm-sys/FastChat. Below is the original copyright:
# Adopted from tatsu-lab@stanford_alpaca. Below is the original copyright:
# Make it more memory efficient by monkey patching the LLaMA model with FlashAttn.

# Need to call this before importing transformers.
from mplug_owl2.train.llama_flash_attn_monkey_patch import replace_llama_attn_with_flash_attn

replace_llama_attn_with_flash_attn()

from mplug_owl2.train.train import train

if __name__ == "__main__":
    train()