from typing import List, Optional, Tuple, Union
from dataclasses import dataclass
from abc import ABC, abstractmethod
import copy
import os
import re

import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss

from transformers import AutoConfig, AutoModelForSeq2SeqLM, \
    T5Config, T5Model, T5ForConditionalGeneration
from transformers import DonutSwinModel, DonutImageProcessor, DonutSwinConfig
from transformers.models.t5.modeling_t5 import T5Stack
from transformers.modeling_outputs import CausalLMOutputWithPast, Seq2SeqLMOutput, BaseModelOutput
from transformers.utils import ModelOutput


IGNORE_INDEX = -100
IMAGE_TOKEN_INDEX = -200
DEFAULT_IMAGE_TOKEN = "<image>"
DEFAULT_IMAGE_PATCH_TOKEN = "<im_patch>"
DEFAULT_IM_START_TOKEN = "<im_start>"
DEFAULT_IM_END_TOKEN = "<im_end>"
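# Note: IGNORE_INDEX matches the default ignore_index of nn.CrossEntropyLoss, so
# label positions filled with it do not contribute to the loss. IMAGE_TOKEN_INDEX
# is the sentinel id placed in input_ids wherever projected image features are to
# be spliced in by prepare_inputs_labels_for_multimodal below.

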
class UniChartVisionTower(nn.Module):

    def __init__(self, vision_tower, args, delay_load=False):
        super().__init__()

        self.is_loaded = False

        self.vision_tower_name = vision_tower
        self.select_layer = args.mm_vision_select_layer
        self.select_feature = getattr(args, 'mm_vision_select_feature', 'patch')
        # Freeze the vision encoder unless the config explicitly asks to tune it
        # (the flag defaults to False when absent from args).
        self.tune_vision_encoder = getattr(args, 'tune_vision_encoder', False)

        if not delay_load:
            self.load_model()
        else:
            self.cfg_only = DonutSwinConfig.from_pretrained(self.vision_tower_name)

    def load_model(self):
        self.image_processor = DonutImageProcessor.from_pretrained(self.vision_tower_name)
        self.vision_tower = DonutSwinModel.from_pretrained(self.vision_tower_name)

        if not self.tune_vision_encoder:
            self.vision_tower.requires_grad_(False)

        self.is_loaded = True
    def feature_select(self, image_forward_outs):
        image_features = image_forward_outs.hidden_states[self.select_layer]
        if self.select_feature == 'patch':
            image_features = image_features[:, 1:]
        elif self.select_feature == 'cls_patch':
            image_features = image_features
        else:
            raise ValueError(f'Unexpected select feature: {self.select_feature}')
        return image_features

    @torch.no_grad()
    def forward(self, images):
        if type(images) is list:
            image_features = []
            for image in images:
                image_forward_out = self.vision_tower(image.to(device=self.device, dtype=self.dtype).unsqueeze(0), output_hidden_states=True)
                image_feature = self.feature_select(image_forward_out).to(image.dtype)
                image_features.append(image_feature)
        else:
            image_forward_outs = self.vision_tower(images.to(device=self.device, dtype=self.dtype), output_hidden_states=True)
            image_features = self.feature_select(image_forward_outs).to(images.dtype)

        return image_features

    @property
    def dummy_feature(self):
        return torch.zeros(1, self.hidden_size, device=self.device, dtype=self.dtype)

    @property
    def dtype(self):
        return self.vision_tower.dtype

    @property
    def device(self):
        return self.vision_tower.device

    @property
    def config(self):
        if self.is_loaded:
            return self.vision_tower.config
        else:
            return self.cfg_only

    @property
    def hidden_size(self):
        return self.config.hidden_size

    @property
    def num_patches(self):
        return (self.config.image_size // self.config.patch_size) ** 2


def build_vision_tower(vision_tower_cfg, **kwargs):
    vision_tower = getattr(vision_tower_cfg, 'mm_vision_tower', getattr(vision_tower_cfg, 'vision_tower', None))
    is_absolute_path_exists = os.path.exists(vision_tower)
    if is_absolute_path_exists:
        if 'unichart' in vision_tower:
            return UniChartVisionTower(vision_tower, args=vision_tower_cfg, **kwargs)

    raise ValueError(f'Unknown vision tower: {vision_tower}')


def build_vision_projector(config, delay_load=False, **kwargs):
    projector_type = getattr(config, 'mm_projector_type', 'mlp3x_gelu')

    if projector_type == 'linear':
        return nn.Linear(config.mm_hidden_size, config.hidden_size)

    mlp_gelu_match = re.match(r'^mlp(\d+)x_gelu$', projector_type)
    if mlp_gelu_match:
        mlp_depth = int(mlp_gelu_match.group(1))
        modules = [nn.Linear(config.mm_hidden_size, config.hidden_size)]
        for _ in range(1, mlp_depth):
            modules.append(nn.GELU())
            modules.append(nn.Linear(config.hidden_size, config.hidden_size))
        return nn.Sequential(*modules)

    raise ValueError(f'Unknown projector type: {projector_type}')
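# For reference: with the default 'mlp3x_gelu' projector type and a hypothetical
# config where mm_hidden_size=1024 and hidden_size=768, the function above builds
#     nn.Sequential(
#         nn.Linear(1024, 768),
#         nn.GELU(), nn.Linear(768, 768),
#         nn.GELU(), nn.Linear(768, 768),
#     )
# i.e. the digit in 'mlp<N>x_gelu' is the number of Linear layers, with a GELU
# between consecutive layers.

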
class LlavaMetaModel:

    def __init__(self, config):
        super(LlavaMetaModel, self).__init__(config)

        if hasattr(config, "mm_vision_tower"):
            self.vision_tower = build_vision_tower(config, delay_load=True)
            self.mm_projector = build_vision_projector(self.config)

    def get_vision_tower(self):
        vision_tower = getattr(self, 'vision_tower', None)
        if type(vision_tower) is list:
            vision_tower = vision_tower[0]
        return vision_tower

    def initialize_vision_modules(self, model_args, fsdp=None):
        vision_tower = model_args.vision_tower
        mm_vision_select_layer = model_args.mm_vision_select_layer
        mm_vision_select_feature = model_args.mm_vision_select_feature
        pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter

        self.config.mm_vision_tower = vision_tower

        vision_tower = build_vision_tower(model_args)

        if fsdp is not None and len(fsdp) > 0:
            self.vision_tower = [vision_tower]
        else:
            self.vision_tower = vision_tower

        self.config.use_mm_proj = True
        self.config.mm_projector_type = getattr(model_args, 'mm_projector_type', 'linear')
        self.config.mm_hidden_size = vision_tower.hidden_size
        self.config.mm_vision_select_layer = mm_vision_select_layer
        self.config.mm_vision_select_feature = mm_vision_select_feature

        if not hasattr(self, 'mm_projector'):
            self.mm_projector = build_vision_projector(self.config)

        if pretrain_mm_mlp_adapter is not None:
            mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu')

            def get_w(weights, keyword):
                return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k}

            self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector'))


class LlavaMetaForCausalLM(ABC):

    @abstractmethod
    def get_model(self):
        pass

    def get_vision_tower(self):
        return self.get_model().get_vision_tower()

    def encode_images(self, images):
        image_features = self.get_model().get_vision_tower()(images)
        image_features = self.get_model().mm_projector(image_features)
        return image_features
    def prepare_inputs_labels_for_multimodal(
        self, input_ids, attention_mask, past_key_values, labels, images
    ):
        """Splice projected image features into the token embedding sequence.

        Every IMAGE_TOKEN_INDEX placeholder in input_ids is replaced by the
        corresponding image features; labels at those positions are padded with
        IGNORE_INDEX and the attention mask is extended to match. Returns
        (input_ids, attention_mask, past_key_values, inputs_embeds, labels),
        where input_ids is None whenever inputs_embeds has been built.
        """
        vision_tower = self.get_vision_tower()
        if vision_tower is None or images is None or input_ids.shape[1] == 1:
            if past_key_values is not None and vision_tower is not None and images is not None and input_ids.shape[1] == 1:
                attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device)
            return input_ids, attention_mask, past_key_values, None, labels

        if type(images) is list or images.ndim == 5:
            concat_images = torch.cat([image for image in images], dim=0)
            image_features = self.encode_images(concat_images)
            split_sizes = [image.shape[0] for image in images]
            image_features = torch.split(image_features, split_sizes, dim=0)
            image_features = [x.flatten(0, 1) for x in image_features]
        else:
            image_features = self.encode_images(images)

        new_input_embeds = []
        new_labels = [] if labels is not None else None
        cur_image_idx = 0
        for batch_idx, cur_input_ids in enumerate(input_ids):
            if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0:
                # No image token in this sample: keep the text embeddings, but touch
                # the projector so its parameters still receive (zero) gradients.
                cur_input_embeds = self.get_model().embed_tokens(cur_input_ids)
                cur_input_embeds = cur_input_embeds + (0. * self.get_model().mm_projector(vision_tower.dummy_feature)).sum()
                new_input_embeds.append(cur_input_embeds)
                if labels is not None:
                    new_labels.append(labels[batch_idx])
                cur_image_idx += 1
                continue
            image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]
            cur_new_input_embeds = []
            if labels is not None:
                cur_labels = labels[batch_idx]
                cur_new_labels = []
                assert cur_labels.shape == cur_input_ids.shape
            while image_token_indices.numel() > 0:
                cur_image_features = image_features[cur_image_idx]
                image_token_start = image_token_indices[0]
                if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):
                    cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start-1]).detach())
                    cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start-1:image_token_start]))
                    cur_new_input_embeds.append(cur_image_features)
                    cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2]))
                    if labels is not None:
                        cur_new_labels.append(cur_labels[:image_token_start])
                        cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))
                        cur_new_labels.append(cur_labels[image_token_start:image_token_start+1])
                        cur_labels = cur_labels[image_token_start+2:]
                else:
                    cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start]))
                    cur_new_input_embeds.append(cur_image_features)
                    if labels is not None:
                        cur_new_labels.append(cur_labels[:image_token_start])
                        cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))
                        cur_labels = cur_labels[image_token_start+1:]
                cur_image_idx += 1
                if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):
                    cur_input_ids = cur_input_ids[image_token_start+2:]
                else:
                    cur_input_ids = cur_input_ids[image_token_start+1:]
                image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]
            if cur_input_ids.numel() > 0:
                if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):
                    cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids).detach())
                else:
                    cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))
                if labels is not None:
                    cur_new_labels.append(cur_labels)
            cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]
            cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)
            new_input_embeds.append(cur_new_input_embeds)
            if labels is not None:
                cur_new_labels = torch.cat(cur_new_labels, dim=0)
                new_labels.append(cur_new_labels)

        if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):
            max_len = max(x.shape[0] for x in new_input_embeds)

            new_input_embeds_align = []
            for cur_new_embed in new_input_embeds:
                cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)
                new_input_embeds_align.append(cur_new_embed)
            new_input_embeds = torch.stack(new_input_embeds_align, dim=0)

            if labels is not None:
                new_labels_align = []
                _new_labels = new_labels
                for cur_new_label in new_labels:
                    cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)
                    new_labels_align.append(cur_new_label)
                new_labels = torch.stack(new_labels_align, dim=0)

            if attention_mask is not None:
                new_attention_mask = []
                for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels):
                    new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device)
                    new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device)
                    cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)
                    new_attention_mask.append(cur_new_attention_mask)
                attention_mask = torch.stack(new_attention_mask, dim=0)
                assert attention_mask.shape == new_labels.shape
        else:
            new_input_embeds = torch.stack(new_input_embeds, dim=0)
            if labels is not None:
                new_labels = torch.stack(new_labels, dim=0)

            if attention_mask is not None:
                new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device)
                attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1)
                assert attention_mask.shape == new_input_embeds.shape[:2]

        return None, attention_mask, past_key_values, new_input_embeds, new_labels
    def initialize_vision_tokenizer(self, model_args, tokenizer):
        if model_args.mm_use_im_patch_token:
            tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
            self.resize_token_embeddings(len(tokenizer))

        if model_args.mm_use_im_start_end:
            num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
            self.resize_token_embeddings(len(tokenizer))

            if num_new_tokens > 0:
                input_embeddings = self.get_input_embeddings().weight.data
                output_embeddings = self.get_output_embeddings().weight.data

                input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(
                    dim=0, keepdim=True)
                output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(
                    dim=0, keepdim=True)

                input_embeddings[-num_new_tokens:] = input_embeddings_avg
                output_embeddings[-num_new_tokens:] = output_embeddings_avg

            if model_args.tune_mm_mlp_adapter:
                for p in self.get_input_embeddings().parameters():
                    p.requires_grad = True
                for p in self.get_output_embeddings().parameters():
                    p.requires_grad = False

            if model_args.pretrain_mm_mlp_adapter:
                mm_projector_weights = torch.load(model_args.pretrain_mm_mlp_adapter, map_location='cpu')
                embed_tokens_weight = mm_projector_weights['model.embed_tokens.weight']
                assert num_new_tokens == 2
                if input_embeddings.shape == embed_tokens_weight.shape:
                    input_embeddings[-num_new_tokens:] = embed_tokens_weight[-num_new_tokens:]
                elif embed_tokens_weight.shape[0] == num_new_tokens:
                    input_embeddings[-num_new_tokens:] = embed_tokens_weight
                else:
                    raise ValueError(f"Unexpected embed_tokens_weight shape. Pretrained: {embed_tokens_weight.shape}. Current: {input_embeddings.shape}. Number of new tokens: {num_new_tokens}.")
        elif model_args.mm_use_im_patch_token:
            if model_args.tune_mm_mlp_adapter:
                for p in self.get_input_embeddings().parameters():
                    p.requires_grad = False
                for p in self.get_output_embeddings().parameters():
                    p.requires_grad = False
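# Encoder-decoder counterpart of LlavaMetaForCausalLM: the image-splicing logic is
# the same, but token embeddings and the mm_projector are reached through
# get_encoder() rather than get_model(), and no past_key_values are threaded through.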
class LlavaMetaForConditionalGeneration(ABC):

    def get_vision_tower(self):
        return self.get_encoder().get_vision_tower()

    def encode_images(self, images):
        image_features = self.get_encoder().get_vision_tower()(images)
        image_features = self.get_encoder().mm_projector(image_features)
        return image_features
    def prepare_inputs_labels_for_multimodal(
        self, input_ids, attention_mask, labels, images
    ):
        vision_tower = self.get_vision_tower()
        if vision_tower is None or images is None or input_ids.shape[1] == 1:
            # Return four values so the unpacking in forward() stays valid.
            return input_ids, attention_mask, None, labels

        if type(images) is list or images.ndim == 5:
            concat_images = torch.cat([image for image in images], dim=0)
            image_features = self.encode_images(concat_images)
            split_sizes = [image.shape[0] for image in images]
            image_features = torch.split(image_features, split_sizes, dim=0)
            image_features = [x.flatten(0, 1) for x in image_features]
        else:
            image_features = self.encode_images(images)

        if labels is None:
            labels = torch.full_like(input_ids, IGNORE_INDEX)

        new_input_embeds = []
        new_labels = [] if labels is not None else None
        cur_image_idx = 0
        for batch_idx, cur_input_ids in enumerate(input_ids):
            if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0:
                cur_input_embeds = self.get_encoder().embed_tokens(cur_input_ids)
                cur_input_embeds = cur_input_embeds + (0. * self.get_encoder().mm_projector(vision_tower.dummy_feature)).sum()
                new_input_embeds.append(cur_input_embeds)
                if labels is not None:
                    new_labels.append(labels[batch_idx])
                cur_image_idx += 1
                continue
            image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]
            cur_new_input_embeds = []
            if labels is not None:
                cur_labels = labels[batch_idx]
                cur_new_labels = []
                assert cur_labels.shape == cur_input_ids.shape
            while image_token_indices.numel() > 0:
                cur_image_features = image_features[cur_image_idx]
                image_token_start = image_token_indices[0]
                if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):
                    cur_new_input_embeds.append(self.get_encoder().embed_tokens(cur_input_ids[:image_token_start-1]).detach())
                    cur_new_input_embeds.append(self.get_encoder().embed_tokens(cur_input_ids[image_token_start-1:image_token_start]))
                    cur_new_input_embeds.append(cur_image_features)
                    cur_new_input_embeds.append(self.get_encoder().embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2]))
                    if labels is not None:
                        cur_new_labels.append(cur_labels[:image_token_start])
                        cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))
                        cur_new_labels.append(cur_labels[image_token_start:image_token_start+1])
                        cur_labels = cur_labels[image_token_start+2:]
                else:
                    cur_new_input_embeds.append(self.get_encoder().embed_tokens(cur_input_ids[:image_token_start]))
                    cur_new_input_embeds.append(cur_image_features)
                    if labels is not None:
                        cur_new_labels.append(cur_labels[:image_token_start])
                        cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))
                        cur_labels = cur_labels[image_token_start+1:]
                cur_image_idx += 1
                if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):
                    cur_input_ids = cur_input_ids[image_token_start+2:]
                else:
                    cur_input_ids = cur_input_ids[image_token_start+1:]
                image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]
            if cur_input_ids.numel() > 0:
                if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):
                    cur_new_input_embeds.append(self.get_encoder().embed_tokens(cur_input_ids).detach())
                else:
                    cur_new_input_embeds.append(self.get_encoder().embed_tokens(cur_input_ids))
                if labels is not None:
                    cur_new_labels.append(cur_labels)
            cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]
            cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)
            new_input_embeds.append(cur_new_input_embeds)
            if labels is not None:
                cur_new_labels = torch.cat(cur_new_labels, dim=0)
                new_labels.append(cur_new_labels)

        if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):
            max_len = max(x.shape[0] for x in new_input_embeds)

            new_input_embeds_align = []
            for cur_new_embed in new_input_embeds:
                cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)
                new_input_embeds_align.append(cur_new_embed)
            new_input_embeds = torch.stack(new_input_embeds_align, dim=0)

            if labels is not None:
                new_labels_align = []
                _new_labels = new_labels
                for cur_new_label in new_labels:
                    cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)
                    new_labels_align.append(cur_new_label)
                new_labels = torch.stack(new_labels_align, dim=0)

            if attention_mask is not None:
                new_attention_mask = []
                for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels):
                    new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device)
                    new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device)
                    cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)
                    new_attention_mask.append(cur_new_attention_mask)
                attention_mask = torch.stack(new_attention_mask, dim=0)
                assert attention_mask.shape == new_labels.shape
        else:
            new_input_embeds = torch.stack(new_input_embeds, dim=0)
            if labels is not None:
                new_labels = torch.stack(new_labels, dim=0)

            if attention_mask is not None:
                new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device)
                attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1)
                assert attention_mask.shape == new_input_embeds.shape[:2]

        return None, attention_mask, new_input_embeds, new_labels
    def initialize_vision_tokenizer(self, model_args, tokenizer):
        if model_args.mm_use_im_patch_token:
            tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
            self.resize_token_embeddings(len(tokenizer))

        if model_args.mm_use_im_start_end:
            num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)
            self.resize_token_embeddings(len(tokenizer))

            if num_new_tokens > 0:
                input_embeddings = self.get_input_embeddings().weight.data
                output_embeddings = self.get_output_embeddings().weight.data

                input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(
                    dim=0, keepdim=True)
                output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(
                    dim=0, keepdim=True)

                input_embeddings[-num_new_tokens:] = input_embeddings_avg
                output_embeddings[-num_new_tokens:] = output_embeddings_avg

            if model_args.tune_mm_mlp_adapter:
                for p in self.get_input_embeddings().parameters():
                    p.requires_grad = True
                for p in self.get_output_embeddings().parameters():
                    p.requires_grad = False

            if model_args.pretrain_mm_mlp_adapter:
                mm_projector_weights = torch.load(model_args.pretrain_mm_mlp_adapter, map_location='cpu')
                embed_tokens_weight = mm_projector_weights['model.embed_tokens.weight']
                assert num_new_tokens == 2
                if input_embeddings.shape == embed_tokens_weight.shape:
                    input_embeddings[-num_new_tokens:] = embed_tokens_weight[-num_new_tokens:]
                elif embed_tokens_weight.shape[0] == num_new_tokens:
                    input_embeddings[-num_new_tokens:] = embed_tokens_weight
                else:
                    raise ValueError(f"Unexpected embed_tokens_weight shape. Pretrained: {embed_tokens_weight.shape}. Current: {input_embeddings.shape}. Number of new tokens: {num_new_tokens}.")
        elif model_args.mm_use_im_patch_token:
            if model_args.tune_mm_mlp_adapter:
                for p in self.get_input_embeddings().parameters():
                    p.requires_grad = False
                for p in self.get_output_embeddings().parameters():
                    p.requires_grad = False
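# Encoder-side mixin: LlavaT5Stack below inherits from both LlavaMetaT5Model and
# T5Stack, so the T5 encoder stack owns the vision tower and the mm_projector.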
class LlavaMetaT5Model:

    def __init__(self, config, embed_tokens):
        super(LlavaMetaT5Model, self).__init__(config, embed_tokens)

        if hasattr(config, "mm_vision_tower"):
            self.vision_tower = build_vision_tower(config, delay_load=True)
            self.mm_projector = nn.Linear(config.mm_hidden_size, config.hidden_size)

    def get_vision_tower(self):
        vision_tower = getattr(self, 'vision_tower', None)
        if type(vision_tower) is list:
            vision_tower = vision_tower[0]
        return vision_tower

    def initialize_vision_modules(self, model_args, fsdp=None):
        vision_tower = model_args.vision_tower
        mm_vision_select_layer = model_args.mm_vision_select_layer
        mm_vision_select_feature = model_args.mm_vision_select_feature
        pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter

        self.config.mm_vision_tower = vision_tower

        vision_tower = build_vision_tower(model_args)

        if fsdp is not None and len(fsdp) > 0:
            self.vision_tower = [vision_tower]
        else:
            self.vision_tower = vision_tower

        self.config.use_mm_proj = True
        self.config.mm_projector_type = getattr(model_args, 'mm_projector_type', 'linear')
        self.config.mm_hidden_size = vision_tower.hidden_size
        self.config.mm_vision_select_layer = mm_vision_select_layer
        self.config.mm_vision_select_feature = mm_vision_select_feature

        if not hasattr(self, 'mm_projector'):
            self.mm_projector = build_vision_projector(self.config)

        if pretrain_mm_mlp_adapter is not None:
            mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu')

            def get_w(weights, keyword):
                return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k}

            self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector'))


@dataclass
class BaseModelOutputWithPastAndCrossAttentionsWithAttentionMask(ModelOutput):
    """
    Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.

            If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,
            hidden_size)` is output.
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
            `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads,
            encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
            `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
            input) to speed up sequential decoding.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
            weighted average in the cross-attention heads.
    """

    last_hidden_state: torch.FloatTensor = None
    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
    attention_mask: Optional[torch.LongTensor] = None


class LlavaT5Config(T5Config):
    model_type = "llava_t5"


class LlavaT5Stack(LlavaMetaT5Model, T5Stack):
    config_class = LlavaT5Config

    def __init__(self, config: T5Config, embed_tokens=None):
        super(LlavaT5Stack, self).__init__(config, embed_tokens)


class LlavaT5ForConditionalGeneration(T5ForConditionalGeneration, LlavaMetaForConditionalGeneration):
    config_class = LlavaT5Config

    def __init__(self, config):
        super(T5ForConditionalGeneration, self).__init__(config)

        self.model_dim = config.d_model

        self.shared = nn.Embedding(config.vocab_size, config.d_model)

        encoder_config = copy.deepcopy(config)
        encoder_config.is_decoder = False
        encoder_config.use_cache = False
        encoder_config.is_encoder_decoder = False
        self.encoder = LlavaT5Stack(encoder_config, self.shared)

        decoder_config = copy.deepcopy(config)
        decoder_config.is_decoder = True
        decoder_config.is_encoder_decoder = False
        decoder_config.num_layers = config.num_decoder_layers
        self.decoder = T5Stack(decoder_config, self.shared)

        self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)

        self.post_init()

        self.model_parallel = False
        self.device_map = None

    def get_model(self):
        return self.encoder

    def get_encoder(self):
        return self.encoder

    def get_decoder(self):
        return self.decoder
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        images: Optional[torch.FloatTensor] = None,
        return_dict: Optional[bool] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.BoolTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        decoder_head_mask: Optional[torch.FloatTensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
    ) -> Union[Tuple, Seq2SeqLMOutput]:
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Reuse head_mask for the decoder when only one mask is given and the
        # encoder and decoder have the same number of layers.
        if head_mask is not None and decoder_head_mask is None:
            if self.config.num_layers == self.config.num_decoder_layers:
                decoder_head_mask = head_mask

        if encoder_outputs is None:
            # Splice projected image features into the encoder input before encoding.
            input_ids, attention_mask, inputs_embeds, _ = self.prepare_inputs_labels_for_multimodal(
                input_ids,
                attention_mask,
                None,
                images,
            )

            encoder_outputs = self.encoder(
                input_ids=input_ids,
                attention_mask=attention_mask,
                inputs_embeds=inputs_embeds,
                head_mask=head_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
            encoder_outputs = BaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )

        hidden_states = encoder_outputs[0]

        if self.model_parallel:
            torch.cuda.set_device(self.decoder.first_device)

        if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
            # Get decoder inputs by shifting the labels to the right.
            decoder_input_ids = self._shift_right(labels)

        # Set devices for model parallelism.
        if self.model_parallel:
            torch.cuda.set_device(self.decoder.first_device)
            hidden_states = hidden_states.to(self.decoder.first_device)
            if decoder_input_ids is not None:
                decoder_input_ids = decoder_input_ids.to(self.decoder.first_device)
            if attention_mask is not None:
                attention_mask = attention_mask.to(self.decoder.first_device)
            if decoder_attention_mask is not None:
                decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device)

        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            inputs_embeds=decoder_inputs_embeds,
            past_key_values=past_key_values,
            encoder_hidden_states=hidden_states,
            encoder_attention_mask=attention_mask,
            head_mask=decoder_head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = decoder_outputs[0]

        if self.model_parallel:
            torch.cuda.set_device(self.encoder.first_device)
            self.lm_head = self.lm_head.to(self.encoder.first_device)
            sequence_output = sequence_output.to(self.lm_head.weight.device)

        if self.config.tie_word_embeddings:
            # Rescale the decoder output before projecting onto the vocabulary.
            sequence_output = sequence_output * (self.model_dim**-0.5)

        lm_logits = self.lm_head(sequence_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss(ignore_index=-100)
            labels = labels.to(lm_logits.device)
            loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))

        if not return_dict:
            output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs
            return ((loss,) + output) if loss is not None else output

        return Seq2SeqLMOutput(
            loss=loss,
            logits=lm_logits,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )
    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        decoder_attention_mask=None,
        cross_attn_head_mask=None,
        use_cache=None,
        encoder_outputs=None,
        **kwargs,
    ):
        if past_key_values is not None:
            past_length = past_key_values[0][0].shape[2]

            if input_ids.shape[1] > past_length:
                remove_prefix_length = past_length
            else:
                remove_prefix_length = input_ids.shape[1] - 1

            input_ids = input_ids[:, remove_prefix_length:]

        return {
            "decoder_input_ids": input_ids,
            "past_key_values": past_key_values,
            "encoder_outputs": encoder_outputs,
            "attention_mask": attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
            "use_cache": use_cache,
            "images": kwargs.get("images", None),
        }
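

if __name__ == "__main__":
    # Minimal smoke-test sketch (illustrative only, not part of the model definition).
    # "path/to/llava-t5-checkpoint" is a hypothetical placeholder; its config is
    # expected to carry the mm_* fields consumed above (mm_vision_tower, mm_hidden_size, ...).
    model = LlavaT5ForConditionalGeneration.from_pretrained("path/to/llava-t5-checkpoint")
    vision_tower = model.get_encoder().get_vision_tower()
    if not vision_tower.is_loaded:
        vision_tower.load_model()  # delay_load=True defers the DonutSwin weights until here

    # A training-style forward pass would then look like:
    #   pixel_values = vision_tower.image_processor(chart_image, return_tensors="pt").pixel_values
    #   out = model(input_ids=input_ids, attention_mask=attention_mask,
    #               images=pixel_values, labels=labels)
    # where input_ids contain IMAGE_TOKEN_INDEX (-200) at the positions where the
    # projected image features should be spliced in.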