import torch
import torch.nn as nn
from transformers import PreTrainedModel, T5ForConditionalGeneration, T5Config, AutoTokenizer
from configuration_emuru import EmuruConfig

from diffusers import AutoencoderKL
from einops.layers.torch import Rearrange
from einops import rearrange, repeat


class Emuru(PreTrainedModel):
    """Autoregressive text-image generator: a T5 backbone predicts VAE latent slices
    conditioned on text and on a style image."""
    config_class = EmuruConfig

    def __init__(self, config):
        super().__init__(config)

        self.tokenizer = AutoTokenizer.from_pretrained(config.tokenizer_config)

        # T5 backbone; the LM head is replaced with an identity so `logits` are d_model-sized
        # hidden states that can be projected into the VAE latent space.
        t5_config = T5Config.from_pretrained(config.t5_config)
        t5_config.vocab_size = len(self.tokenizer)
        self.T5 = T5ForConditionalGeneration(t5_config)
        self.T5.lm_head = nn.Identity()
        self.sos = nn.Embedding(1, t5_config.d_model)  # learned start-of-sequence embedding

        # Size of one flattened latent slice: latent height (assumed 8) * channels * slices per query.
        vae_latent_size = 8 * config.vae_channels * config.slices_per_query
        self.vae_to_t5 = nn.Linear(vae_latent_size, t5_config.d_model)
        self.t5_to_vae = nn.Linear(t5_config.d_model, vae_latent_size, bias=False)

        # Non-trainable parameters holding the reference padding latent and the cosine-similarity
        # threshold used by the 'latent' stopping criterion; filled offline (see compute_padding_token*).
        self.padding_token = nn.Parameter(torch.empty((1, vae_latent_size)), requires_grad=False)
        self.padding_token_threshold = nn.Parameter(torch.empty(1), requires_grad=False)

        # Frozen VAE used to encode style images and decode predicted latents.
        self.vae = AutoencoderKL.from_pretrained(config.vae_config)
        self.set_training(self.vae, False)

        # (b, c, h, w) latents <-> (b, w, q*c*h) slice sequences.
        self.query_rearrange = Rearrange('b c h (w q) -> b w (q c h)', q=config.slices_per_query)
        self.z_rearrange = Rearrange('b w (q c h) -> b c h (w q)', c=config.vae_channels, q=config.slices_per_query)

        self.mse_criterion = nn.MSELoss()

        self.init_weights()

    def set_training(self, model, training):
        """Switch a sub-module between train/eval mode and freeze/unfreeze its parameters."""
        model.train() if training else model.eval()
        for param in model.parameters():
            param.requires_grad = training

    def forward(self, img=None, input_ids=None, attention_mask=None, noise=0, **kwargs):
        """Teacher-forced training step: predict each next latent slice and return the MSE loss."""
        decoder_inputs_embeds, z_sequence, z = self._img_encode(img, noise)

        output = self.T5(input_ids, attention_mask=attention_mask, decoder_inputs_embeds=decoder_inputs_embeds)
        vae_latent = self.t5_to_vae(output.logits[:, :-1])
        pred_latent = self.z_rearrange(vae_latent)

        mse_loss = self.mse_criterion(vae_latent, z_sequence)
        return mse_loss, pred_latent, z

    def old_generate(self, text=None, img=None, z_sequence=None, input_ids=None, max_new_tokens=256,
                     stopping_criteria='latent', stopping_after=10, stopping_errors=1):
        """Legacy autoregressive sampling loop; superseded by `generate`."""
        assert text is not None or input_ids is not None, 'Either text or input_ids must be provided'
        assert img is not None or z_sequence is not None, 'Either img or z_sequence must be provided'

        if input_ids is None:
            input_ids = self.tokenizer(text, return_tensors='pt', padding=True).input_ids
            input_ids = input_ids.to(next(self.T5.parameters()).device)

        if z_sequence is None:
            _, z_sequence, _ = self._img_encode(img)
        z_sequence = [z_sequence]

        sos = repeat(self.sos.weight, '1 d -> b 1 d', b=input_ids.size(0))
        for _ in range(max_new_tokens):
            if len(z_sequence) == 0:
                decoder_inputs_embeds = sos
            else:
                decoder_inputs_embeds = self.vae_to_t5(torch.cat(z_sequence, dim=1))
                decoder_inputs_embeds = torch.cat([sos, decoder_inputs_embeds], dim=1)
            output = self.T5(input_ids, decoder_inputs_embeds=decoder_inputs_embeds)
            vae_latent = self.t5_to_vae(output.logits[:, -1:])
            z_sequence.append(vae_latent)

            if stopping_criteria == 'latent':
                # Stop when the last `stopping_after` predicted slices are close enough to the
                # padding latent, allowing up to `stopping_errors` mismatches.
                curr_z_sequence = torch.cat(z_sequence, dim=1)
                pad_token = repeat(self.padding_token, '1 d -> b 1 d', b=input_ids.size(0)).to(decoder_inputs_embeds.device)
                similarity = torch.nn.functional.cosine_similarity(curr_z_sequence, pad_token, dim=-1)
                similarity = similarity[:, -stopping_after:] > self.padding_token_threshold
                if torch.all(similarity.sum(-1) >= (stopping_after - stopping_errors)):
                    z_sequence = [curr_z_sequence]
                    break
            elif stopping_criteria == 'pixel':
                raise NotImplementedError

        z_sequence = torch.cat(z_sequence, dim=1)
        img = torch.clamp(self.vae.decode(self.z_rearrange(z_sequence)).sample, -1, 1)
        return img

    def generate(self,
                 style_text=None,
                 gen_text=None,
                 style_img=None,
                 input_ids=None,
                 z_sequence=None,
                 max_new_tokens=256,
                 stopping_criteria='latent',
                 stopping_after=10,
                 stopping_patience=1,
                 trim_image=True):
        """Autoregressively generate an image rendering `gen_text` in the style of `style_img`
        (whose transcription is `style_text`); returns the decoded image and the latent sequence."""
        assert (gen_text is not None and style_text is not None) or input_ids is not None, 'Either gen_text and style_text or input_ids must be provided'
        assert style_img is not None or z_sequence is not None, 'Either style_img or z_sequence must be provided'

        if input_ids is None:
            input_ids = self.tokenizer(gen_text + ' ' + style_text, return_tensors='pt', padding=True).input_ids
            input_ids = input_ids.to(self.device)

        if z_sequence is None:
            _, z_sequence, _ = self._img_encode(style_img)
        z_sequence = [z_sequence]

        sos = repeat(self.sos.weight, '1 d -> b 1 d', b=input_ids.size(0))
        pad_token = repeat(self.padding_token, '1 d -> b 1 d', b=input_ids.size(0))

        for _ in range(max_new_tokens):
            if len(z_sequence) == 0:
                decoder_inputs_embeds = sos
            else:
                decoder_inputs_embeds = self.vae_to_t5(torch.cat(z_sequence, dim=1))
                decoder_inputs_embeds = torch.cat([sos, decoder_inputs_embeds], dim=1)
            output = self.T5(input_ids, decoder_inputs_embeds=decoder_inputs_embeds)
            vae_latent = self.t5_to_vae(output.logits[:, -1:])
            z_sequence.append(vae_latent)

            if stopping_criteria == 'latent':
                # Stop once the last `stopping_after` slices look like padding (up to
                # `stopping_patience` mismatches); optionally trim the padding columns
                # (the trim slice assumes batch size 1).
                curr_z_sequence = torch.cat(z_sequence, dim=1)
                similarity = torch.nn.functional.cosine_similarity(curr_z_sequence, pad_token, dim=-1)
                similarity = similarity[:, -stopping_after:] > self.padding_token_threshold
                if torch.all(similarity.sum(-1) >= (stopping_after - stopping_patience)):
                    z_sequence = [curr_z_sequence[:, :-similarity.sum(-1)]] if trim_image else [curr_z_sequence]
                    break
            elif stopping_criteria == 'pixel':
                raise NotImplementedError

        z_sequence = torch.cat(z_sequence, dim=1)
        img = torch.clamp(self.vae.decode(self.z_rearrange(z_sequence)).sample, -1, 1)
        return img, z_sequence

    def _img_encode(self, img, noise=0):
        """Encode an image with the VAE and build the T5 decoder inputs (SOS + projected latent slices)."""
        posterior = self.vae.encode(img.float())
        z = posterior.latent_dist.sample()
        z_sequence = self.query_rearrange(z)

        # Optionally perturb the teacher-forcing inputs with Gaussian noise.
        noise_sequence = z_sequence
        if noise > 0:
            noise_sequence = z_sequence + torch.randn_like(z_sequence) * noise

        decoder_inputs_embeds = self.vae_to_t5(noise_sequence)
        sos = repeat(self.sos.weight, '1 d -> b 1 d', b=decoder_inputs_embeds.size(0))
        decoder_inputs_embeds = torch.cat([sos, decoder_inputs_embeds], dim=1)
        return decoder_inputs_embeds, z_sequence, z

    def compute_padding_token(self):
        """Compute and store the reference padding latent in `self.padding_token`."""
        raise NotImplementedError("compute_padding_token not implemented")

    def compute_padding_token_threshold(self):
        """Compute and store the cosine-similarity threshold in `self.padding_token_threshold`."""
        raise NotImplementedError("compute_padding_token_threshold not implemented")