import torch
import torch.nn as nn
from transformers import PreTrainedModel, T5ForConditionalGeneration, T5Config, AutoTokenizer
from configuration_emuru import EmuruConfig
from diffusers import AutoencoderKL
from einops.layers.torch import Rearrange
from einops import rearrange, repeat


class Emuru(PreTrainedModel):
    """T5 encoder-decoder that autoregressively predicts slices of a VAE image latent,
    conditioned on tokenized text."""

    config_class = EmuruConfig

    def __init__(self, config):
        super().__init__(config)

        # Tokenizer for the textual conditioning.
        self.tokenizer = AutoTokenizer.from_pretrained(config.tokenizer_config)

        # T5 backbone initialized from scratch (random weights), with the vocabulary
        # resized to the tokenizer. The LM head is replaced by an identity so the
        # decoder outputs hidden states instead of vocabulary logits.
        t5_config = T5Config.from_pretrained(config.t5_config)
        t5_config.vocab_size = len(self.tokenizer)
        self.T5 = T5ForConditionalGeneration(t5_config)
        self.T5.lm_head = nn.Identity()
        self.sos = nn.Embedding(1, t5_config.d_model)  # learned start-of-sequence embedding

        # One query token flattens `slices_per_query` latent columns of shape
        # (vae_channels, 8), hence 8 * vae_channels * slices_per_query values per token.
        vae_latent_size = 8 * config.vae_channels * config.slices_per_query
        self.query_emb = nn.Linear(vae_latent_size, t5_config.d_model)  # latent slice -> T5 embedding
        self.t5_to_vae = nn.Linear(t5_config.d_model, vae_latent_size, bias=False)  # T5 hidden state -> latent slice

        # Non-trainable padding-slice reference and associated threshold; initialized
        # empty and expected to be set or loaded rather than optimized.
        self.padding_token = nn.Parameter(torch.empty((1, vae_latent_size)), requires_grad=False)
        self.padding_token_threshold = nn.Parameter(torch.empty(1), requires_grad=False)

        # Pretrained VAE used to encode/decode images; kept frozen.
        self.vae = AutoencoderKL.from_pretrained(config.vae_config)
        self.set_training(self.vae, False)

        # Rearrangements between VAE latents [B, C, H, W] and token sequences
        # [B, W // slices_per_query, vae_latent_size]; each token flattens
        # `slices_per_query` consecutive latent columns.
        self.query_rearrange = Rearrange('b c h (w q) -> b w (q c h)', q=config.slices_per_query)
        self.z_rearrange = Rearrange('b w (q c h) -> b c h (w q)', c=config.vae_channels, q=config.slices_per_query)
        self.z_rearrange_eval = Rearrange('w b (q c h) -> b c h (w q)', c=config.vae_channels, q=config.slices_per_query)

        # Reconstruction loss on predicted latent slices.
        self.mse_criterion = nn.MSELoss()

        self.init_weights()

    def set_training(self, model, training):
        """Switch `model` between train/eval mode and (un)freeze its parameters."""
        model.train() if training else model.eval()
        for param in model.parameters():
            param.requires_grad = training

    def forward(self, text=None, img=None, input_ids=None, attention_mask=None, length=None, noise=0):
        ...
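

if __name__ == "__main__":
    # Illustrative sketch, not part of the original module: it demonstrates that the
    # slice rearrangement used above round-trips a VAE-shaped latent. The values below
    # (vae_channels=1, slices_per_query=1, latent height 8) are assumptions chosen for
    # the demo, not necessarily the model's actual configuration.
    dummy_channels, dummy_q, dummy_h, dummy_w = 1, 1, 8, 16
    demo_query_rearrange = Rearrange('b c h (w q) -> b w (q c h)', q=dummy_q)
    demo_z_rearrange = Rearrange('b w (q c h) -> b c h (w q)', c=dummy_channels, q=dummy_q)

    z = torch.randn(2, dummy_channels, dummy_h, dummy_w)  # dummy latent [B, C, H, W]
    tokens = demo_query_rearrange(z)                       # [B, W // q, h * C * q]
    assert tokens.shape == (2, dummy_w // dummy_q, dummy_h * dummy_channels * dummy_q)
    assert torch.equal(demo_z_rearrange(tokens), z)        # the mapping is lossless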