diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..19f846cfea6e8d0869d82423a206db6e52aaa6ff 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+*.jpg filter=lfs diff=lfs merge=lfs -text
+*.png filter=lfs diff=lfs merge=lfs -text
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..2b909e8c2d98b4ff3bf744a9b54d94084312eb8f
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,146 @@
+datasets/*
+experiments/*
+results/*
+tb_logger/*
+wandb/*
+tmp/*
+weights/*
+inputs/*
+models/*
+comparisons/*
+flux_dev_fp8_quantized_model.pth
+array_outputs/*
+
+*.DS_Store
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+pip-wheel-metadata/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+.idea/
\ No newline at end of file
diff --git a/app.py b/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..185acc3918438ea6399123b091204dd9d21f26e2
--- /dev/null
+++ b/app.py
@@ -0,0 +1,461 @@
+import time
+
+import gradio as gr
+import spaces
+import numpy as np
+import torch
+from einops import rearrange, repeat
+from PIL import Image
+
+from flux.sampling import denoise, get_noise, get_schedule, prepare, rf_denoise, rf_inversion, unpack
+from flux.util import (
+ SamplingOptions,
+ load_ae,
+ load_clip,
+ load_flow_model,
+ load_flow_model_quintized,
+ load_t5,
+)
+from pulid.pipeline_flux import PuLIDPipeline
+from pulid.utils import resize_numpy_image_long, seed_everything
+
+
+def get_models(name: str, device: torch.device, offload: bool, fp8: bool):
+ t5 = load_t5(device, max_length=128)
+ clip = load_clip(device)
+ if fp8:
+ model = load_flow_model_quintized(name, device="cpu" if offload else device)
+ else:
+ model = load_flow_model(name, device="cpu" if offload else device)
+ model.eval()
+ ae = load_ae(name, device="cpu" if offload else device)
+ return model, ae, t5, clip
+
+
+class FluxGenerator:
+ def __init__(self, model_name: str, device: str, offload: bool, aggressive_offload: bool, args):
+ self.device = torch.device(device)
+ self.offload = offload
+ self.aggressive_offload = aggressive_offload
+ self.model_name = model_name
+ self.model, self.ae, self.t5, self.clip_model = get_models(
+ model_name,
+ device=self.device,
+ offload=self.offload,
+ fp8=args.fp8,
+ )
+ self.pulid_model = PuLIDPipeline(self.model, device="cpu" if offload else device, weight_dtype=torch.bfloat16,
+ onnx_provider=args.onnx_provider)
+ if offload:
+ self.pulid_model.face_helper.face_det.mean_tensor = self.pulid_model.face_helper.face_det.mean_tensor.to(torch.device("cuda"))
+ self.pulid_model.face_helper.face_det.device = torch.device("cuda")
+ self.pulid_model.face_helper.device = torch.device("cuda")
+ self.pulid_model.device = torch.device("cuda")
+ self.pulid_model.load_pretrain(args.pretrained_model, version=args.version)
+
+ # function to encode an image into latents
+ def encode_image_to_latents(self, img, opts):
+ """
+ Opposite of decode: Takes a PIL image and encodes it into latents (x).
+ """
+ t0 = time.perf_counter()
+
+        # 1) Resize to the sampling resolution and convert to a tensor
+        img = img.resize((opts.width, opts.height), resample=Image.LANCZOS)
+
+        # Map pixel values from [0, 255] directly to [-1, 1]
+ x = np.array(img).astype(np.float32)
+ x = torch.from_numpy(x) # shape: (H, W, C)
+ x = (x / 127.5) - 1.0 # now in [-1, 1]
+ x = rearrange(x, "h w c -> 1 c h w") # shape: (1, C, H, W)
+
+ # Move encoder to device if you are offloading
+ if self.offload:
+ self.ae.encoder.to(self.device)
+
+ x = x.to(self.device, dtype=torch.bfloat16)
+
+ # 2) Encode with autocast
+ with torch.autocast(device_type=self.device.type, dtype=torch.bfloat16):
+ x = self.ae.encode(x)
+
+ x = x.to(torch.bfloat16)
+
+
+ # 3) Offload if needed
+ if self.offload:
+ self.ae.encoder.cpu()
+ torch.cuda.empty_cache()
+
+ t1 = time.perf_counter()
+ print(f"Encoded in {t1 - t0:.2f} seconds.")
+
+ return x
+
+ @spaces.GPU
+ @torch.inference_mode()
+ def generate_image(
+ self,
+ prompt: str,
+ id_image = None,
+ width: int = 512,
+ height: int = 512,
+ num_steps: int = 20,
+ start_step: int = 0,
+ guidance: float = 4.0,
+ seed: int = -1,
+ id_weight: float = 1.0,
+ neg_prompt: str = "",
+ true_cfg: float = 1.0,
+ timestep_to_start_cfg: int = 1,
+ max_sequence_length: int = 128,
+ gamma: float = 0.5,
+ eta: float = 0.7,
+ s: float = 0,
+ tau: float = 5,
+ perform_inversion: bool = True,
+ perform_reconstruction: bool = False,
+ perform_editing: bool = True,
+ inversion_true_cfg: float = 1.0,
+ ):
+ """
+ Core function that performs the image generation.
+ """
+ self.t5.max_length = max_sequence_length
+
+ # If seed == -1, random
+ seed = int(seed)
+ if seed == -1:
+ seed = None
+
+ opts = SamplingOptions(
+ prompt=prompt,
+ width=width,
+ height=height,
+ num_steps=num_steps,
+ guidance=guidance,
+ seed=seed,
+ )
+
+ if opts.seed is None:
+ opts.seed = torch.Generator(device="cpu").seed()
+
+ seed_everything(opts.seed)
+
+ print(f"Generating prompt: '{opts.prompt}' (seed={opts.seed})...")
+ t0 = time.perf_counter()
+
+ use_true_cfg = abs(true_cfg - 1.0) > 1e-6
+
+
+ # 1) Prepare input noise
+ noise = get_noise(
+ num_samples=1,
+ height=opts.height,
+ width=opts.width,
+ device=self.device,
+ dtype=torch.bfloat16,
+ seed=opts.seed,
+ )
+ bs, c, h, w = noise.shape
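+        # Pack 2x2 latent patches into tokens: (b, c, h, w) -> (b, h*w/4, c*4), matching FLUX's packed latent layout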
+ noise = rearrange(noise, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=2, pw=2)
+ if noise.shape[0] == 1 and bs > 1:
+ noise = repeat(noise, "1 ... -> bs ...", bs=bs)
+        # Encode the input image into VAE latents
+ x = self.encode_image_to_latents(id_image, opts)
+
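+        # Build the sampling schedule; x.shape[-1] * x.shape[-2] // 4 is the number of packed latent tokens (shift=False here)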
+ timesteps = get_schedule(opts.num_steps, x.shape[-1] * x.shape[-2] // 4, shift=False)
+
+ # 2) Prepare text embeddings
+ if self.offload:
+ self.t5 = self.t5.to(self.device)
+ self.clip_model = self.clip_model.to(self.device)
+
+ inp = prepare(t5=self.t5, clip=self.clip_model, img=x, prompt=opts.prompt)
+ inp_inversion = prepare(t5=self.t5, clip=self.clip_model, img=x, prompt="")
+ inp_neg = None
+ if use_true_cfg:
+ inp_neg = prepare(t5=self.t5, clip=self.clip_model, img=x, prompt=neg_prompt)
+
+ # Offload text encoders, load ID detection to GPU
+ if self.offload:
+ self.t5 = self.t5.cpu()
+ self.clip_model = self.clip_model.cpu()
+ torch.cuda.empty_cache()
+ self.pulid_model.components_to_device(torch.device("cuda"))
+
+ # 3) ID Embeddings (optional)
+ id_embeddings = None
+ uncond_id_embeddings = None
+ if id_image is not None:
+ id_image = np.array(id_image)
+ id_image = resize_numpy_image_long(id_image, 1024)
+ id_embeddings, uncond_id_embeddings = self.pulid_model.get_id_embedding(id_image, cal_uncond=use_true_cfg)
+ else:
+ id_embeddings = None
+ uncond_id_embeddings = None
+
+ # Offload ID pipeline, load main FLUX model to GPU
+ if self.offload:
+ self.pulid_model.components_to_device(torch.device("cpu"))
+ torch.cuda.empty_cache()
+
+ if self.aggressive_offload:
+ self.model.components_to_gpu()
+ else:
+ self.model = self.model.to(self.device)
+
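+        # 4) Inversion / editing / reconstruction passes
+        # y_0 holds the packed latents of the input image, used as the structural reference for rf_denoise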
+ y_0 = inp["img"].clone().detach()
+
+ inverted = None
+ if perform_inversion:
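+            # RF inversion: drive the image latents toward the target noise y_1 along the rectified flow; gamma sets the controller guidance strength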
+ inverted = rf_inversion(
+ self.model,
+ **inp_inversion,
+ timesteps=timesteps,
+ guidance=opts.guidance,
+ id=id_embeddings,
+ id_weight=id_weight,
+ start_step=start_step,
+ uncond_id=uncond_id_embeddings,
+ true_cfg=inversion_true_cfg,
+ timestep_to_start_cfg=timestep_to_start_cfg,
+ neg_txt=inp_neg["txt"] if use_true_cfg else None,
+ neg_txt_ids=inp_neg["txt_ids"] if use_true_cfg else None,
+ neg_vec=inp_neg["vec"] if use_true_cfg else None,
+ aggressive_offload=self.aggressive_offload,
+ y_1=noise,
+ gamma=gamma
+ )
+
+ img = inverted
+ else:
+ img = noise
+ inp["img"] = img
+ inp_inversion["img"] = img
+
+ recon = None
+ if perform_reconstruction:
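+            # Reconstruction pass: denoise the inverted latents with the empty inversion prompt back toward y_0 (a fidelity check for the inversion)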
+ recon = rf_denoise(
+ self.model,
+ **inp_inversion,
+ timesteps=timesteps,
+ guidance=opts.guidance,
+ id=id_embeddings,
+ id_weight=id_weight,
+ start_step=start_step,
+ uncond_id=uncond_id_embeddings,
+ true_cfg=inversion_true_cfg,
+ timestep_to_start_cfg=timestep_to_start_cfg,
+ neg_txt=inp_neg["txt"] if use_true_cfg else None,
+ neg_txt_ids=inp_neg["txt_ids"] if use_true_cfg else None,
+ neg_vec=inp_neg["vec"] if use_true_cfg else None,
+ aggressive_offload=self.aggressive_offload,
+ y_0=y_0,
+ eta=eta,
+ s=s,
+ tau=tau,
+ )
+
+ edited = None
+ if perform_editing:
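+            # Editing pass: denoise the inverted latents with the edit prompt; eta, s, and tau control how strongly and for how long the trajectory is pulled back toward y_0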
+ edited = rf_denoise(
+ self.model,
+ **inp,
+ timesteps=timesteps,
+ guidance=opts.guidance,
+ id=id_embeddings,
+ id_weight=id_weight,
+ start_step=start_step,
+ uncond_id=uncond_id_embeddings,
+ true_cfg=true_cfg,
+ timestep_to_start_cfg=timestep_to_start_cfg,
+ neg_txt=inp_neg["txt"] if use_true_cfg else None,
+ neg_txt_ids=inp_neg["txt_ids"] if use_true_cfg else None,
+ neg_vec=inp_neg["vec"] if use_true_cfg else None,
+ aggressive_offload=self.aggressive_offload,
+ y_0=y_0,
+ eta=eta,
+ s=s,
+ tau=tau,
+ )
+
+        # Offload the FLUX model, load the autoencoder decoder
+ if self.offload:
+ self.model.cpu()
+ torch.cuda.empty_cache()
+ self.ae.decoder.to(x.device)
+
+ # 5) Decode latents
+ if edited is not None:
+ edited = unpack(edited.float(), opts.height, opts.width)
+ with torch.autocast(device_type=self.device.type, dtype=torch.bfloat16):
+ edited = self.ae.decode(edited)
+
+ if inverted is not None:
+ inverted = unpack(inverted.float(), opts.height, opts.width)
+ with torch.autocast(device_type=self.device.type, dtype=torch.bfloat16):
+ inverted = self.ae.decode(inverted)
+
+ if recon is not None:
+ recon = unpack(recon.float(), opts.height, opts.width)
+ with torch.autocast(device_type=self.device.type, dtype=torch.bfloat16):
+ recon = self.ae.decode(recon)
+
+ if self.offload:
+ self.ae.decoder.cpu()
+ torch.cuda.empty_cache()
+
+ t1 = time.perf_counter()
+ print(f"Done in {t1 - t0:.2f} seconds.")
+
+ # Convert to PIL
+ if edited is not None:
+ edited = edited.clamp(-1, 1)
+ edited = rearrange(edited[0], "c h w -> h w c")
+ edited = Image.fromarray((127.5 * (edited + 1.0)).cpu().byte().numpy())
+
+ if inverted is not None:
+ inverted = inverted.clamp(-1, 1)
+ inverted = rearrange(inverted[0], "c h w -> h w c")
+ inverted = Image.fromarray((127.5 * (inverted + 1.0)).cpu().byte().numpy())
+
+ if recon is not None:
+ recon = recon.clamp(-1, 1)
+ recon = rearrange(recon[0], "c h w -> h w c")
+ recon = Image.fromarray((127.5 * (recon + 1.0)).cpu().byte().numpy())
+
+ return edited, str(opts.seed), self.pulid_model.debug_img_list
+
+# Paper: PuLID: Pure and Lightning ID Customization via Contrastive Alignment | Codes: GitHub
+_HEADER_ = '''
+# Tight Inversion for Portrait Editing with FLUX
+
+❗️❗️❗️**Tips:**
+Provide a portrait image and an edit prompt. You can try the examples below or upload your own image.
+Adjust the id weight to control the faithfulness of the generated image to the input image.
+''' # noqa E501
+_CITE_ = r"""
+""" # noqa E501
+
+
+def create_demo(args, model_name: str, device: str = "cuda" if torch.cuda.is_available() else "cpu",
+ offload: bool = False, aggressive_offload: bool = False):
+ generator = FluxGenerator(model_name, device, offload, aggressive_offload, args)
+
+ with gr.Blocks() as demo:
+ gr.Markdown(_HEADER_)
+
+ with gr.Row():
+ with gr.Column():
+ prompt = gr.Textbox(label="Prompt", value="portrait, color, cinematic")
+ id_image = gr.Image(label="ID Image", type="pil")
+ id_weight = gr.Slider(0.0, 1.0, 0.4, step=0.05, label="id weight")
+
+ width = gr.Slider(256, 1536, 1024, step=16, label="Width", visible=args.dev)
+ height = gr.Slider(256, 1536, 1024, step=16, label="Height", visible=args.dev)
+ num_steps = gr.Slider(1, 28, 16, step=1, label="Number of steps")
+ guidance = gr.Slider(1.0, 10.0, 3.5, step=0.1, label="Guidance")
+
+                with gr.Accordion("Advanced Options (True CFG: true_cfg_scale=1 means fake CFG, >1 means true CFG)", open=False):  # noqa E501
+ neg_prompt = gr.Textbox(
+ label="Negative Prompt",
+ value="")
+ true_cfg = gr.Slider(1.0, 10.0, 3.5, step=0.1, label="true CFG scale")
+ timestep_to_start_cfg = gr.Slider(0, 20, 1, step=1, label="timestep to start cfg", visible=args.dev)
+ start_step = gr.Slider(0, 10, 0, step=1, label="timestep to start inserting ID")
+ seed = gr.Textbox(-1, label="Seed (-1 for random)")
+ max_sequence_length = gr.Slider(128, 512, 128, step=128,
+ label="max_sequence_length for prompt (T5), small will be faster")
+ gr.Markdown("### RF Inversion Options")
+ gamma = gr.Slider(0.0, 1.0, 0.5, step=0.1, label="gamma")
+ eta = gr.Slider(0.0, 1.0, 0.7, step=0.1, label="eta")
+ s = gr.Slider(0.0, 1.0, 0.0, step=0.1, label="s")
+ tau = gr.Slider(0, 20, 2, step=1, label="tau")
+
+ generate_btn = gr.Button("Generate")
+
+ with gr.Column():
+ output_image = gr.Image(label="Generated Image")
+ seed_output = gr.Textbox(label="Used Seed")
+ intermediate_output = gr.Gallery(label='Output', elem_id="gallery", visible=args.dev)
+ gr.Markdown(_CITE_)
+
+ with gr.Row(), gr.Column():
+ gr.Markdown("## Examples")
+ example_inps = [
+ # [
+ # 'a portrait of a vampire',
+ # 'example_inputs/unsplash/krisna-putra-pratama-lKF-MdtuIss-unsplash.jpg',
+ # 0.4, 3.5, 42, 3.5
+ # ],
+ [
+ 'a portrait of a zombie',
+ 'example_inputs/unsplash/baruk-granda-cfLL_jHQ-Iw-unsplash.jpg',
+ 0.4, 3.5, 42, 5.0
+ ],
+ [
+ 'a portrait of an elf',
+ 'example_inputs/unsplash/rahmat-alizada-7PwFKOgyoKo-unsplash.jpg',
+ 0.5, 3.5, 42, 5.0
+ ],
+ [
+ 'a portrait of a clown',
+ 'example_inputs/unsplash/lhon-karwan-11tbHtK5STE-unsplash.jpg',
+ 0.5, 3.5, 42, 3.5
+ ],
+ [
+ 'a portrait of an elf',
+ 'example_inputs/unsplash/masoud-razeghi--qsrZhXPius-unsplash.jpg',
+ 0.5, 3.5, 42, 5.0
+ ],
+ # [
+ # 'a portrait of a pirate',
+ # 'example_inputs/unsplash/mina-rad-AEVUFpDGxZM-unsplash.jpg',
+ # 0.3, 3.5, 42, 3.5
+ # ],
+ [
+ 'a portrait of a superhero',
+ 'example_inputs/unsplash/gus-tu-njana-Mf4MN7MZqcE-unsplash.jpg',
+ 0.2, 3.5, 42, 5.0
+ ],
+ ]
+ gr.Examples(examples=example_inps, inputs=[prompt, id_image, id_weight, guidance, seed, true_cfg])
+
+ generate_btn.click(
+ fn=generator.generate_image,
+ inputs=[prompt, id_image, width, height, num_steps, start_step, guidance, seed, id_weight, neg_prompt,
+ true_cfg, timestep_to_start_cfg, max_sequence_length, gamma, eta, s, tau],
+ outputs=[output_image, seed_output, intermediate_output],
+ )
+
+ return demo
+
+
+if __name__ == "__main__":
+ import argparse
+
+ parser = argparse.ArgumentParser(description="PuLID for FLUX.1-dev")
+ parser.add_argument('--version', type=str, default='v0.9.1', help='version of the model', choices=['v0.9.0', 'v0.9.1'])
+ parser.add_argument("--name", type=str, default="flux-dev", choices=list('flux-dev'),
+ help="currently only support flux-dev")
+ parser.add_argument("--device", type=str, default="cuda", help="Device to use")
+ parser.add_argument("--offload", action="store_true", help="Offload model to CPU when not in use")
+ parser.add_argument("--aggressive_offload", action="store_true", help="Offload model more aggressively to CPU when not in use, for 24G GPUs")
+ parser.add_argument("--fp8", action="store_true", help="use flux-dev-fp8 model")
+ parser.add_argument("--onnx_provider", type=str, default="gpu", choices=["gpu", "cpu"],
+ help="set onnx_provider to cpu (default gpu) can help reduce RAM usage, and when combined with"
+ "fp8 option, the peak RAM is under 15GB")
+ parser.add_argument("--port", type=int, default=8080, help="Port to use")
+ parser.add_argument("--dev", action='store_true', help="Development mode")
+ parser.add_argument("--pretrained_model", type=str, help='for development')
+ args = parser.parse_args()
+
+ if args.aggressive_offload:
+ args.offload = True
+
+ demo = create_demo(args, args.name, args.device, args.offload, args.aggressive_offload)
+ demo.launch(server_name='0.0.0.0', server_port=args.port, ssr_mode=False)
diff --git a/eva_clip/__init__.py b/eva_clip/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..fa2d014bbfe644b1e247758116bbf1b184738fe5
--- /dev/null
+++ b/eva_clip/__init__.py
@@ -0,0 +1,11 @@
+from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
+from .factory import create_model, create_model_and_transforms, create_model_from_pretrained, get_tokenizer, create_transforms
+from .factory import list_models, add_model_config, get_model_config, load_checkpoint
+from .loss import ClipLoss
+from .model import CLIP, CustomCLIP, CLIPTextCfg, CLIPVisionCfg,\
+ convert_weights_to_lp, convert_weights_to_fp16, trace_model, get_cast_dtype
+from .openai import load_openai_model, list_openai_models
+from .pretrained import list_pretrained, list_pretrained_models_by_tag, list_pretrained_tags_by_model,\
+ get_pretrained_url, download_pretrained_from_url, is_pretrained_cfg, get_pretrained_cfg, download_pretrained
+from .tokenizer import SimpleTokenizer, tokenize
+from .transform import image_transform
\ No newline at end of file
diff --git a/eva_clip/bpe_simple_vocab_16e6.txt.gz b/eva_clip/bpe_simple_vocab_16e6.txt.gz
new file mode 100644
index 0000000000000000000000000000000000000000..36a15856e00a06a9fbed8cdd34d2393fea4a3113
--- /dev/null
+++ b/eva_clip/bpe_simple_vocab_16e6.txt.gz
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:924691ac288e54409236115652ad4aa250f48203de50a9e4722a6ecd48d6804a
+size 1356917
diff --git a/eva_clip/constants.py b/eva_clip/constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..a670bb3fab442baeb9af53b91c312e6982af57ee
--- /dev/null
+++ b/eva_clip/constants.py
@@ -0,0 +1,2 @@
+OPENAI_DATASET_MEAN = (0.48145466, 0.4578275, 0.40821073)
+OPENAI_DATASET_STD = (0.26862954, 0.26130258, 0.27577711)
diff --git a/eva_clip/eva_vit_model.py b/eva_clip/eva_vit_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..51db88cf0c7b5d7a43f2be80bc59abb6c859c4b4
--- /dev/null
+++ b/eva_clip/eva_vit_model.py
@@ -0,0 +1,548 @@
+# --------------------------------------------------------
+# Adapted from https://github.com/microsoft/unilm/tree/master/beit
+# --------------------------------------------------------
+import math
+import os
+from functools import partial
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+try:
+ from timm.models.layers import drop_path, to_2tuple, trunc_normal_
+except ImportError:
+ from timm.layers import drop_path, to_2tuple, trunc_normal_
+
+from .transformer import PatchDropout
+from .rope import VisionRotaryEmbedding, VisionRotaryEmbeddingFast
+
+if os.getenv('ENV_TYPE') == 'deepspeed':
+ try:
+ from deepspeed.runtime.activation_checkpointing.checkpointing import checkpoint
+    except ImportError:
+ from torch.utils.checkpoint import checkpoint
+else:
+ from torch.utils.checkpoint import checkpoint
+
+try:
+ import xformers
+ import xformers.ops as xops
+ XFORMERS_IS_AVAILBLE = True
+except ImportError:
+ XFORMERS_IS_AVAILBLE = False
+
+class DropPath(nn.Module):
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
+ """
+ def __init__(self, drop_prob=None):
+ super(DropPath, self).__init__()
+ self.drop_prob = drop_prob
+
+ def forward(self, x):
+ return drop_path(x, self.drop_prob, self.training)
+
+ def extra_repr(self) -> str:
+ return 'p={}'.format(self.drop_prob)
+
+
+class Mlp(nn.Module):
+ def __init__(
+ self,
+ in_features,
+ hidden_features=None,
+ out_features=None,
+ act_layer=nn.GELU,
+ norm_layer=nn.LayerNorm,
+ drop=0.,
+ subln=False,
+
+ ):
+ super().__init__()
+ out_features = out_features or in_features
+ hidden_features = hidden_features or in_features
+ self.fc1 = nn.Linear(in_features, hidden_features)
+ self.act = act_layer()
+
+ self.ffn_ln = norm_layer(hidden_features) if subln else nn.Identity()
+
+ self.fc2 = nn.Linear(hidden_features, out_features)
+ self.drop = nn.Dropout(drop)
+
+ def forward(self, x):
+ x = self.fc1(x)
+ x = self.act(x)
+ # x = self.drop(x)
+        # commented out to match the original BERT implementation
+ x = self.ffn_ln(x)
+
+ x = self.fc2(x)
+ x = self.drop(x)
+ return x
+
+class SwiGLU(nn.Module):
+ def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.SiLU, drop=0.,
+ norm_layer=nn.LayerNorm, subln=False):
+ super().__init__()
+ out_features = out_features or in_features
+ hidden_features = hidden_features or in_features
+
+ self.w1 = nn.Linear(in_features, hidden_features)
+ self.w2 = nn.Linear(in_features, hidden_features)
+
+ self.act = act_layer()
+ self.ffn_ln = norm_layer(hidden_features) if subln else nn.Identity()
+ self.w3 = nn.Linear(hidden_features, out_features)
+
+ self.drop = nn.Dropout(drop)
+
+ def forward(self, x):
+ x1 = self.w1(x)
+ x2 = self.w2(x)
+ hidden = self.act(x1) * x2
+ x = self.ffn_ln(hidden)
+ x = self.w3(x)
+ x = self.drop(x)
+ return x
+
+class Attention(nn.Module):
+ def __init__(
+ self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.,
+ proj_drop=0., window_size=None, attn_head_dim=None, xattn=False, rope=None, subln=False, norm_layer=nn.LayerNorm):
+ super().__init__()
+ self.num_heads = num_heads
+ head_dim = dim // num_heads
+ if attn_head_dim is not None:
+ head_dim = attn_head_dim
+ all_head_dim = head_dim * self.num_heads
+ self.scale = qk_scale or head_dim ** -0.5
+
+ self.subln = subln
+ if self.subln:
+ self.q_proj = nn.Linear(dim, all_head_dim, bias=False)
+ self.k_proj = nn.Linear(dim, all_head_dim, bias=False)
+ self.v_proj = nn.Linear(dim, all_head_dim, bias=False)
+ else:
+ self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False)
+
+ if qkv_bias:
+ self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
+ self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
+ else:
+ self.q_bias = None
+ self.v_bias = None
+
+ if window_size:
+ self.window_size = window_size
+ self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
+ self.relative_position_bias_table = nn.Parameter(
+ torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH
+ # cls to token & token 2 cls & cls to cls
+
+ # get pair-wise relative position index for each token inside the window
+ coords_h = torch.arange(window_size[0])
+ coords_w = torch.arange(window_size[1])
+ coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
+ coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
+ relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
+ relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
+ relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
+ relative_coords[:, :, 1] += window_size[1] - 1
+ relative_coords[:, :, 0] *= 2 * window_size[1] - 1
+ relative_position_index = \
+ torch.zeros(size=(window_size[0] * window_size[1] + 1, ) * 2, dtype=relative_coords.dtype)
+ relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
+ relative_position_index[0, 0:] = self.num_relative_distance - 3
+ relative_position_index[0:, 0] = self.num_relative_distance - 2
+ relative_position_index[0, 0] = self.num_relative_distance - 1
+
+ self.register_buffer("relative_position_index", relative_position_index)
+ else:
+ self.window_size = None
+ self.relative_position_bias_table = None
+ self.relative_position_index = None
+
+ self.attn_drop = nn.Dropout(attn_drop)
+ self.inner_attn_ln = norm_layer(all_head_dim) if subln else nn.Identity()
+ # self.proj = nn.Linear(all_head_dim, all_head_dim)
+ self.proj = nn.Linear(all_head_dim, dim)
+ self.proj_drop = nn.Dropout(proj_drop)
+ self.xattn = xattn
+ self.xattn_drop = attn_drop
+
+ self.rope = rope
+
+ def forward(self, x, rel_pos_bias=None, attn_mask=None):
+ B, N, C = x.shape
+ if self.subln:
+ q = F.linear(input=x, weight=self.q_proj.weight, bias=self.q_bias)
+ k = F.linear(input=x, weight=self.k_proj.weight, bias=None)
+ v = F.linear(input=x, weight=self.v_proj.weight, bias=self.v_bias)
+
+ q = q.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3) # B, num_heads, N, C
+ k = k.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3)
+ v = v.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3)
+ else:
+
+ qkv_bias = None
+ if self.q_bias is not None:
+ qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias))
+
+ qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
+ qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) # 3, B, num_heads, N, C
+ q, k, v = qkv[0], qkv[1], qkv[2]
+
+ if self.rope:
+ # slightly fast impl
+ q_t = q[:, :, 1:, :]
+ ro_q_t = self.rope(q_t)
+ q = torch.cat((q[:, :, :1, :], ro_q_t), -2).type_as(v)
+
+ k_t = k[:, :, 1:, :]
+ ro_k_t = self.rope(k_t)
+ k = torch.cat((k[:, :, :1, :], ro_k_t), -2).type_as(v)
+
+ if self.xattn:
+ q = q.permute(0, 2, 1, 3) # B, num_heads, N, C -> B, N, num_heads, C
+ k = k.permute(0, 2, 1, 3)
+ v = v.permute(0, 2, 1, 3)
+
+ x = xops.memory_efficient_attention(
+ q, k, v,
+ p=self.xattn_drop,
+ scale=self.scale,
+ )
+ x = x.reshape(B, N, -1)
+ x = self.inner_attn_ln(x)
+ x = self.proj(x)
+ x = self.proj_drop(x)
+ else:
+ q = q * self.scale
+ attn = (q @ k.transpose(-2, -1))
+
+ if self.relative_position_bias_table is not None:
+ relative_position_bias = \
+ self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
+ self.window_size[0] * self.window_size[1] + 1,
+ self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH
+ relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
+ attn = attn + relative_position_bias.unsqueeze(0).type_as(attn)
+
+ if rel_pos_bias is not None:
+ attn = attn + rel_pos_bias.type_as(attn)
+
+ if attn_mask is not None:
+ attn_mask = attn_mask.bool()
+ attn = attn.masked_fill(~attn_mask[:, None, None, :], float("-inf"))
+
+ attn = attn.softmax(dim=-1)
+ attn = self.attn_drop(attn)
+
+ x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
+ x = self.inner_attn_ln(x)
+ x = self.proj(x)
+ x = self.proj_drop(x)
+ return x
+
+
+class Block(nn.Module):
+
+ def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
+ drop_path=0., init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm,
+ window_size=None, attn_head_dim=None, xattn=False, rope=None, postnorm=False,
+ subln=False, naiveswiglu=False):
+ super().__init__()
+ self.norm1 = norm_layer(dim)
+ self.attn = Attention(
+ dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
+ attn_drop=attn_drop, proj_drop=drop, window_size=window_size, attn_head_dim=attn_head_dim,
+ xattn=xattn, rope=rope, subln=subln, norm_layer=norm_layer)
+ # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
+ self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
+ self.norm2 = norm_layer(dim)
+ mlp_hidden_dim = int(dim * mlp_ratio)
+
+ if naiveswiglu:
+ self.mlp = SwiGLU(
+ in_features=dim,
+ hidden_features=mlp_hidden_dim,
+ subln=subln,
+ norm_layer=norm_layer,
+ )
+ else:
+ self.mlp = Mlp(
+ in_features=dim,
+ hidden_features=mlp_hidden_dim,
+ act_layer=act_layer,
+ subln=subln,
+ drop=drop
+ )
+
+ if init_values is not None and init_values > 0:
+ self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True)
+ self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True)
+ else:
+ self.gamma_1, self.gamma_2 = None, None
+
+ self.postnorm = postnorm
+
+ def forward(self, x, rel_pos_bias=None, attn_mask=None):
+ if self.gamma_1 is None:
+ if self.postnorm:
+ x = x + self.drop_path(self.norm1(self.attn(x, rel_pos_bias=rel_pos_bias, attn_mask=attn_mask)))
+ x = x + self.drop_path(self.norm2(self.mlp(x)))
+ else:
+ x = x + self.drop_path(self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias, attn_mask=attn_mask))
+ x = x + self.drop_path(self.mlp(self.norm2(x)))
+ else:
+ if self.postnorm:
+ x = x + self.drop_path(self.gamma_1 * self.norm1(self.attn(x, rel_pos_bias=rel_pos_bias, attn_mask=attn_mask)))
+ x = x + self.drop_path(self.gamma_2 * self.norm2(self.mlp(x)))
+ else:
+ x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias, attn_mask=attn_mask))
+ x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
+ return x
+
+
+class PatchEmbed(nn.Module):
+ """ Image to Patch Embedding
+ """
+ def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
+ super().__init__()
+ img_size = to_2tuple(img_size)
+ patch_size = to_2tuple(patch_size)
+ num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
+ self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
+ self.img_size = img_size
+ self.patch_size = patch_size
+ self.num_patches = num_patches
+
+ self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
+
+ def forward(self, x, **kwargs):
+ B, C, H, W = x.shape
+ # FIXME look at relaxing size constraints
+ assert H == self.img_size[0] and W == self.img_size[1], \
+ f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
+ x = self.proj(x).flatten(2).transpose(1, 2)
+ return x
+
+
+class RelativePositionBias(nn.Module):
+
+ def __init__(self, window_size, num_heads):
+ super().__init__()
+ self.window_size = window_size
+ self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
+ self.relative_position_bias_table = nn.Parameter(
+ torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH
+ # cls to token & token 2 cls & cls to cls
+
+ # get pair-wise relative position index for each token inside the window
+ coords_h = torch.arange(window_size[0])
+ coords_w = torch.arange(window_size[1])
+ coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
+ coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
+ relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
+ relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
+ relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
+ relative_coords[:, :, 1] += window_size[1] - 1
+ relative_coords[:, :, 0] *= 2 * window_size[1] - 1
+ relative_position_index = \
+ torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype)
+ relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
+ relative_position_index[0, 0:] = self.num_relative_distance - 3
+ relative_position_index[0:, 0] = self.num_relative_distance - 2
+ relative_position_index[0, 0] = self.num_relative_distance - 1
+
+ self.register_buffer("relative_position_index", relative_position_index)
+
+ def forward(self):
+ relative_position_bias = \
+ self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
+ self.window_size[0] * self.window_size[1] + 1,
+ self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH
+ return relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
+
+
+class EVAVisionTransformer(nn.Module):
+ """ Vision Transformer with support for patch or hybrid CNN input stage
+ """
+ def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
+ num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
+ drop_path_rate=0., norm_layer=nn.LayerNorm, init_values=None, patch_dropout=0.,
+ use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, rope=False,
+ use_mean_pooling=True, init_scale=0.001, grad_checkpointing=False, xattn=False, postnorm=False,
+ pt_hw_seq_len=16, intp_freq=False, naiveswiglu=False, subln=False):
+ super().__init__()
+
+ if not XFORMERS_IS_AVAILBLE:
+ xattn = False
+
+ self.image_size = img_size
+ self.num_classes = num_classes
+ self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
+
+ self.patch_embed = PatchEmbed(
+ img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
+ num_patches = self.patch_embed.num_patches
+
+ self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
+ # self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
+ if use_abs_pos_emb:
+ self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
+ else:
+ self.pos_embed = None
+ self.pos_drop = nn.Dropout(p=drop_rate)
+
+ if use_shared_rel_pos_bias:
+ self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads)
+ else:
+ self.rel_pos_bias = None
+
+ if rope:
+ half_head_dim = embed_dim // num_heads // 2
+ hw_seq_len = img_size // patch_size
+ self.rope = VisionRotaryEmbeddingFast(
+ dim=half_head_dim,
+ pt_seq_len=pt_hw_seq_len,
+ ft_seq_len=hw_seq_len if intp_freq else None,
+ # patch_dropout=patch_dropout
+ )
+ else:
+ self.rope = None
+
+ self.naiveswiglu = naiveswiglu
+
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
+ self.use_rel_pos_bias = use_rel_pos_bias
+ self.blocks = nn.ModuleList([
+ Block(
+ dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
+ drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
+ init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None,
+ xattn=xattn, rope=self.rope, postnorm=postnorm, subln=subln, naiveswiglu=naiveswiglu)
+ for i in range(depth)])
+ self.norm = nn.Identity() if use_mean_pooling else norm_layer(embed_dim)
+ self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None
+ self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
+
+ if self.pos_embed is not None:
+ trunc_normal_(self.pos_embed, std=.02)
+
+ trunc_normal_(self.cls_token, std=.02)
+ # trunc_normal_(self.mask_token, std=.02)
+
+ self.apply(self._init_weights)
+ self.fix_init_weight()
+
+ if isinstance(self.head, nn.Linear):
+ trunc_normal_(self.head.weight, std=.02)
+ self.head.weight.data.mul_(init_scale)
+ self.head.bias.data.mul_(init_scale)
+
+ # setting a patch_dropout of 0. would mean it is disabled and this function would be the identity fn
+ self.patch_dropout = PatchDropout(patch_dropout) if patch_dropout > 0. else nn.Identity()
+
+ self.grad_checkpointing = grad_checkpointing
+
+ def fix_init_weight(self):
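+        # Rescale residual-branch projection weights by 1/sqrt(2 * layer_id), the depth-dependent init fix used by BEiT/EVA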
+ def rescale(param, layer_id):
+ param.div_(math.sqrt(2.0 * layer_id))
+
+ for layer_id, layer in enumerate(self.blocks):
+ rescale(layer.attn.proj.weight.data, layer_id + 1)
+ if self.naiveswiglu:
+ rescale(layer.mlp.w3.weight.data, layer_id + 1)
+ else:
+ rescale(layer.mlp.fc2.weight.data, layer_id + 1)
+
+ def get_cast_dtype(self) -> torch.dtype:
+ return self.blocks[0].mlp.fc2.weight.dtype
+
+ def _init_weights(self, m):
+ if isinstance(m, nn.Linear):
+ trunc_normal_(m.weight, std=.02)
+ if m.bias is not None:
+ nn.init.constant_(m.bias, 0)
+ elif isinstance(m, nn.LayerNorm):
+ nn.init.constant_(m.bias, 0)
+ nn.init.constant_(m.weight, 1.0)
+
+ def get_num_layers(self):
+ return len(self.blocks)
+
+ def lock(self, unlocked_groups=0, freeze_bn_stats=False):
+ assert unlocked_groups == 0, 'partial locking not currently supported for this model'
+ for param in self.parameters():
+ param.requires_grad = False
+
+ @torch.jit.ignore
+ def set_grad_checkpointing(self, enable=True):
+ self.grad_checkpointing = enable
+
+ @torch.jit.ignore
+ def no_weight_decay(self):
+ return {'pos_embed', 'cls_token'}
+
+ def get_classifier(self):
+ return self.head
+
+ def reset_classifier(self, num_classes, global_pool=''):
+ self.num_classes = num_classes
+ self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
+
+ def forward_features(self, x, return_all_features=False, return_hidden=False, shuffle=False):
+
+ x = self.patch_embed(x)
+ batch_size, seq_len, _ = x.size()
+
+ if shuffle:
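+            # Shuffle patch order by permuting the positional embedding while keeping the cls token at index 0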
+ idx = torch.randperm(x.shape[1]) + 1
+ zero = torch.LongTensor([0, ])
+ idx = torch.cat([zero, idx])
+ pos_embed = self.pos_embed[:, idx]
+
+ cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
+ x = torch.cat((cls_tokens, x), dim=1)
+ if shuffle:
+ x = x + pos_embed
+ elif self.pos_embed is not None:
+ x = x + self.pos_embed
+ x = self.pos_drop(x)
+
+ # a patch_dropout of 0. would mean it is disabled and this function would do nothing but return what was passed in
+ if os.getenv('RoPE') == '1':
+ if self.training and not isinstance(self.patch_dropout, nn.Identity):
+ x, patch_indices_keep = self.patch_dropout(x)
+ self.rope.forward = partial(self.rope.forward, patch_indices_keep=patch_indices_keep)
+ else:
+ self.rope.forward = partial(self.rope.forward, patch_indices_keep=None)
+ x = self.patch_dropout(x)
+ else:
+ x = self.patch_dropout(x)
+
+ rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
+ hidden_states = []
+ for idx, blk in enumerate(self.blocks):
+ if (0 < idx <= 20) and (idx % 4 == 0) and return_hidden:
+ hidden_states.append(x)
+ if self.grad_checkpointing:
+ x = checkpoint(blk, x, (rel_pos_bias,))
+ else:
+ x = blk(x, rel_pos_bias=rel_pos_bias)
+
+ if not return_all_features:
+ x = self.norm(x)
+ if self.fc_norm is not None:
+ return self.fc_norm(x.mean(1)), hidden_states
+ else:
+ return x[:, 0], hidden_states
+ return x
+
+ def forward(self, x, return_all_features=False, return_hidden=False, shuffle=False):
+ if return_all_features:
+ return self.forward_features(x, return_all_features, return_hidden, shuffle)
+ x, hidden_states = self.forward_features(x, return_all_features, return_hidden, shuffle)
+ x = self.head(x)
+ if return_hidden:
+ return x, hidden_states
+ return x
diff --git a/eva_clip/factory.py b/eva_clip/factory.py
new file mode 100644
index 0000000000000000000000000000000000000000..ced8999997bf374b69f846bc73ea635fe8a6eb63
--- /dev/null
+++ b/eva_clip/factory.py
@@ -0,0 +1,517 @@
+import json
+import logging
+import os
+import pathlib
+import re
+from copy import deepcopy
+from pathlib import Path
+from typing import Optional, Tuple, Union, Dict, Any
+import torch
+
+from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
+from .model import CLIP, CustomCLIP, convert_weights_to_lp, convert_to_custom_text_state_dict,\
+ get_cast_dtype
+from .openai import load_openai_model
+from .pretrained import is_pretrained_cfg, get_pretrained_cfg, download_pretrained, list_pretrained_tags_by_model
+from .transform import image_transform
+from .tokenizer import HFTokenizer, tokenize
+from .utils import resize_clip_pos_embed, resize_evaclip_pos_embed, resize_visual_pos_embed, resize_eva_pos_embed
+
+
+_MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"]
+_MODEL_CONFIGS = {} # directory (model_name: config) of model architecture configs
+
+
+def _natural_key(string_):
+ return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]
+
+
+def _rescan_model_configs():
+ global _MODEL_CONFIGS
+
+ config_ext = ('.json',)
+ config_files = []
+ for config_path in _MODEL_CONFIG_PATHS:
+ if config_path.is_file() and config_path.suffix in config_ext:
+ config_files.append(config_path)
+ elif config_path.is_dir():
+ for ext in config_ext:
+ config_files.extend(config_path.glob(f'*{ext}'))
+
+ for cf in config_files:
+ with open(cf, "r", encoding="utf8") as f:
+ model_cfg = json.load(f)
+ if all(a in model_cfg for a in ('embed_dim', 'vision_cfg', 'text_cfg')):
+ _MODEL_CONFIGS[cf.stem] = model_cfg
+
+ _MODEL_CONFIGS = dict(sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0])))
+
+
+_rescan_model_configs() # initial populate of model config registry
+
+
+def list_models():
+ """ enumerate available model architectures based on config files """
+ return list(_MODEL_CONFIGS.keys())
+
+
+def add_model_config(path):
+ """ add model config path or file and update registry """
+ if not isinstance(path, Path):
+ path = Path(path)
+ _MODEL_CONFIG_PATHS.append(path)
+ _rescan_model_configs()
+
+
+def get_model_config(model_name):
+ if model_name in _MODEL_CONFIGS:
+ return deepcopy(_MODEL_CONFIGS[model_name])
+ else:
+ return None
+
+
+def get_tokenizer(model_name):
+ config = get_model_config(model_name)
+ tokenizer = HFTokenizer(config['text_cfg']['hf_tokenizer_name']) if 'hf_tokenizer_name' in config['text_cfg'] else tokenize
+ return tokenizer
+
+
+# loading openai CLIP weights when is_openai=True for training
+def load_state_dict(checkpoint_path: str, map_location: str='cpu', model_key: str='model|module|state_dict', is_openai: bool=False, skip_list: list=[]):
+ if is_openai:
+ model = torch.jit.load(checkpoint_path, map_location="cpu").eval()
+ state_dict = model.state_dict()
+ for key in ["input_resolution", "context_length", "vocab_size"]:
+ state_dict.pop(key, None)
+ else:
+ checkpoint = torch.load(checkpoint_path, map_location=map_location)
+ for mk in model_key.split('|'):
+ if isinstance(checkpoint, dict) and mk in checkpoint:
+ state_dict = checkpoint[mk]
+ break
+ else:
+ state_dict = checkpoint
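+        # Strip the 'module.' prefix that DistributedDataParallel adds to parameter names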
+ if next(iter(state_dict.items()))[0].startswith('module'):
+ state_dict = {k[7:]: v for k, v in state_dict.items()}
+
+ for k in skip_list:
+ if k in list(state_dict.keys()):
+ logging.info(f"Removing key {k} from pretrained checkpoint")
+ del state_dict[k]
+
+ if os.getenv('RoPE') == '1':
+ for k in list(state_dict.keys()):
+ if 'freqs_cos' in k or 'freqs_sin' in k:
+ del state_dict[k]
+ return state_dict
+
+
+
+def load_checkpoint(model, checkpoint_path, model_key="model|module|state_dict", strict=True):
+ state_dict = load_state_dict(checkpoint_path, model_key=model_key, is_openai=False)
+ # detect old format and make compatible with new format
+ if 'positional_embedding' in state_dict and not hasattr(model, 'positional_embedding'):
+ state_dict = convert_to_custom_text_state_dict(state_dict)
+ if 'text.logit_scale' in state_dict and hasattr(model, 'logit_scale'):
+ state_dict['logit_scale'] = state_dict['text.logit_scale']
+ del state_dict['text.logit_scale']
+
+ # resize_clip_pos_embed for CLIP and open CLIP
+ if 'visual.positional_embedding' in state_dict:
+ resize_clip_pos_embed(state_dict, model)
+ # specified to eva_vit_model
+ elif 'visual.pos_embed' in state_dict:
+ resize_evaclip_pos_embed(state_dict, model)
+
+ # resize_clip_pos_embed(state_dict, model)
+ incompatible_keys = model.load_state_dict(state_dict, strict=strict)
+ logging.info(f"incompatible_keys.missing_keys: {incompatible_keys.missing_keys}")
+ return incompatible_keys
+
+def load_clip_visual_state_dict(checkpoint_path: str, map_location: str='cpu', is_openai: bool=False, skip_list:list=[]):
+ state_dict = load_state_dict(checkpoint_path, map_location=map_location, is_openai=is_openai, skip_list=skip_list)
+
+ for k in list(state_dict.keys()):
+ if not k.startswith('visual.'):
+ del state_dict[k]
+ for k in list(state_dict.keys()):
+ if k.startswith('visual.'):
+ new_k = k[7:]
+ state_dict[new_k] = state_dict[k]
+ del state_dict[k]
+ return state_dict
+
+def load_clip_text_state_dict(checkpoint_path: str, map_location: str='cpu', is_openai: bool=False, skip_list:list=[]):
+ state_dict = load_state_dict(checkpoint_path, map_location=map_location, is_openai=is_openai, skip_list=skip_list)
+
+ for k in list(state_dict.keys()):
+ if k.startswith('visual.'):
+ del state_dict[k]
+ return state_dict
+
+def get_pretrained_tag(pretrained_model):
+ pretrained_model = pretrained_model.lower()
+ if "laion" in pretrained_model or "open_clip" in pretrained_model:
+ return "open_clip"
+ elif "openai" in pretrained_model:
+ return "clip"
+ elif "eva" in pretrained_model and "clip" in pretrained_model:
+ return "eva_clip"
+ else:
+ return "other"
+
+def load_pretrained_checkpoint(
+ model,
+ visual_checkpoint_path,
+ text_checkpoint_path,
+ strict=True,
+ visual_model=None,
+ text_model=None,
+ model_key="model|module|state_dict",
+ skip_list=[]):
+ visual_tag = get_pretrained_tag(visual_model)
+ text_tag = get_pretrained_tag(text_model)
+
+ logging.info(f"num of model state_dict keys: {len(model.state_dict().keys())}")
+ visual_incompatible_keys, text_incompatible_keys = None, None
+ if visual_checkpoint_path:
+ if visual_tag == "eva_clip" or visual_tag == "open_clip":
+ visual_state_dict = load_clip_visual_state_dict(visual_checkpoint_path, is_openai=False, skip_list=skip_list)
+ elif visual_tag == "clip":
+ visual_state_dict = load_clip_visual_state_dict(visual_checkpoint_path, is_openai=True, skip_list=skip_list)
+ else:
+ visual_state_dict = load_state_dict(visual_checkpoint_path, model_key=model_key, is_openai=False, skip_list=skip_list)
+
+ # resize_clip_pos_embed for CLIP and open CLIP
+ if 'positional_embedding' in visual_state_dict:
+ resize_visual_pos_embed(visual_state_dict, model)
+ # specified to EVA model
+ elif 'pos_embed' in visual_state_dict:
+ resize_eva_pos_embed(visual_state_dict, model)
+
+ visual_incompatible_keys = model.visual.load_state_dict(visual_state_dict, strict=strict)
+ logging.info(f"num of loaded visual_state_dict keys: {len(visual_state_dict.keys())}")
+ logging.info(f"visual_incompatible_keys.missing_keys: {visual_incompatible_keys.missing_keys}")
+
+ if text_checkpoint_path:
+ if text_tag == "eva_clip" or text_tag == "open_clip":
+ text_state_dict = load_clip_text_state_dict(text_checkpoint_path, is_openai=False, skip_list=skip_list)
+ elif text_tag == "clip":
+ text_state_dict = load_clip_text_state_dict(text_checkpoint_path, is_openai=True, skip_list=skip_list)
+ else:
+            text_state_dict = load_state_dict(text_checkpoint_path, model_key=model_key, is_openai=False, skip_list=skip_list)
+
+ text_incompatible_keys = model.text.load_state_dict(text_state_dict, strict=strict)
+
+ logging.info(f"num of loaded text_state_dict keys: {len(text_state_dict.keys())}")
+ logging.info(f"text_incompatible_keys.missing_keys: {text_incompatible_keys.missing_keys}")
+
+ return visual_incompatible_keys, text_incompatible_keys
+
+def create_model(
+ model_name: str,
+ pretrained: Optional[str] = None,
+ precision: str = 'fp32',
+ device: Union[str, torch.device] = 'cpu',
+ jit: bool = False,
+ force_quick_gelu: bool = False,
+ force_custom_clip: bool = False,
+ force_patch_dropout: Optional[float] = None,
+ pretrained_image: str = '',
+ pretrained_text: str = '',
+ pretrained_hf: bool = True,
+ pretrained_visual_model: str = None,
+ pretrained_text_model: str = None,
+ cache_dir: Optional[str] = None,
+ skip_list: list = [],
+):
+ model_name = model_name.replace('/', '-') # for callers using old naming with / in ViT names
+ if isinstance(device, str):
+ device = torch.device(device)
+
+ if pretrained and pretrained.lower() == 'openai':
+ logging.info(f'Loading pretrained {model_name} from OpenAI.')
+ model = load_openai_model(
+ model_name,
+ precision=precision,
+ device=device,
+ jit=jit,
+ cache_dir=cache_dir,
+ )
+ else:
+ model_cfg = get_model_config(model_name)
+ if model_cfg is not None:
+ logging.info(f'Loaded {model_name} model config.')
+ else:
+ logging.error(f'Model config for {model_name} not found; available models {list_models()}.')
+ raise RuntimeError(f'Model config for {model_name} not found.')
+
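+        # Expose the RoPE setting via an environment variable so the EVA vision tower (eva_vit_model) can pick it up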
+ if 'rope' in model_cfg.get('vision_cfg', {}):
+ if model_cfg['vision_cfg']['rope']:
+ os.environ['RoPE'] = "1"
+ else:
+ os.environ['RoPE'] = "0"
+
+ if force_quick_gelu:
+ # override for use of QuickGELU on non-OpenAI transformer models
+ model_cfg["quick_gelu"] = True
+
+ if force_patch_dropout is not None:
+ # override the default patch dropout value
+ model_cfg['vision_cfg']["patch_dropout"] = force_patch_dropout
+
+ cast_dtype = get_cast_dtype(precision)
+ custom_clip = model_cfg.pop('custom_text', False) or force_custom_clip or ('hf_model_name' in model_cfg['text_cfg'])
+
+
+ if custom_clip:
+ if 'hf_model_name' in model_cfg.get('text_cfg', {}):
+ model_cfg['text_cfg']['hf_model_pretrained'] = pretrained_hf
+ model = CustomCLIP(**model_cfg, cast_dtype=cast_dtype)
+ else:
+ model = CLIP(**model_cfg, cast_dtype=cast_dtype)
+
+ pretrained_cfg = {}
+ if pretrained:
+ checkpoint_path = ''
+ pretrained_cfg = get_pretrained_cfg(model_name, pretrained)
+ if pretrained_cfg:
+ checkpoint_path = download_pretrained(pretrained_cfg, cache_dir=cache_dir)
+ elif os.path.exists(pretrained):
+ checkpoint_path = pretrained
+
+ if checkpoint_path:
+ logging.info(f'Loading pretrained {model_name} weights ({pretrained}).')
+ load_checkpoint(model,
+ checkpoint_path,
+ model_key="model|module|state_dict",
+ strict=False
+ )
+ else:
+ error_str = (
+                    f'Pretrained weights ({pretrained}) not found for model {model_name}. '
+                    f'Available pretrained tags: {list_pretrained_tags_by_model(model_name)}.')
+ logging.warning(error_str)
+ raise RuntimeError(error_str)
+ else:
+ visual_checkpoint_path = ''
+ text_checkpoint_path = ''
+
+ if pretrained_image:
+ pretrained_visual_model = pretrained_visual_model.replace('/', '-') # for callers using old naming with / in ViT names
+ pretrained_image_cfg = get_pretrained_cfg(pretrained_visual_model, pretrained_image)
+ if 'timm_model_name' in model_cfg.get('vision_cfg', {}):
+ # pretrained weight loading for timm models set via vision_cfg
+ model_cfg['vision_cfg']['timm_model_pretrained'] = True
+ elif pretrained_image_cfg:
+ visual_checkpoint_path = download_pretrained(pretrained_image_cfg, cache_dir=cache_dir)
+ elif os.path.exists(pretrained_image):
+ visual_checkpoint_path = pretrained_image
+ else:
+ logging.warning(f'Pretrained weights ({visual_checkpoint_path}) not found for model {model_name}.visual.')
+ raise RuntimeError(f'Pretrained weights ({visual_checkpoint_path}) not found for model {model_name}.visual.')
+
+ if pretrained_text:
+ pretrained_text_model = pretrained_text_model.replace('/', '-') # for callers using old naming with / in ViT names
+ pretrained_text_cfg = get_pretrained_cfg(pretrained_text_model, pretrained_text)
+                if pretrained_text_cfg:
+ text_checkpoint_path = download_pretrained(pretrained_text_cfg, cache_dir=cache_dir)
+ elif os.path.exists(pretrained_text):
+ text_checkpoint_path = pretrained_text
+ else:
+ logging.warning(f'Pretrained weights ({text_checkpoint_path}) not found for model {model_name}.text.')
+ raise RuntimeError(f'Pretrained weights ({text_checkpoint_path}) not found for model {model_name}.text.')
+
+ if visual_checkpoint_path:
+ logging.info(f'Loading pretrained {model_name}.visual weights ({visual_checkpoint_path}).')
+ if text_checkpoint_path:
+ logging.info(f'Loading pretrained {model_name}.text weights ({text_checkpoint_path}).')
+
+ if visual_checkpoint_path or text_checkpoint_path:
+ load_pretrained_checkpoint(
+ model,
+ visual_checkpoint_path,
+ text_checkpoint_path,
+ strict=False,
+ visual_model=pretrained_visual_model,
+ text_model=pretrained_text_model,
+ model_key="model|module|state_dict",
+ skip_list=skip_list
+ )
+
+ if "fp16" in precision or "bf16" in precision:
+ logging.info(f'convert precision to {precision}')
+ model = model.to(torch.bfloat16) if 'bf16' in precision else model.to(torch.float16)
+
+ model.to(device=device)
+
+    # set image mean / std metadata from pretrained_cfg if available, or use defaults
+ model.visual.image_mean = pretrained_cfg.get('mean', None) or OPENAI_DATASET_MEAN
+ model.visual.image_std = pretrained_cfg.get('std', None) or OPENAI_DATASET_STD
+
+ if jit:
+ model = torch.jit.script(model)
+
+ return model
+
+
+def create_model_and_transforms(
+ model_name: str,
+ pretrained: Optional[str] = None,
+ precision: str = 'fp32',
+ device: Union[str, torch.device] = 'cpu',
+ jit: bool = False,
+ force_quick_gelu: bool = False,
+ force_custom_clip: bool = False,
+ force_patch_dropout: Optional[float] = None,
+ pretrained_image: str = '',
+ pretrained_text: str = '',
+ pretrained_hf: bool = True,
+ pretrained_visual_model: str = None,
+ pretrained_text_model: str = None,
+ image_mean: Optional[Tuple[float, ...]] = None,
+ image_std: Optional[Tuple[float, ...]] = None,
+ cache_dir: Optional[str] = None,
+ skip_list: list = [],
+):
+ model = create_model(
+ model_name,
+ pretrained,
+ precision=precision,
+ device=device,
+ jit=jit,
+ force_quick_gelu=force_quick_gelu,
+ force_custom_clip=force_custom_clip,
+ force_patch_dropout=force_patch_dropout,
+ pretrained_image=pretrained_image,
+ pretrained_text=pretrained_text,
+ pretrained_hf=pretrained_hf,
+ pretrained_visual_model=pretrained_visual_model,
+ pretrained_text_model=pretrained_text_model,
+ cache_dir=cache_dir,
+ skip_list=skip_list,
+ )
+
+ image_mean = image_mean or getattr(model.visual, 'image_mean', None)
+ image_std = image_std or getattr(model.visual, 'image_std', None)
+ preprocess_train = image_transform(
+ model.visual.image_size,
+ is_train=True,
+ mean=image_mean,
+ std=image_std
+ )
+ preprocess_val = image_transform(
+ model.visual.image_size,
+ is_train=False,
+ mean=image_mean,
+ std=image_std
+ )
+
+ return model, preprocess_train, preprocess_val
+
+
+def create_transforms(
+ model_name: str,
+ pretrained: Optional[str] = None,
+ precision: str = 'fp32',
+ device: Union[str, torch.device] = 'cpu',
+ jit: bool = False,
+ force_quick_gelu: bool = False,
+ force_custom_clip: bool = False,
+ force_patch_dropout: Optional[float] = None,
+ pretrained_image: str = '',
+ pretrained_text: str = '',
+ pretrained_hf: bool = True,
+ pretrained_visual_model: str = None,
+ pretrained_text_model: str = None,
+ image_mean: Optional[Tuple[float, ...]] = None,
+ image_std: Optional[Tuple[float, ...]] = None,
+ cache_dir: Optional[str] = None,
+ skip_list: list = [],
+):
+ model = create_model(
+ model_name,
+ pretrained,
+ precision=precision,
+ device=device,
+ jit=jit,
+ force_quick_gelu=force_quick_gelu,
+ force_custom_clip=force_custom_clip,
+ force_patch_dropout=force_patch_dropout,
+ pretrained_image=pretrained_image,
+ pretrained_text=pretrained_text,
+ pretrained_hf=pretrained_hf,
+ pretrained_visual_model=pretrained_visual_model,
+ pretrained_text_model=pretrained_text_model,
+ cache_dir=cache_dir,
+ skip_list=skip_list,
+ )
+
+
+ image_mean = image_mean or getattr(model.visual, 'image_mean', None)
+ image_std = image_std or getattr(model.visual, 'image_std', None)
+ preprocess_train = image_transform(
+ model.visual.image_size,
+ is_train=True,
+ mean=image_mean,
+ std=image_std
+ )
+ preprocess_val = image_transform(
+ model.visual.image_size,
+ is_train=False,
+ mean=image_mean,
+ std=image_std
+ )
+ del model
+
+ return preprocess_train, preprocess_val
+
+def create_model_from_pretrained(
+ model_name: str,
+ pretrained: str,
+ precision: str = 'fp32',
+ device: Union[str, torch.device] = 'cpu',
+ jit: bool = False,
+ force_quick_gelu: bool = False,
+ force_custom_clip: bool = False,
+ force_patch_dropout: Optional[float] = None,
+ return_transform: bool = True,
+ image_mean: Optional[Tuple[float, ...]] = None,
+ image_std: Optional[Tuple[float, ...]] = None,
+ cache_dir: Optional[str] = None,
+ is_frozen: bool = False,
+):
+ if not is_pretrained_cfg(model_name, pretrained) and not os.path.exists(pretrained):
+ raise RuntimeError(
+ f'{pretrained} is not a valid pretrained cfg or checkpoint for {model_name}.'
+ f' Use open_clip.list_pretrained() to find one.')
+
+ model = create_model(
+ model_name,
+ pretrained,
+ precision=precision,
+ device=device,
+ jit=jit,
+ force_quick_gelu=force_quick_gelu,
+ force_custom_clip=force_custom_clip,
+ force_patch_dropout=force_patch_dropout,
+ cache_dir=cache_dir,
+ )
+
+ if is_frozen:
+ for param in model.parameters():
+ param.requires_grad = False
+
+ if not return_transform:
+ return model
+
+ image_mean = image_mean or getattr(model.visual, 'image_mean', None)
+ image_std = image_std or getattr(model.visual, 'image_std', None)
+ preprocess = image_transform(
+ model.visual.image_size,
+ is_train=False,
+ mean=image_mean,
+ std=image_std
+ )
+
+ return model, preprocess
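+
+# Illustrative usage sketch (commented; not part of the original factory code). The model name and
+# pretrained tag below come from model_configs/ and pretrained.py in this repo; PIL and a CUDA
+# device are assumed.
+#
+#   model, preprocess = create_model_from_pretrained(
+#       'EVA02-CLIP-L-14-336', pretrained='eva02_clip', device='cuda')
+#   image = preprocess(Image.open('example.jpg')).unsqueeze(0).to('cuda')
+#   with torch.no_grad():
+#       image_features = model.encode_image(image, normalize=True)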
diff --git a/eva_clip/hf_configs.py b/eva_clip/hf_configs.py
new file mode 100644
index 0000000000000000000000000000000000000000..a8c9b704db1879676aed5cef26796303b65fe987
--- /dev/null
+++ b/eva_clip/hf_configs.py
@@ -0,0 +1,57 @@
+# HF architecture dict:
+arch_dict = {
+ # https://huggingface.co/docs/transformers/model_doc/roberta#roberta
+ "roberta": {
+ "config_names": {
+ "context_length": "max_position_embeddings",
+ "vocab_size": "vocab_size",
+ "width": "hidden_size",
+ "heads": "num_attention_heads",
+ "layers": "num_hidden_layers",
+ "layer_attr": "layer",
+ "token_embeddings_attr": "embeddings"
+ },
+ "pooler": "mean_pooler",
+ },
+ # https://huggingface.co/docs/transformers/model_doc/xlm-roberta#transformers.XLMRobertaConfig
+ "xlm-roberta": {
+ "config_names": {
+ "context_length": "max_position_embeddings",
+ "vocab_size": "vocab_size",
+ "width": "hidden_size",
+ "heads": "num_attention_heads",
+ "layers": "num_hidden_layers",
+ "layer_attr": "layer",
+ "token_embeddings_attr": "embeddings"
+ },
+ "pooler": "mean_pooler",
+ },
+ # https://huggingface.co/docs/transformers/model_doc/mt5#mt5
+ "mt5": {
+ "config_names": {
+ # unlimited seqlen
+ # https://github.com/google-research/text-to-text-transfer-transformer/issues/273
+ # https://github.com/huggingface/transformers/blob/v4.24.0/src/transformers/models/t5/modeling_t5.py#L374
+ "context_length": "",
+ "vocab_size": "vocab_size",
+ "width": "d_model",
+ "heads": "num_heads",
+ "layers": "num_layers",
+ "layer_attr": "block",
+ "token_embeddings_attr": "embed_tokens"
+ },
+ "pooler": "mean_pooler",
+ },
+ "bert": {
+ "config_names": {
+ "context_length": "max_position_embeddings",
+ "vocab_size": "vocab_size",
+ "width": "hidden_size",
+ "heads": "num_attention_heads",
+ "layers": "num_hidden_layers",
+ "layer_attr": "layer",
+ "token_embeddings_attr": "embeddings"
+ },
+ "pooler": "mean_pooler",
+ }
+}
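+
+# Example (illustrative only): hf_model.py resolves HF config attributes through this dict,
+# roughly as in the commented sketch below (AutoConfig is part of `transformers`).
+#
+#   from transformers import AutoConfig
+#   cfg = AutoConfig.from_pretrained('xlm-roberta-base')    # cfg.model_type == 'xlm-roberta'
+#   names = arch_dict[cfg.model_type]["config_names"]
+#   width = getattr(cfg, names["width"])                     # -> cfg.hidden_size
+#   vocab_size = getattr(cfg, names["vocab_size"])           # -> cfg.vocab_size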
diff --git a/eva_clip/hf_model.py b/eva_clip/hf_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..c4b9fd85b4066ba31db2bda5767ed1ce15de479d
--- /dev/null
+++ b/eva_clip/hf_model.py
@@ -0,0 +1,248 @@
+""" huggingface model adapter
+
+Wraps HuggingFace transformers (https://github.com/huggingface/transformers) models for use as a text tower in a CLIP model.
+"""
+
+import re
+
+import torch
+import torch.nn as nn
+from torch.nn import functional as F
+from torch import TensorType
+try:
+ import transformers
+ from transformers import AutoModel, AutoModelForMaskedLM, AutoTokenizer, AutoConfig, PretrainedConfig
+ from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, \
+ BaseModelOutputWithPoolingAndCrossAttentions
+except ImportError as e:
+ transformers = None
+
+
+ class BaseModelOutput:
+ pass
+
+
+ class PretrainedConfig:
+ pass
+
+from .hf_configs import arch_dict
+
+# utils
+def _camel2snake(s):
+    return re.sub(r'(?<!^)(?=[A-Z])', '_', s).lower()
+
+    # def forward_itm(self, x:TensorType, image_embeds:TensorType) -> TensorType:
+ # image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(x.device)
+ # attn_mask = (x != self.config.pad_token_id).long()
+ # out = self.transformer(
+ # input_ids=x,
+ # attention_mask=attn_mask,
+ # encoder_hidden_states = image_embeds,
+ # encoder_attention_mask = image_atts,
+ # )
+ # pooled_out = self.pooler(out, attn_mask)
+
+ # return self.itm_proj(pooled_out)
+
+ def mask(self, input_ids, vocab_size, device, targets=None, masked_indices=None, probability_matrix=None):
+ if masked_indices is None:
+ masked_indices = torch.bernoulli(probability_matrix).bool()
+
+ masked_indices[input_ids == self.tokenizer.pad_token_id] = False
+ masked_indices[input_ids == self.tokenizer.cls_token_id] = False
+
+ if targets is not None:
+ targets[~masked_indices] = -100 # We only compute loss on masked tokens
+
+ # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
+ indices_replaced = torch.bernoulli(torch.full(input_ids.shape, 0.8)).bool() & masked_indices
+ input_ids[indices_replaced] = self.tokenizer.mask_token_id
+
+ # 10% of the time, we replace masked input tokens with random word
+ indices_random = torch.bernoulli(torch.full(input_ids.shape, 0.5)).bool() & masked_indices & ~indices_replaced
+ random_words = torch.randint(vocab_size, input_ids.shape, dtype=torch.long).to(device)
+ input_ids[indices_random] = random_words[indices_random]
+ # The rest of the time (10% of the time) we keep the masked input tokens unchanged
+
+ if targets is not None:
+ return input_ids, targets
+ else:
+ return input_ids
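+
+    # Worked example of the masking scheme above (comment only; fractions follow the constants
+    # in the code): with mlm_probability=0.25 roughly 25% of non-special tokens are selected;
+    # of those, 80% become [MASK], half of the remaining 20% (10% overall) are replaced with a
+    # random vocabulary id, and the final 10% are left unchanged, as in BERT-style MLM.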
+
+ def forward_mlm(self, input_ids, image_embeds, mlm_probability=0.25):
+ labels = input_ids.clone()
+ attn_mask = (input_ids != self.config.pad_token_id).long()
+ image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(input_ids.device)
+ vocab_size = getattr(self.config, arch_dict[self.config.model_type]["config_names"]["vocab_size"])
+ probability_matrix = torch.full(labels.shape, mlm_probability)
+ input_ids, labels = self.mask(input_ids, vocab_size, input_ids.device, targets=labels,
+ probability_matrix = probability_matrix)
+ mlm_output = self.transformer(input_ids,
+ attention_mask = attn_mask,
+ encoder_hidden_states = image_embeds,
+ encoder_attention_mask = image_atts,
+ return_dict = True,
+ labels = labels,
+ )
+ return mlm_output.loss
+ # mlm_output = self.transformer(input_ids,
+ # attention_mask = attn_mask,
+ # encoder_hidden_states = image_embeds,
+ # encoder_attention_mask = image_atts,
+ # return_dict = True,
+ # ).last_hidden_state
+ # logits = self.mlm_proj(mlm_output)
+
+ # # logits = logits[:, :-1, :].contiguous().view(-1, vocab_size)
+ # logits = logits[:, 1:, :].contiguous().view(-1, vocab_size)
+ # labels = labels[:, 1:].contiguous().view(-1)
+
+ # mlm_loss = F.cross_entropy(
+ # logits,
+ # labels,
+ # # label_smoothing=0.1,
+ # )
+ # return mlm_loss
+
+
+ def forward(self, x:TensorType) -> TensorType:
+ attn_mask = (x != self.config.pad_token_id).long()
+ out = self.transformer(input_ids=x, attention_mask=attn_mask)
+ pooled_out = self.pooler(out, attn_mask)
+
+ return self.proj(pooled_out)
+
+ def lock(self, unlocked_layers:int=0, freeze_layer_norm:bool=True):
+ if not unlocked_layers: # full freezing
+ for n, p in self.transformer.named_parameters():
+ p.requires_grad = (not freeze_layer_norm) if "LayerNorm" in n.split(".") else False
+ return
+
+ encoder = self.transformer.encoder if hasattr(self.transformer, 'encoder') else self.transformer
+ layer_list = getattr(encoder, arch_dict[self.config.model_type]["config_names"]["layer_attr"])
+ print(f"Unlocking {unlocked_layers}/{len(layer_list) + 1} layers of hf model")
+ embeddings = getattr(
+ self.transformer, arch_dict[self.config.model_type]["config_names"]["token_embeddings_attr"])
+ modules = [embeddings, *layer_list][:-unlocked_layers]
+ # freeze layers
+ for module in modules:
+ for n, p in module.named_parameters():
+ p.requires_grad = (not freeze_layer_norm) if "LayerNorm" in n.split(".") else False
+
+
+ @torch.jit.ignore
+ def set_grad_checkpointing(self, enable=True):
+ self.transformer.gradient_checkpointing_enable()
+
+ def get_num_layers(self):
+ encoder = self.transformer.encoder if hasattr(self.transformer, 'encoder') else self.transformer
+ layer_list = getattr(encoder, arch_dict[self.config.model_type]["config_names"]["layer_attr"])
+ return len(layer_list)
+
+ def init_parameters(self):
+ pass
diff --git a/eva_clip/loss.py b/eva_clip/loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..473f60d98d501067e85ace2dd089b00e249b6d17
--- /dev/null
+++ b/eva_clip/loss.py
@@ -0,0 +1,138 @@
+import math
+import torch
+import torch.nn as nn
+from torch.nn import functional as F
+
+try:
+ import torch.distributed.nn
+ from torch import distributed as dist
+ has_distributed = True
+except ImportError:
+ has_distributed = False
+
+try:
+ import horovod.torch as hvd
+except ImportError:
+ hvd = None
+
+from timm.loss import LabelSmoothingCrossEntropy
+
+
+def gather_features(
+ image_features,
+ text_features,
+ local_loss=False,
+ gather_with_grad=False,
+ rank=0,
+ world_size=1,
+ use_horovod=False
+):
+ assert has_distributed, 'torch.distributed did not import correctly, please use a PyTorch version with support.'
+ if use_horovod:
+ assert hvd is not None, 'Please install horovod'
+ if gather_with_grad:
+ all_image_features = hvd.allgather(image_features)
+ all_text_features = hvd.allgather(text_features)
+ else:
+ with torch.no_grad():
+ all_image_features = hvd.allgather(image_features)
+ all_text_features = hvd.allgather(text_features)
+ if not local_loss:
+ # ensure grads for local rank when all_* features don't have a gradient
+ gathered_image_features = list(all_image_features.chunk(world_size, dim=0))
+ gathered_text_features = list(all_text_features.chunk(world_size, dim=0))
+ gathered_image_features[rank] = image_features
+ gathered_text_features[rank] = text_features
+ all_image_features = torch.cat(gathered_image_features, dim=0)
+ all_text_features = torch.cat(gathered_text_features, dim=0)
+ else:
+ # We gather tensors from all gpus
+ if gather_with_grad:
+ all_image_features = torch.cat(torch.distributed.nn.all_gather(image_features), dim=0)
+ all_text_features = torch.cat(torch.distributed.nn.all_gather(text_features), dim=0)
+ # all_image_features = torch.cat(torch.distributed.nn.all_gather(image_features, async_op=True), dim=0)
+ # all_text_features = torch.cat(torch.distributed.nn.all_gather(text_features, async_op=True), dim=0)
+ else:
+ gathered_image_features = [torch.zeros_like(image_features) for _ in range(world_size)]
+ gathered_text_features = [torch.zeros_like(text_features) for _ in range(world_size)]
+ dist.all_gather(gathered_image_features, image_features)
+ dist.all_gather(gathered_text_features, text_features)
+ if not local_loss:
+ # ensure grads for local rank when all_* features don't have a gradient
+ gathered_image_features[rank] = image_features
+ gathered_text_features[rank] = text_features
+ all_image_features = torch.cat(gathered_image_features, dim=0)
+ all_text_features = torch.cat(gathered_text_features, dim=0)
+
+ return all_image_features, all_text_features
+
+
+class ClipLoss(nn.Module):
+
+ def __init__(
+ self,
+ local_loss=False,
+ gather_with_grad=False,
+ cache_labels=False,
+ rank=0,
+ world_size=1,
+ use_horovod=False,
+ smoothing=0.,
+ ):
+ super().__init__()
+ self.local_loss = local_loss
+ self.gather_with_grad = gather_with_grad
+ self.cache_labels = cache_labels
+ self.rank = rank
+ self.world_size = world_size
+ self.use_horovod = use_horovod
+ self.label_smoothing_cross_entropy = LabelSmoothingCrossEntropy(smoothing=smoothing) if smoothing > 0 else None
+
+ # cache state
+ self.prev_num_logits = 0
+ self.labels = {}
+
+ def forward(self, image_features, text_features, logit_scale=1.):
+ device = image_features.device
+ if self.world_size > 1:
+ all_image_features, all_text_features = gather_features(
+ image_features, text_features,
+ self.local_loss, self.gather_with_grad, self.rank, self.world_size, self.use_horovod)
+
+ if self.local_loss:
+ logits_per_image = logit_scale * image_features @ all_text_features.T
+ logits_per_text = logit_scale * text_features @ all_image_features.T
+ else:
+ logits_per_image = logit_scale * all_image_features @ all_text_features.T
+ logits_per_text = logits_per_image.T
+ else:
+ logits_per_image = logit_scale * image_features @ text_features.T
+ logits_per_text = logit_scale * text_features @ image_features.T
+        # calculate ground-truth labels and cache them if enabled
+ num_logits = logits_per_image.shape[0]
+ if self.prev_num_logits != num_logits or device not in self.labels:
+ labels = torch.arange(num_logits, device=device, dtype=torch.long)
+ if self.world_size > 1 and self.local_loss:
+ labels = labels + num_logits * self.rank
+ if self.cache_labels:
+ self.labels[device] = labels
+ self.prev_num_logits = num_logits
+ else:
+ labels = self.labels[device]
+
+ if self.label_smoothing_cross_entropy:
+ total_loss = (
+ self.label_smoothing_cross_entropy(logits_per_image, labels) +
+ self.label_smoothing_cross_entropy(logits_per_text, labels)
+ ) / 2
+ else:
+ total_loss = (
+ F.cross_entropy(logits_per_image, labels) +
+ F.cross_entropy(logits_per_text, labels)
+ ) / 2
+
+ acc = None
+ i2t_acc = (logits_per_image.argmax(-1) == labels).sum() / len(logits_per_image)
+ t2i_acc = (logits_per_text.argmax(-1) == labels).sum() / len(logits_per_text)
+ acc = {"i2t": i2t_acc, "t2i": t2i_acc}
+ return total_loss, acc
\ No newline at end of file
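+# Illustrative single-process usage of ClipLoss (commented sketch, not part of the original file):
+#
+#   loss_fn = ClipLoss(rank=0, world_size=1)
+#   image_features = F.normalize(torch.randn(8, 512), dim=-1)
+#   text_features = F.normalize(torch.randn(8, 512), dim=-1)
+#   total_loss, acc = loss_fn(image_features, text_features, logit_scale=100.0)
+#   # acc['i2t'] / acc['t2i'] are in-batch image->text / text->image retrieval accuracies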
diff --git a/eva_clip/model.py b/eva_clip/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..da3bbd755799ced672385d1029ba7ce6d5215b0b
--- /dev/null
+++ b/eva_clip/model.py
@@ -0,0 +1,439 @@
+""" CLIP Model
+
+Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
+"""
+import os
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+from functools import partial
+
+import numpy as np
+import torch
+import torch.nn.functional as F
+from torch import nn
+
+try:
+ from .hf_model import HFTextEncoder
+except:
+ HFTextEncoder = None
+from .modified_resnet import ModifiedResNet
+from .timm_model import TimmModel
+from .eva_vit_model import EVAVisionTransformer
+from .transformer import LayerNorm, QuickGELU, Attention, VisionTransformer, TextTransformer
+
+try:
+    from apex.normalization import FusedLayerNorm
+except ImportError:
+    FusedLayerNorm = LayerNorm
+    print("apex is not installed; falling back to torch LayerNorm ('pip install apex' to enable FusedLayerNorm)")
+
+try:
+ import xformers.ops as xops
+except ImportError:
+ xops = None
+    print("xformers is not installed ('pip install xformers' to enable memory-efficient attention)")
+
+@dataclass
+class CLIPVisionCfg:
+ layers: Union[Tuple[int, int, int, int], int] = 12
+ width: int = 768
+ head_width: int = 64
+ mlp_ratio: float = 4.0
+ patch_size: int = 16
+ image_size: Union[Tuple[int, int], int] = 224
+ ls_init_value: Optional[float] = None # layer scale initial value
+ patch_dropout: float = 0. # what fraction of patches to dropout during training (0 would mean disabled and no patches dropped) - 0.5 to 0.75 recommended in the paper for optimal results
+ global_average_pool: bool = False # whether to global average pool the last embedding layer, instead of using CLS token (https://arxiv.org/abs/2205.01580)
+ drop_path_rate: Optional[float] = None # drop path rate
+ timm_model_name: str = None # a valid model name overrides layers, width, patch_size
+ timm_model_pretrained: bool = False # use (imagenet) pretrained weights for named model
+ timm_pool: str = 'avg' # feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '')
+ timm_proj: str = 'linear' # linear projection for timm model output ('linear', 'mlp', '')
+ timm_proj_bias: bool = False # enable bias final projection
+ eva_model_name: str = None # a valid eva model name overrides layers, width, patch_size
+ qkv_bias: bool = True
+ fusedLN: bool = False
+ xattn: bool = False
+ postnorm: bool = False
+ rope: bool = False
+ pt_hw_seq_len: int = 16 # 224/14
+ intp_freq: bool = False
+ naiveswiglu: bool = False
+ subln: bool = False
+
+
+@dataclass
+class CLIPTextCfg:
+ context_length: int = 77
+ vocab_size: int = 49408
+ width: int = 512
+ heads: int = 8
+ layers: int = 12
+ ls_init_value: Optional[float] = None # layer scale initial value
+ hf_model_name: str = None
+ hf_tokenizer_name: str = None
+ hf_model_pretrained: bool = True
+ proj: str = 'mlp'
+ pooler_type: str = 'mean_pooler'
+ masked_language_modeling: bool = False
+ fusedLN: bool = False
+ xattn: bool = False
+ attn_mask: bool = True
+
+def get_cast_dtype(precision: str):
+ cast_dtype = None
+ if precision == 'bf16':
+ cast_dtype = torch.bfloat16
+ elif precision == 'fp16':
+ cast_dtype = torch.float16
+ return cast_dtype
+
+
+def _build_vision_tower(
+ embed_dim: int,
+ vision_cfg: CLIPVisionCfg,
+ quick_gelu: bool = False,
+ cast_dtype: Optional[torch.dtype] = None
+):
+ if isinstance(vision_cfg, dict):
+ vision_cfg = CLIPVisionCfg(**vision_cfg)
+
+ # OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more
+ # memory efficient in recent PyTorch releases (>= 1.10).
+ # NOTE: timm models always use native GELU regardless of quick_gelu flag.
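+    # (QuickGELU approximates GELU as x * sigmoid(1.702 * x), the activation used by the original OpenAI CLIP.)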
+ act_layer = QuickGELU if quick_gelu else nn.GELU
+
+ if vision_cfg.eva_model_name:
+ vision_heads = vision_cfg.width // vision_cfg.head_width
+ norm_layer = LayerNorm
+
+ visual = EVAVisionTransformer(
+ img_size=vision_cfg.image_size,
+ patch_size=vision_cfg.patch_size,
+ num_classes=embed_dim,
+ use_mean_pooling=vision_cfg.global_average_pool, #False
+ init_values=vision_cfg.ls_init_value,
+ patch_dropout=vision_cfg.patch_dropout,
+ embed_dim=vision_cfg.width,
+ depth=vision_cfg.layers,
+ num_heads=vision_heads,
+ mlp_ratio=vision_cfg.mlp_ratio,
+ qkv_bias=vision_cfg.qkv_bias,
+ drop_path_rate=vision_cfg.drop_path_rate,
+ norm_layer= partial(FusedLayerNorm, eps=1e-6) if vision_cfg.fusedLN else partial(norm_layer, eps=1e-6),
+ xattn=vision_cfg.xattn,
+ rope=vision_cfg.rope,
+ postnorm=vision_cfg.postnorm,
+ pt_hw_seq_len= vision_cfg.pt_hw_seq_len, # 224/14
+ intp_freq= vision_cfg.intp_freq,
+ naiveswiglu= vision_cfg.naiveswiglu,
+ subln= vision_cfg.subln
+ )
+ elif vision_cfg.timm_model_name:
+ visual = TimmModel(
+ vision_cfg.timm_model_name,
+ pretrained=vision_cfg.timm_model_pretrained,
+ pool=vision_cfg.timm_pool,
+ proj=vision_cfg.timm_proj,
+ proj_bias=vision_cfg.timm_proj_bias,
+ embed_dim=embed_dim,
+ image_size=vision_cfg.image_size
+ )
+ act_layer = nn.GELU # so that text transformer doesn't use QuickGELU w/ timm models
+ elif isinstance(vision_cfg.layers, (tuple, list)):
+ vision_heads = vision_cfg.width * 32 // vision_cfg.head_width
+ visual = ModifiedResNet(
+ layers=vision_cfg.layers,
+ output_dim=embed_dim,
+ heads=vision_heads,
+ image_size=vision_cfg.image_size,
+ width=vision_cfg.width
+ )
+ else:
+ vision_heads = vision_cfg.width // vision_cfg.head_width
+ norm_layer = LayerNormFp32 if cast_dtype in (torch.float16, torch.bfloat16) else LayerNorm
+ visual = VisionTransformer(
+ image_size=vision_cfg.image_size,
+ patch_size=vision_cfg.patch_size,
+ width=vision_cfg.width,
+ layers=vision_cfg.layers,
+ heads=vision_heads,
+ mlp_ratio=vision_cfg.mlp_ratio,
+ ls_init_value=vision_cfg.ls_init_value,
+ patch_dropout=vision_cfg.patch_dropout,
+ global_average_pool=vision_cfg.global_average_pool,
+ output_dim=embed_dim,
+ act_layer=act_layer,
+ norm_layer=norm_layer,
+ )
+
+ return visual
+
+
+def _build_text_tower(
+ embed_dim: int,
+ text_cfg: CLIPTextCfg,
+ quick_gelu: bool = False,
+ cast_dtype: Optional[torch.dtype] = None,
+):
+ if isinstance(text_cfg, dict):
+ text_cfg = CLIPTextCfg(**text_cfg)
+
+ if text_cfg.hf_model_name:
+ text = HFTextEncoder(
+ text_cfg.hf_model_name,
+ output_dim=embed_dim,
+ tokenizer_name=text_cfg.hf_tokenizer_name,
+ proj=text_cfg.proj,
+ pooler_type=text_cfg.pooler_type,
+ masked_language_modeling=text_cfg.masked_language_modeling
+ )
+ else:
+ act_layer = QuickGELU if quick_gelu else nn.GELU
+ norm_layer = LayerNorm
+
+ text = TextTransformer(
+ context_length=text_cfg.context_length,
+ vocab_size=text_cfg.vocab_size,
+ width=text_cfg.width,
+ heads=text_cfg.heads,
+ layers=text_cfg.layers,
+ ls_init_value=text_cfg.ls_init_value,
+ output_dim=embed_dim,
+ act_layer=act_layer,
+ norm_layer= FusedLayerNorm if text_cfg.fusedLN else norm_layer,
+ xattn=text_cfg.xattn,
+ attn_mask=text_cfg.attn_mask,
+ )
+ return text
+
+class CLIP(nn.Module):
+ def __init__(
+ self,
+ embed_dim: int,
+ vision_cfg: CLIPVisionCfg,
+ text_cfg: CLIPTextCfg,
+ quick_gelu: bool = False,
+ cast_dtype: Optional[torch.dtype] = None,
+ ):
+ super().__init__()
+ self.visual = _build_vision_tower(embed_dim, vision_cfg, quick_gelu, cast_dtype)
+
+ text = _build_text_tower(embed_dim, text_cfg, quick_gelu, cast_dtype)
+ self.transformer = text.transformer
+ self.vocab_size = text.vocab_size
+ self.token_embedding = text.token_embedding
+ self.positional_embedding = text.positional_embedding
+ self.ln_final = text.ln_final
+ self.text_projection = text.text_projection
+ self.register_buffer('attn_mask', text.attn_mask, persistent=False)
+
+ self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
+
+ def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False):
+ # lock image tower as per LiT - https://arxiv.org/abs/2111.07991
+ self.visual.lock(unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats)
+
+ @torch.jit.ignore
+ def set_grad_checkpointing(self, enable=True):
+ self.visual.set_grad_checkpointing(enable)
+ self.transformer.grad_checkpointing = enable
+
+ @torch.jit.ignore
+ def no_weight_decay(self):
+ return {'logit_scale'}
+
+ def encode_image(self, image, normalize: bool = False):
+ features = self.visual(image)
+ return F.normalize(features, dim=-1) if normalize else features
+
+ def encode_text(self, text, normalize: bool = False):
+ cast_dtype = self.transformer.get_cast_dtype()
+
+ x = self.token_embedding(text).to(cast_dtype) # [batch_size, n_ctx, d_model]
+
+ x = x + self.positional_embedding.to(cast_dtype)
+ x = x.permute(1, 0, 2) # NLD -> LND
+ x = self.transformer(x, attn_mask=self.attn_mask)
+ x = x.permute(1, 0, 2) # LND -> NLD
+ x = self.ln_final(x) # [batch_size, n_ctx, transformer.width]
+        # take features from the eot embedding (the eot token has the highest token id in each sequence, so argmax over ids finds its position)
+ x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
+ return F.normalize(x, dim=-1) if normalize else x
+
+ def forward(self, image, text):
+ image_features = self.encode_image(image, normalize=True)
+ text_features = self.encode_text(text, normalize=True)
+ return image_features, text_features, self.logit_scale.exp()
+
+
+class CustomCLIP(nn.Module):
+ def __init__(
+ self,
+ embed_dim: int,
+ vision_cfg: CLIPVisionCfg,
+ text_cfg: CLIPTextCfg,
+ quick_gelu: bool = False,
+ cast_dtype: Optional[torch.dtype] = None,
+ itm_task: bool = False,
+ ):
+ super().__init__()
+ self.visual = _build_vision_tower(embed_dim, vision_cfg, quick_gelu, cast_dtype)
+ self.text = _build_text_tower(embed_dim, text_cfg, quick_gelu, cast_dtype)
+ self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
+
+ def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False):
+ # lock image tower as per LiT - https://arxiv.org/abs/2111.07991
+ self.visual.lock(unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats)
+
+ def lock_text_tower(self, unlocked_layers:int=0, freeze_layer_norm:bool=True):
+ self.text.lock(unlocked_layers, freeze_layer_norm)
+
+ @torch.jit.ignore
+ def set_grad_checkpointing(self, enable=True):
+ self.visual.set_grad_checkpointing(enable)
+ self.text.set_grad_checkpointing(enable)
+
+ @torch.jit.ignore
+ def no_weight_decay(self):
+ return {'logit_scale'}
+
+ def encode_image(self, image, normalize: bool = False):
+ features = self.visual(image)
+ return F.normalize(features, dim=-1) if normalize else features
+
+ def encode_text(self, text, normalize: bool = False):
+ features = self.text(text)
+ return F.normalize(features, dim=-1) if normalize else features
+
+ def forward(self, image, text):
+ image_features = self.encode_image(image, normalize=True)
+ text_features = self.encode_text(text, normalize=True)
+ return image_features, text_features, self.logit_scale.exp()
+
+
+def convert_weights_to_lp(model: nn.Module, dtype=torch.float16):
+ """Convert applicable model parameters to low-precision (bf16 or fp16)"""
+
+ def _convert_weights(l):
+
+ if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
+ l.weight.data = l.weight.data.to(dtype)
+ if l.bias is not None:
+ l.bias.data = l.bias.data.to(dtype)
+
+ if isinstance(l, (nn.MultiheadAttention, Attention)):
+ for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
+ tensor = getattr(l, attr, None)
+ if tensor is not None:
+ tensor.data = tensor.data.to(dtype)
+
+ if isinstance(l, nn.Parameter):
+ l.data = l.data.to(dtype)
+
+        for name in ["text_projection", "proj"]:
+            if hasattr(l, name):
+                # only cast tensor-valued projections; module-valued `proj` attributes are
+                # already handled by the Conv/Linear branch above
+                attr = getattr(l, name, None)
+                if isinstance(attr, torch.Tensor):
+                    attr.data = attr.data.to(dtype)
+
+ model.apply(_convert_weights)
+
+
+convert_weights_to_fp16 = convert_weights_to_lp # backwards compat
+
+
+# used to maintain checkpoint compatibility
+def convert_to_custom_text_state_dict(state_dict: dict):
+ if 'text_projection' in state_dict:
+ # old format state_dict, move text tower -> .text
+ new_state_dict = {}
+ for k, v in state_dict.items():
+ if any(k.startswith(p) for p in (
+ 'text_projection',
+ 'positional_embedding',
+ 'token_embedding',
+ 'transformer',
+ 'ln_final',
+ 'logit_scale'
+ )):
+ k = 'text.' + k
+ new_state_dict[k] = v
+ return new_state_dict
+ return state_dict
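+
+# Example of the remapping above (illustrative): an old-format key such as
+# 'transformer.resblocks.0.attn.in_proj_weight' becomes
+# 'text.transformer.resblocks.0.attn.in_proj_weight', while 'visual.*' keys are left untouched,
+# so older checkpoints load into CustomCLIP models whose text tower lives under `.text`.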
+
+
+def build_model_from_openai_state_dict(
+ state_dict: dict,
+ quick_gelu=True,
+ cast_dtype=torch.float16,
+):
+ vit = "visual.proj" in state_dict
+
+ if vit:
+ vision_width = state_dict["visual.conv1.weight"].shape[0]
+ vision_layers = len(
+ [k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
+ vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
+ grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
+ image_size = vision_patch_size * grid_size
+ else:
+ counts: list = [
+ len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
+ vision_layers = tuple(counts)
+ vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
+ output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
+ vision_patch_size = None
+ assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
+ image_size = output_width * 32
+
+ embed_dim = state_dict["text_projection"].shape[1]
+ context_length = state_dict["positional_embedding"].shape[0]
+ vocab_size = state_dict["token_embedding.weight"].shape[0]
+ transformer_width = state_dict["ln_final.weight"].shape[0]
+ transformer_heads = transformer_width // 64
+ transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))
+
+ vision_cfg = CLIPVisionCfg(
+ layers=vision_layers,
+ width=vision_width,
+ patch_size=vision_patch_size,
+ image_size=image_size,
+ )
+ text_cfg = CLIPTextCfg(
+ context_length=context_length,
+ vocab_size=vocab_size,
+ width=transformer_width,
+ heads=transformer_heads,
+ layers=transformer_layers
+ )
+ model = CLIP(
+ embed_dim,
+ vision_cfg=vision_cfg,
+ text_cfg=text_cfg,
+ quick_gelu=quick_gelu, # OpenAI models were trained with QuickGELU
+ cast_dtype=cast_dtype,
+ )
+
+ for key in ["input_resolution", "context_length", "vocab_size"]:
+ state_dict.pop(key, None)
+
+ convert_weights_to_fp16(model) # OpenAI state dicts are partially converted to float16
+ model.load_state_dict(state_dict)
+ return model.eval()
+
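+# Worked example of the shape-based inference above (comment only): for the OpenAI ViT-B/32
+# checkpoint, visual.conv1.weight has shape [768, 3, 32, 32] (width 768, patch size 32) and
+# visual.positional_embedding has 50 rows, so grid_size = round(sqrt(50 - 1)) = 7 and
+# image_size = 32 * 7 = 224; the text side likewise yields context_length 77 and vocab_size 49408.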
+
+def trace_model(model, batch_size=256, device=torch.device('cpu')):
+ model.eval()
+ image_size = model.visual.image_size
+ example_images = torch.ones((batch_size, 3, image_size, image_size), device=device)
+ example_text = torch.zeros((batch_size, model.context_length), dtype=torch.int, device=device)
+ model = torch.jit.trace_module(
+ model,
+ inputs=dict(
+ forward=(example_images, example_text),
+ encode_text=(example_text,),
+ encode_image=(example_images,)
+ ))
+ model.visual.image_size = image_size
+ return model
diff --git a/eva_clip/model_configs/EVA01-CLIP-B-16.json b/eva_clip/model_configs/EVA01-CLIP-B-16.json
new file mode 100644
index 0000000000000000000000000000000000000000..aad2058003962a4ab286bf4e1ae956288af34e62
--- /dev/null
+++ b/eva_clip/model_configs/EVA01-CLIP-B-16.json
@@ -0,0 +1,19 @@
+{
+ "embed_dim": 512,
+ "vision_cfg": {
+ "image_size": 224,
+ "layers": 12,
+ "width": 768,
+ "patch_size": 16,
+ "eva_model_name": "eva-clip-b-16",
+ "ls_init_value": 0.1,
+ "drop_path_rate": 0.0
+ },
+ "text_cfg": {
+ "context_length": 77,
+ "vocab_size": 49408,
+ "width": 512,
+ "heads": 8,
+ "layers": 12
+ }
+}
\ No newline at end of file
diff --git a/eva_clip/model_configs/EVA01-CLIP-g-14-plus.json b/eva_clip/model_configs/EVA01-CLIP-g-14-plus.json
new file mode 100644
index 0000000000000000000000000000000000000000..100279572ff6d1bcca601f0eb526b4d4ff174c7d
--- /dev/null
+++ b/eva_clip/model_configs/EVA01-CLIP-g-14-plus.json
@@ -0,0 +1,24 @@
+{
+ "embed_dim": 1024,
+ "vision_cfg": {
+ "image_size": 224,
+ "layers": 40,
+ "width": 1408,
+ "head_width": 88,
+ "mlp_ratio": 4.3637,
+ "patch_size": 14,
+ "eva_model_name": "eva-clip-g-14-x",
+ "drop_path_rate": 0,
+ "xattn": true,
+ "fusedLN": true
+ },
+ "text_cfg": {
+ "context_length": 77,
+ "vocab_size": 49408,
+ "width": 1024,
+ "heads": 16,
+ "layers": 24,
+ "xattn": false,
+ "fusedLN": true
+ }
+}
\ No newline at end of file
diff --git a/eva_clip/model_configs/EVA01-CLIP-g-14.json b/eva_clip/model_configs/EVA01-CLIP-g-14.json
new file mode 100644
index 0000000000000000000000000000000000000000..5d338b4e6104241d1f0304ee82400035d5385332
--- /dev/null
+++ b/eva_clip/model_configs/EVA01-CLIP-g-14.json
@@ -0,0 +1,24 @@
+{
+ "embed_dim": 1024,
+ "vision_cfg": {
+ "image_size": 224,
+ "layers": 40,
+ "width": 1408,
+ "head_width": 88,
+ "mlp_ratio": 4.3637,
+ "patch_size": 14,
+ "eva_model_name": "eva-clip-g-14-x",
+ "drop_path_rate": 0.4,
+ "xattn": true,
+ "fusedLN": true
+ },
+ "text_cfg": {
+ "context_length": 77,
+ "vocab_size": 49408,
+ "width": 768,
+ "heads": 12,
+ "layers": 12,
+ "xattn": false,
+ "fusedLN": true
+ }
+}
\ No newline at end of file
diff --git a/eva_clip/model_configs/EVA02-CLIP-B-16.json b/eva_clip/model_configs/EVA02-CLIP-B-16.json
new file mode 100644
index 0000000000000000000000000000000000000000..e4a6e723f77033caa341ddf9b5be1787d64ad42c
--- /dev/null
+++ b/eva_clip/model_configs/EVA02-CLIP-B-16.json
@@ -0,0 +1,29 @@
+{
+ "embed_dim": 512,
+ "vision_cfg": {
+ "image_size": 224,
+ "layers": 12,
+ "width": 768,
+ "head_width": 64,
+ "patch_size": 16,
+ "mlp_ratio": 2.6667,
+ "eva_model_name": "eva-clip-b-16-X",
+ "drop_path_rate": 0.0,
+ "xattn": true,
+ "fusedLN": true,
+ "rope": true,
+ "pt_hw_seq_len": 16,
+ "intp_freq": true,
+ "naiveswiglu": true,
+ "subln": true
+ },
+ "text_cfg": {
+ "context_length": 77,
+ "vocab_size": 49408,
+ "width": 512,
+ "heads": 8,
+ "layers": 12,
+ "xattn": true,
+ "fusedLN": true
+ }
+}
\ No newline at end of file
diff --git a/eva_clip/model_configs/EVA02-CLIP-L-14-336.json b/eva_clip/model_configs/EVA02-CLIP-L-14-336.json
new file mode 100644
index 0000000000000000000000000000000000000000..3e1d124e1118911c5ad7b1ce85df195aca363ac4
--- /dev/null
+++ b/eva_clip/model_configs/EVA02-CLIP-L-14-336.json
@@ -0,0 +1,29 @@
+{
+ "embed_dim": 768,
+ "vision_cfg": {
+ "image_size": 336,
+ "layers": 24,
+ "width": 1024,
+ "drop_path_rate": 0,
+ "head_width": 64,
+ "mlp_ratio": 2.6667,
+ "patch_size": 14,
+ "eva_model_name": "eva-clip-l-14-336",
+ "xattn": true,
+ "fusedLN": true,
+ "rope": true,
+ "pt_hw_seq_len": 16,
+ "intp_freq": true,
+ "naiveswiglu": true,
+ "subln": true
+ },
+ "text_cfg": {
+ "context_length": 77,
+ "vocab_size": 49408,
+ "width": 768,
+ "heads": 12,
+ "layers": 12,
+ "xattn": false,
+ "fusedLN": true
+ }
+}
\ No newline at end of file
diff --git a/eva_clip/model_configs/EVA02-CLIP-L-14.json b/eva_clip/model_configs/EVA02-CLIP-L-14.json
new file mode 100644
index 0000000000000000000000000000000000000000..03b22ad3cfb92f9c843b9ec8d672e57e7a9ba4a2
--- /dev/null
+++ b/eva_clip/model_configs/EVA02-CLIP-L-14.json
@@ -0,0 +1,29 @@
+{
+ "embed_dim": 768,
+ "vision_cfg": {
+ "image_size": 224,
+ "layers": 24,
+ "width": 1024,
+ "drop_path_rate": 0,
+ "head_width": 64,
+ "mlp_ratio": 2.6667,
+ "patch_size": 14,
+ "eva_model_name": "eva-clip-l-14",
+ "xattn": true,
+ "fusedLN": true,
+ "rope": true,
+ "pt_hw_seq_len": 16,
+ "intp_freq": true,
+ "naiveswiglu": true,
+ "subln": true
+ },
+ "text_cfg": {
+ "context_length": 77,
+ "vocab_size": 49408,
+ "width": 768,
+ "heads": 12,
+ "layers": 12,
+ "xattn": false,
+ "fusedLN": true
+ }
+}
\ No newline at end of file
diff --git a/eva_clip/model_configs/EVA02-CLIP-bigE-14-plus.json b/eva_clip/model_configs/EVA02-CLIP-bigE-14-plus.json
new file mode 100644
index 0000000000000000000000000000000000000000..aa04e2545ac1e015daae2c10133956ce969524f7
--- /dev/null
+++ b/eva_clip/model_configs/EVA02-CLIP-bigE-14-plus.json
@@ -0,0 +1,25 @@
+{
+ "embed_dim": 1024,
+ "vision_cfg": {
+ "image_size": 224,
+ "layers": 64,
+ "width": 1792,
+ "head_width": 112,
+ "mlp_ratio": 8.571428571428571,
+ "patch_size": 14,
+ "eva_model_name": "eva-clip-4b-14-x",
+ "drop_path_rate": 0,
+ "xattn": true,
+ "postnorm": true,
+ "fusedLN": true
+ },
+ "text_cfg": {
+ "context_length": 77,
+ "vocab_size": 49408,
+ "width": 1280,
+ "heads": 20,
+ "layers": 32,
+ "xattn": false,
+ "fusedLN": true
+ }
+}
diff --git a/eva_clip/model_configs/EVA02-CLIP-bigE-14.json b/eva_clip/model_configs/EVA02-CLIP-bigE-14.json
new file mode 100644
index 0000000000000000000000000000000000000000..747ffccc8bd49dbb6701b58e15843b7fe3754e64
--- /dev/null
+++ b/eva_clip/model_configs/EVA02-CLIP-bigE-14.json
@@ -0,0 +1,25 @@
+{
+ "embed_dim": 1024,
+ "vision_cfg": {
+ "image_size": 224,
+ "layers": 64,
+ "width": 1792,
+ "head_width": 112,
+ "mlp_ratio": 8.571428571428571,
+ "patch_size": 14,
+ "eva_model_name": "eva-clip-4b-14-x",
+ "drop_path_rate": 0,
+ "xattn": true,
+ "postnorm": true,
+ "fusedLN": true
+ },
+ "text_cfg": {
+ "context_length": 77,
+ "vocab_size": 49408,
+ "width": 1024,
+ "heads": 16,
+ "layers": 24,
+ "xattn": false,
+ "fusedLN": true
+ }
+}
\ No newline at end of file
diff --git a/eva_clip/modified_resnet.py b/eva_clip/modified_resnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..151bfdd0b052d3db1b160d8b2299c33a6e944a4b
--- /dev/null
+++ b/eva_clip/modified_resnet.py
@@ -0,0 +1,181 @@
+from collections import OrderedDict
+
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+from eva_clip.utils import freeze_batch_norm_2d
+
+
+class Bottleneck(nn.Module):
+ expansion = 4
+
+ def __init__(self, inplanes, planes, stride=1):
+ super().__init__()
+
+ # all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
+ self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
+ self.bn1 = nn.BatchNorm2d(planes)
+ self.act1 = nn.ReLU(inplace=True)
+
+ self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
+ self.bn2 = nn.BatchNorm2d(planes)
+ self.act2 = nn.ReLU(inplace=True)
+
+ self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
+
+ self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
+ self.bn3 = nn.BatchNorm2d(planes * self.expansion)
+ self.act3 = nn.ReLU(inplace=True)
+
+ self.downsample = None
+ self.stride = stride
+
+ if stride > 1 or inplanes != planes * Bottleneck.expansion:
+ # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
+ self.downsample = nn.Sequential(OrderedDict([
+ ("-1", nn.AvgPool2d(stride)),
+ ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
+ ("1", nn.BatchNorm2d(planes * self.expansion))
+ ]))
+
+ def forward(self, x: torch.Tensor):
+ identity = x
+
+ out = self.act1(self.bn1(self.conv1(x)))
+ out = self.act2(self.bn2(self.conv2(out)))
+ out = self.avgpool(out)
+ out = self.bn3(self.conv3(out))
+
+ if self.downsample is not None:
+ identity = self.downsample(x)
+
+ out += identity
+ out = self.act3(out)
+ return out
+
+
+class AttentionPool2d(nn.Module):
+ def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
+ super().__init__()
+ self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
+ self.k_proj = nn.Linear(embed_dim, embed_dim)
+ self.q_proj = nn.Linear(embed_dim, embed_dim)
+ self.v_proj = nn.Linear(embed_dim, embed_dim)
+ self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
+ self.num_heads = num_heads
+
+ def forward(self, x):
+ x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC
+ x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
+ x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
+ x, _ = F.multi_head_attention_forward(
+ query=x, key=x, value=x,
+ embed_dim_to_check=x.shape[-1],
+ num_heads=self.num_heads,
+ q_proj_weight=self.q_proj.weight,
+ k_proj_weight=self.k_proj.weight,
+ v_proj_weight=self.v_proj.weight,
+ in_proj_weight=None,
+ in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
+ bias_k=None,
+ bias_v=None,
+ add_zero_attn=False,
+ dropout_p=0.,
+ out_proj_weight=self.c_proj.weight,
+ out_proj_bias=self.c_proj.bias,
+ use_separate_proj_weight=True,
+ training=self.training,
+ need_weights=False
+ )
+
+ return x[0]
+
+
+class ModifiedResNet(nn.Module):
+ """
+ A ResNet class that is similar to torchvision's but contains the following changes:
+ - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
+ - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
+ - The final pooling layer is a QKV attention instead of an average pool
+ """
+
+ def __init__(self, layers, output_dim, heads, image_size=224, width=64):
+ super().__init__()
+ self.output_dim = output_dim
+ self.image_size = image_size
+
+ # the 3-layer stem
+ self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
+ self.bn1 = nn.BatchNorm2d(width // 2)
+ self.act1 = nn.ReLU(inplace=True)
+ self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
+ self.bn2 = nn.BatchNorm2d(width // 2)
+ self.act2 = nn.ReLU(inplace=True)
+ self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
+ self.bn3 = nn.BatchNorm2d(width)
+ self.act3 = nn.ReLU(inplace=True)
+ self.avgpool = nn.AvgPool2d(2)
+
+ # residual layers
+ self._inplanes = width # this is a *mutable* variable used during construction
+ self.layer1 = self._make_layer(width, layers[0])
+ self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
+ self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
+ self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
+
+ embed_dim = width * 32 # the ResNet feature dimension
+ self.attnpool = AttentionPool2d(image_size // 32, embed_dim, heads, output_dim)
+
+ self.init_parameters()
+
+ def _make_layer(self, planes, blocks, stride=1):
+ layers = [Bottleneck(self._inplanes, planes, stride)]
+
+ self._inplanes = planes * Bottleneck.expansion
+ for _ in range(1, blocks):
+ layers.append(Bottleneck(self._inplanes, planes))
+
+ return nn.Sequential(*layers)
+
+ def init_parameters(self):
+ if self.attnpool is not None:
+ std = self.attnpool.c_proj.in_features ** -0.5
+ nn.init.normal_(self.attnpool.q_proj.weight, std=std)
+ nn.init.normal_(self.attnpool.k_proj.weight, std=std)
+ nn.init.normal_(self.attnpool.v_proj.weight, std=std)
+ nn.init.normal_(self.attnpool.c_proj.weight, std=std)
+
+ for resnet_block in [self.layer1, self.layer2, self.layer3, self.layer4]:
+ for name, param in resnet_block.named_parameters():
+ if name.endswith("bn3.weight"):
+ nn.init.zeros_(param)
+
+ def lock(self, unlocked_groups=0, freeze_bn_stats=False):
+ assert unlocked_groups == 0, 'partial locking not currently supported for this model'
+ for param in self.parameters():
+ param.requires_grad = False
+ if freeze_bn_stats:
+ freeze_batch_norm_2d(self)
+
+ @torch.jit.ignore
+ def set_grad_checkpointing(self, enable=True):
+ # FIXME support for non-transformer
+ pass
+
+ def stem(self, x):
+ x = self.act1(self.bn1(self.conv1(x)))
+ x = self.act2(self.bn2(self.conv2(x)))
+ x = self.act3(self.bn3(self.conv3(x)))
+ x = self.avgpool(x)
+ return x
+
+ def forward(self, x):
+ x = self.stem(x)
+ x = self.layer1(x)
+ x = self.layer2(x)
+ x = self.layer3(x)
+ x = self.layer4(x)
+ x = self.attnpool(x)
+
+ return x
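+
+# Illustrative sketch (commented; not part of the original file): CLIP's RN50-style tower
+# corresponds to
+#
+#   model = ModifiedResNet(layers=(3, 4, 6, 3), output_dim=1024, heads=32, image_size=224, width=64)
+#   out = model(torch.randn(1, 3, 224, 224))   # -> shape [1, 1024] after attention pooling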
diff --git a/eva_clip/openai.py b/eva_clip/openai.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc4e13e876d6a7a3463b457e62c517cb063b1356
--- /dev/null
+++ b/eva_clip/openai.py
@@ -0,0 +1,144 @@
+""" OpenAI pretrained model functions
+
+Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
+"""
+
+import os
+import warnings
+from typing import List, Optional, Union
+
+import torch
+
+from .model import build_model_from_openai_state_dict, convert_weights_to_lp, get_cast_dtype
+from .pretrained import get_pretrained_url, list_pretrained_models_by_tag, download_pretrained_from_url
+
+__all__ = ["list_openai_models", "load_openai_model"]
+
+
+def list_openai_models() -> List[str]:
+ """Returns the names of available CLIP models"""
+ return list_pretrained_models_by_tag('openai')
+
+
+def load_openai_model(
+ name: str,
+ precision: Optional[str] = None,
+ device: Optional[Union[str, torch.device]] = None,
+ jit: bool = True,
+ cache_dir: Optional[str] = None,
+):
+ """Load a CLIP model
+
+ Parameters
+ ----------
+ name : str
+ A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
+ precision: str
+ Model precision, if None defaults to 'fp32' if device == 'cpu' else 'fp16'.
+ device : Union[str, torch.device]
+ The device to put the loaded model
+ jit : bool
+ Whether to load the optimized JIT model (default) or more hackable non-JIT model.
+ cache_dir : Optional[str]
+ The directory to cache the downloaded model weights
+
+ Returns
+ -------
+ model : torch.nn.Module
+ The CLIP model
+ preprocess : Callable[[PIL.Image], torch.Tensor]
+ A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
+ """
+ if device is None:
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ if precision is None:
+ precision = 'fp32' if device == 'cpu' else 'fp16'
+
+ if get_pretrained_url(name, 'openai'):
+ model_path = download_pretrained_from_url(get_pretrained_url(name, 'openai'), cache_dir=cache_dir)
+ elif os.path.isfile(name):
+ model_path = name
+ else:
+ raise RuntimeError(f"Model {name} not found; available models = {list_openai_models()}")
+
+ try:
+ # loading JIT archive
+ model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
+ state_dict = None
+ except RuntimeError:
+ # loading saved state dict
+ if jit:
+ warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
+ jit = False
+ state_dict = torch.load(model_path, map_location="cpu")
+
+ if not jit:
+ # Build a non-jit model from the OpenAI jitted model state dict
+ cast_dtype = get_cast_dtype(precision)
+ try:
+ model = build_model_from_openai_state_dict(state_dict or model.state_dict(), cast_dtype=cast_dtype)
+ except KeyError:
+ sd = {k[7:]: v for k, v in state_dict["state_dict"].items()}
+ model = build_model_from_openai_state_dict(sd, cast_dtype=cast_dtype)
+
+ # model from OpenAI state dict is in manually cast fp16 mode, must be converted for AMP/fp32/bf16 use
+ model = model.to(device)
+ if precision.startswith('amp') or precision == 'fp32':
+ model.float()
+ elif precision == 'bf16':
+ convert_weights_to_lp(model, dtype=torch.bfloat16)
+
+ return model
+
+ # patch the device names
+ device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
+ device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
+
+ def patch_device(module):
+ try:
+ graphs = [module.graph] if hasattr(module, "graph") else []
+ except RuntimeError:
+ graphs = []
+
+ if hasattr(module, "forward1"):
+ graphs.append(module.forward1.graph)
+
+ for graph in graphs:
+ for node in graph.findAllNodes("prim::Constant"):
+ if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
+ node.copyAttributes(device_node)
+
+ model.apply(patch_device)
+ patch_device(model.encode_image)
+ patch_device(model.encode_text)
+
+ # patch dtype to float32 (typically for CPU)
+ if precision == 'fp32':
+ float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
+ float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
+ float_node = float_input.node()
+
+ def patch_float(module):
+ try:
+ graphs = [module.graph] if hasattr(module, "graph") else []
+ except RuntimeError:
+ graphs = []
+
+ if hasattr(module, "forward1"):
+ graphs.append(module.forward1.graph)
+
+ for graph in graphs:
+ for node in graph.findAllNodes("aten::to"):
+ inputs = list(node.inputs())
+ for i in [1, 2]: # dtype can be the second or third argument to aten::to()
+ if inputs[i].node()["value"] == 5:
+ inputs[i].node().copyAttributes(float_node)
+
+ model.apply(patch_float)
+ patch_float(model.encode_image)
+ patch_float(model.encode_text)
+ model.float()
+
+ # ensure image_size attr available at consistent location for both jit and non-jit
+ model.visual.image_size = model.input_resolution.item()
+ return model
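+
+# Illustrative usage (commented sketch; the model name follows the entries in pretrained.py):
+#
+#   model = load_openai_model('OpenaiCLIP-B-32', precision='fp16', device='cuda', jit=False)
+#   image_features = model.encode_image(images)   # images: preprocessed [N, 3, 224, 224] tensor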
diff --git a/eva_clip/pretrained.py b/eva_clip/pretrained.py
new file mode 100644
index 0000000000000000000000000000000000000000..a1e55dcf36a0e7dbd4c13b4ca2d7cb460e4c3547
--- /dev/null
+++ b/eva_clip/pretrained.py
@@ -0,0 +1,332 @@
+import hashlib
+import os
+import urllib
+import warnings
+from functools import partial
+from typing import Dict, Union
+
+from tqdm import tqdm
+
+try:
+ from huggingface_hub import hf_hub_download
+ _has_hf_hub = True
+except ImportError:
+ hf_hub_download = None
+ _has_hf_hub = False
+
+
+def _pcfg(url='', hf_hub='', filename='', mean=None, std=None):
+ return dict(
+ url=url,
+ hf_hub=hf_hub,
+ mean=mean,
+ std=std,
+ )
+
+_VITB32 = dict(
+ openai=_pcfg(
+ "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt"),
+ laion400m_e31=_pcfg(
+ "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e31-d867053b.pt"),
+ laion400m_e32=_pcfg(
+ "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e32-46683a32.pt"),
+ laion2b_e16=_pcfg(
+ "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-laion2b_e16-af8dbd0c.pth"),
+ laion2b_s34b_b79k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-laion2B-s34B-b79K/')
+)
+
+_VITB32_quickgelu = dict(
+ openai=_pcfg(
+ "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt"),
+ laion400m_e31=_pcfg(
+ "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e31-d867053b.pt"),
+ laion400m_e32=_pcfg(
+ "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e32-46683a32.pt"),
+)
+
+_VITB16 = dict(
+ openai=_pcfg(
+ "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt"),
+ laion400m_e31=_pcfg(
+ "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16-laion400m_e31-00efa78f.pt"),
+ laion400m_e32=_pcfg(
+ "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16-laion400m_e32-55e67d44.pt"),
+ laion2b_s34b_b88k=_pcfg(hf_hub='laion/CLIP-ViT-B-16-laion2B-s34B-b88K/'),
+)
+
+_EVAB16 = dict(
+ eva=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_B_psz14to16.pt'),
+ eva02=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_B_psz14to16.pt'),
+ eva_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_B_psz16_s8B.pt'),
+ eva02_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_B_psz16_s8B.pt'),
+)
+
+_VITB16_PLUS_240 = dict(
+ laion400m_e31=_pcfg(
+ "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16_plus_240-laion400m_e31-8fb26589.pt"),
+ laion400m_e32=_pcfg(
+ "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16_plus_240-laion400m_e32-699c4b84.pt"),
+)
+
+_VITL14 = dict(
+ openai=_pcfg(
+ "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt"),
+ laion400m_e31=_pcfg(
+ "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_l_14-laion400m_e31-69988bb6.pt"),
+ laion400m_e32=_pcfg(
+ "https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_l_14-laion400m_e32-3d133497.pt"),
+ laion2b_s32b_b82k=_pcfg(
+ hf_hub='laion/CLIP-ViT-L-14-laion2B-s32B-b82K/',
+ mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
+)
+
+_EVAL14 = dict(
+ eva=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_L_psz14.pt'),
+ eva02=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_L_psz14.pt'),
+ eva_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_L_psz14_s4B.pt'),
+ eva02_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_L_psz14_s4B.pt'),
+)
+
+_VITL14_336 = dict(
+ openai=_pcfg(
+ "https://openaipublic.azureedge.net/clip/models/3035c92b350959924f9f00213499208652fc7ea050643e8b385c2dac08641f02/ViT-L-14-336px.pt"),
+)
+
+_EVAL14_336 = dict(
+ eva_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_L_336_psz14_s6B.pt'),
+ eva02_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_L_336_psz14_s6B.pt'),
+ eva_clip_224to336=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_L_psz14_224to336.pt'),
+ eva02_clip_224to336=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_L_psz14_224to336.pt'),
+)
+
+_VITH14 = dict(
+ laion2b_s32b_b79k=_pcfg(hf_hub='laion/CLIP-ViT-H-14-laion2B-s32B-b79K/'),
+)
+
+_VITg14 = dict(
+ laion2b_s12b_b42k=_pcfg(hf_hub='laion/CLIP-ViT-g-14-laion2B-s12B-b42K/'),
+ laion2b_s34b_b88k=_pcfg(hf_hub='laion/CLIP-ViT-g-14-laion2B-s34B-b88K/'),
+)
+
+_EVAg14 = dict(
+ eva=_pcfg(hf_hub='QuanSun/EVA-CLIP/'),
+ eva01=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA01_g_psz14.pt'),
+ eva_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA01_CLIP_g_14_psz14_s11B.pt'),
+ eva01_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA01_CLIP_g_14_psz14_s11B.pt'),
+)
+
+_EVAg14_PLUS = dict(
+ eva=_pcfg(hf_hub='QuanSun/EVA-CLIP/'),
+ eva01=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA01_g_psz14.pt'),
+ eva_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA01_CLIP_g_14_plus_psz14_s11B.pt'),
+ eva01_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA01_CLIP_g_14_plus_psz14_s11B.pt'),
+)
+
+_VITbigG14 = dict(
+ laion2b_s39b_b160k=_pcfg(hf_hub='laion/CLIP-ViT-bigG-14-laion2B-39B-b160k/'),
+)
+
+_EVAbigE14 = dict(
+ eva=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_E_psz14.pt'),
+ eva02=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_E_psz14.pt'),
+ eva_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_E_psz14_s4B.pt'),
+ eva02_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_E_psz14_s4B.pt'),
+)
+
+_EVAbigE14_PLUS = dict(
+ eva=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_E_psz14.pt'),
+ eva02=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_E_psz14.pt'),
+ eva_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_E_psz14_plus_s9B.pt'),
+ eva02_clip=_pcfg(hf_hub='QuanSun/EVA-CLIP/EVA02_CLIP_E_psz14_plus_s9B.pt'),
+)
+
+
+_PRETRAINED = {
+ # "ViT-B-32": _VITB32,
+ "OpenaiCLIP-B-32": _VITB32,
+ "OpenCLIP-B-32": _VITB32,
+
+ # "ViT-B-32-quickgelu": _VITB32_quickgelu,
+ "OpenaiCLIP-B-32-quickgelu": _VITB32_quickgelu,
+ "OpenCLIP-B-32-quickgelu": _VITB32_quickgelu,
+
+ # "ViT-B-16": _VITB16,
+ "OpenaiCLIP-B-16": _VITB16,
+ "OpenCLIP-B-16": _VITB16,
+
+ "EVA02-B-16": _EVAB16,
+ "EVA02-CLIP-B-16": _EVAB16,
+
+ # "ViT-B-16-plus-240": _VITB16_PLUS_240,
+ "OpenCLIP-B-16-plus-240": _VITB16_PLUS_240,
+
+ # "ViT-L-14": _VITL14,
+ "OpenaiCLIP-L-14": _VITL14,
+ "OpenCLIP-L-14": _VITL14,
+
+ "EVA02-L-14": _EVAL14,
+ "EVA02-CLIP-L-14": _EVAL14,
+
+ # "ViT-L-14-336": _VITL14_336,
+ "OpenaiCLIP-L-14-336": _VITL14_336,
+
+ "EVA02-CLIP-L-14-336": _EVAL14_336,
+
+ # "ViT-H-14": _VITH14,
+ # "ViT-g-14": _VITg14,
+ "OpenCLIP-H-14": _VITH14,
+ "OpenCLIP-g-14": _VITg14,
+
+ "EVA01-CLIP-g-14": _EVAg14,
+ "EVA01-CLIP-g-14-plus": _EVAg14_PLUS,
+
+ # "ViT-bigG-14": _VITbigG14,
+ "OpenCLIP-bigG-14": _VITbigG14,
+
+ "EVA02-CLIP-bigE-14": _EVAbigE14,
+ "EVA02-CLIP-bigE-14-plus": _EVAbigE14_PLUS,
+}
+
+
+def _clean_tag(tag: str):
+ # normalize pretrained tags
+ return tag.lower().replace('-', '_')
+
+
+def list_pretrained(as_str: bool = False):
+ """ returns list of pretrained models
+ Returns a tuple (model_name, pretrain_tag) by default or 'name:tag' if as_str == True
+ """
+ return [':'.join([k, t]) if as_str else (k, t) for k in _PRETRAINED.keys() for t in _PRETRAINED[k].keys()]
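+
+# e.g. list_pretrained(as_str=True) yields entries such as 'OpenaiCLIP-B-32:openai' and
+# 'EVA02-CLIP-B-16:eva02_clip', one '<model_name>:<tag>' string per pretrained weight entry.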
+
+
+def list_pretrained_models_by_tag(tag: str):
+ """ return all models having the specified pretrain tag """
+ models = []
+ tag = _clean_tag(tag)
+ for k in _PRETRAINED.keys():
+ if tag in _PRETRAINED[k]:
+ models.append(k)
+ return models
+
+
+def list_pretrained_tags_by_model(model: str):
+ """ return all pretrain tags for the specified model architecture """
+ tags = []
+ if model in _PRETRAINED:
+ tags.extend(_PRETRAINED[model].keys())
+ return tags
+
+
+def is_pretrained_cfg(model: str, tag: str):
+ if model not in _PRETRAINED:
+ return False
+ return _clean_tag(tag) in _PRETRAINED[model]
+
+
+def get_pretrained_cfg(model: str, tag: str):
+ if model not in _PRETRAINED:
+ return {}
+ model_pretrained = _PRETRAINED[model]
+ return model_pretrained.get(_clean_tag(tag), {})
+
+
+def get_pretrained_url(model: str, tag: str):
+ cfg = get_pretrained_cfg(model, _clean_tag(tag))
+ return cfg.get('url', '')
+
+
+def download_pretrained_from_url(
+ url: str,
+ cache_dir: Union[str, None] = None,
+):
+ if not cache_dir:
+ cache_dir = os.path.expanduser("~/.cache/clip")
+ os.makedirs(cache_dir, exist_ok=True)
+ filename = os.path.basename(url)
+
+ if 'openaipublic' in url:
+ expected_sha256 = url.split("/")[-2]
+ elif 'mlfoundations' in url:
+ expected_sha256 = os.path.splitext(filename)[0].split("-")[-1]
+ else:
+ expected_sha256 = ''
+
+ download_target = os.path.join(cache_dir, filename)
+
+ if os.path.exists(download_target) and not os.path.isfile(download_target):
+ raise RuntimeError(f"{download_target} exists and is not a regular file")
+
+ if os.path.isfile(download_target):
+ if expected_sha256:
+ if hashlib.sha256(open(download_target, "rb").read()).hexdigest().startswith(expected_sha256):
+ return download_target
+ else:
+ warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
+ else:
+ return download_target
+
+ with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
+ with tqdm(total=int(source.headers.get("Content-Length")), ncols=80, unit='iB', unit_scale=True) as loop:
+ while True:
+ buffer = source.read(8192)
+ if not buffer:
+ break
+
+ output.write(buffer)
+ loop.update(len(buffer))
+
+ if expected_sha256 and not hashlib.sha256(open(download_target, "rb").read()).hexdigest().startswith(expected_sha256):
+        raise RuntimeError("Model has been downloaded but the SHA256 checksum does not match")
+
+ return download_target
+
+
+def has_hf_hub(necessary=False):
+ if not _has_hf_hub and necessary:
+ # if no HF Hub module installed, and it is necessary to continue, raise error
+ raise RuntimeError(
+ 'Hugging Face hub model specified but package not installed. Run `pip install huggingface_hub`.')
+ return _has_hf_hub
+
+
+def download_pretrained_from_hf(
+ model_id: str,
+ filename: str = 'open_clip_pytorch_model.bin',
+ revision=None,
+ cache_dir: Union[str, None] = None,
+):
+ has_hf_hub(True)
+ cached_file = hf_hub_download(model_id, filename, revision=revision, cache_dir=cache_dir)
+ return cached_file
+
+
+def download_pretrained(
+ cfg: Dict,
+ force_hf_hub: bool = False,
+ cache_dir: Union[str, None] = None,
+):
+ target = ''
+ if not cfg:
+ return target
+
+ download_url = cfg.get('url', '')
+ download_hf_hub = cfg.get('hf_hub', '')
+ if download_hf_hub and force_hf_hub:
+ # use HF hub even if url exists
+ download_url = ''
+
+ if download_url:
+ target = download_pretrained_from_url(download_url, cache_dir=cache_dir)
+ elif download_hf_hub:
+ has_hf_hub(True)
+ # we assume the hf_hub entries in pretrained config combine model_id + filename in
+ # 'org/model_name/filename.pt' form. To specify just the model id w/o filename and
+ # use 'open_clip_pytorch_model.bin' default, there must be a trailing slash 'org/model_name/'.
+ model_id, filename = os.path.split(download_hf_hub)
+ if filename:
+ target = download_pretrained_from_hf(model_id, filename=filename, cache_dir=cache_dir)
+ else:
+ target = download_pretrained_from_hf(model_id, cache_dir=cache_dir)
+
+ return target
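+
+ # Minimal usage sketch (hypothetical cfg values, not part of the upstream open_clip code):
+ #   download_pretrained({'url': 'https://example.com/model.pt'})                          # direct URL download
+ #   download_pretrained({'hf_hub': 'some-org/some-model/open_clip_pytorch_model.bin'})    # explicit HF filename
+ #   download_pretrained({'hf_hub': 'some-org/some-model/'})                               # trailing slash -> default filename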
diff --git a/eva_clip/rope.py b/eva_clip/rope.py
new file mode 100644
index 0000000000000000000000000000000000000000..69030c35ea7b6b4f298daebbee5717f3fa1254ab
--- /dev/null
+++ b/eva_clip/rope.py
@@ -0,0 +1,137 @@
+from math import pi
+import torch
+from torch import nn
+from einops import rearrange, repeat
+import logging
+
+def broadcat(tensors, dim = -1):
+ num_tensors = len(tensors)
+ shape_lens = set(list(map(lambda t: len(t.shape), tensors)))
+ assert len(shape_lens) == 1, 'tensors must all have the same number of dimensions'
+ shape_len = list(shape_lens)[0]
+ dim = (dim + shape_len) if dim < 0 else dim
+ dims = list(zip(*map(lambda t: list(t.shape), tensors)))
+ expandable_dims = [(i, val) for i, val in enumerate(dims) if i != dim]
+ assert all([*map(lambda t: len(set(t[1])) <= 2, expandable_dims)]), 'invalid dimensions for broadcastable concatenation'
+ max_dims = list(map(lambda t: (t[0], max(t[1])), expandable_dims))
+ expanded_dims = list(map(lambda t: (t[0], (t[1],) * num_tensors), max_dims))
+ expanded_dims.insert(dim, (dim, dims[dim]))
+ expandable_shapes = list(zip(*map(lambda t: t[1], expanded_dims)))
+ tensors = list(map(lambda t: t[0].expand(*t[1]), zip(tensors, expandable_shapes)))
+ return torch.cat(tensors, dim = dim)
+
+def rotate_half(x):
+ x = rearrange(x, '... (d r) -> ... d r', r = 2)
+ x1, x2 = x.unbind(dim = -1)
+ x = torch.stack((-x2, x1), dim = -1)
+ return rearrange(x, '... d r -> ... (d r)')
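+
+ # rotate_half pairs adjacent channels (x1, x2) -> (-x2, x1), e.g. [a, b, c, d] -> [-b, a, -d, c];
+ # together with the cos/sin buffers registered below, this applies the standard RoPE rotation
+ # t * cos + rotate_half(t) * sin.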
+
+
+class VisionRotaryEmbedding(nn.Module):
+ def __init__(
+ self,
+ dim,
+ pt_seq_len,
+ ft_seq_len=None,
+ custom_freqs = None,
+ freqs_for = 'lang',
+ theta = 10000,
+ max_freq = 10,
+ num_freqs = 1,
+ ):
+ super().__init__()
+ if custom_freqs:
+ freqs = custom_freqs
+ elif freqs_for == 'lang':
+ freqs = 1. / (theta ** (torch.arange(0, dim, 2)[:(dim // 2)].float() / dim))
+ elif freqs_for == 'pixel':
+ freqs = torch.linspace(1., max_freq / 2, dim // 2) * pi
+ elif freqs_for == 'constant':
+ freqs = torch.ones(num_freqs).float()
+ else:
+ raise ValueError(f'unknown modality {freqs_for}')
+
+ if ft_seq_len is None: ft_seq_len = pt_seq_len
+ t = torch.arange(ft_seq_len) / ft_seq_len * pt_seq_len
+
+ freqs_h = torch.einsum('..., f -> ... f', t, freqs)
+ freqs_h = repeat(freqs_h, '... n -> ... (n r)', r = 2)
+
+ freqs_w = torch.einsum('..., f -> ... f', t, freqs)
+ freqs_w = repeat(freqs_w, '... n -> ... (n r)', r = 2)
+
+ freqs = broadcat((freqs_h[:, None, :], freqs_w[None, :, :]), dim = -1)
+
+ self.register_buffer("freqs_cos", freqs.cos())
+ self.register_buffer("freqs_sin", freqs.sin())
+
+ logging.info(f'Shape of rope freq: {self.freqs_cos.shape}')
+
+ def forward(self, t, start_index = 0):
+ rot_dim = self.freqs_cos.shape[-1]
+ end_index = start_index + rot_dim
+ assert rot_dim <= t.shape[-1], f'feature dimension {t.shape[-1]} is not of sufficient size to rotate in all the positions {rot_dim}'
+ t_left, t, t_right = t[..., :start_index], t[..., start_index:end_index], t[..., end_index:]
+ t = (t * self.freqs_cos) + (rotate_half(t) * self.freqs_sin)
+
+ return torch.cat((t_left, t, t_right), dim = -1)
+
+class VisionRotaryEmbeddingFast(nn.Module):
+ def __init__(
+ self,
+ dim,
+ pt_seq_len,
+ ft_seq_len=None,
+ custom_freqs = None,
+ freqs_for = 'lang',
+ theta = 10000,
+ max_freq = 10,
+ num_freqs = 1,
+ patch_dropout = 0.
+ ):
+ super().__init__()
+ if custom_freqs:
+ freqs = custom_freqs
+ elif freqs_for == 'lang':
+ freqs = 1. / (theta ** (torch.arange(0, dim, 2)[:(dim // 2)].float() / dim))
+ elif freqs_for == 'pixel':
+ freqs = torch.linspace(1., max_freq / 2, dim // 2) * pi
+ elif freqs_for == 'constant':
+ freqs = torch.ones(num_freqs).float()
+ else:
+ raise ValueError(f'unknown modality {freqs_for}')
+
+ if ft_seq_len is None: ft_seq_len = pt_seq_len
+ t = torch.arange(ft_seq_len) / ft_seq_len * pt_seq_len
+
+ freqs = torch.einsum('..., f -> ... f', t, freqs)
+ freqs = repeat(freqs, '... n -> ... (n r)', r = 2)
+ freqs = broadcat((freqs[:, None, :], freqs[None, :, :]), dim = -1)
+
+ freqs_cos = freqs.cos().view(-1, freqs.shape[-1])
+ freqs_sin = freqs.sin().view(-1, freqs.shape[-1])
+
+ self.patch_dropout = patch_dropout
+
+ self.register_buffer("freqs_cos", freqs_cos)
+ self.register_buffer("freqs_sin", freqs_sin)
+
+ logging.info(f'Shape of rope freq: {self.freqs_cos.shape}')
+
+ def forward(self, t, patch_indices_keep=None):
+ if patch_indices_keep is not None:
+ batch = t.size()[0]
+ batch_indices = torch.arange(batch)
+ batch_indices = batch_indices[..., None]
+
+ freqs_cos = repeat(self.freqs_cos, 'i j -> n i m j', n=t.shape[0], m=t.shape[1])
+ freqs_sin = repeat(self.freqs_sin, 'i j -> n i m j', n=t.shape[0], m=t.shape[1])
+
+ freqs_cos = freqs_cos[batch_indices, patch_indices_keep]
+ freqs_cos = rearrange(freqs_cos, 'n i m j -> n m i j')
+ freqs_sin = freqs_sin[batch_indices, patch_indices_keep]
+ freqs_sin = rearrange(freqs_sin, 'n i m j -> n m i j')
+
+ return t * freqs_cos + rotate_half(t) * freqs_sin
+
+ return t * self.freqs_cos + rotate_half(t) * self.freqs_sin
\ No newline at end of file
diff --git a/eva_clip/timm_model.py b/eva_clip/timm_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..b58122c0b84fbda9e51867342823222234e17505
--- /dev/null
+++ b/eva_clip/timm_model.py
@@ -0,0 +1,122 @@
+""" timm model adapter
+
+Wraps timm (https://github.com/rwightman/pytorch-image-models) models for use as a vision tower in a CLIP model.
+"""
+import logging
+from collections import OrderedDict
+
+import torch
+import torch.nn as nn
+
+try:
+ import timm
+ from timm.models.layers import Mlp, to_2tuple
+ try:
+ # old timm imports < 0.8.1
+ from timm.models.layers.attention_pool2d import RotAttentionPool2d
+ from timm.models.layers.attention_pool2d import AttentionPool2d as AbsAttentionPool2d
+ except ImportError:
+ # new timm imports >= 0.8.1
+ from timm.layers import RotAttentionPool2d
+ from timm.layers import AttentionPool2d as AbsAttentionPool2d
+except ImportError:
+ timm = None
+
+from .utils import freeze_batch_norm_2d
+
+
+class TimmModel(nn.Module):
+ """ timm model adapter
+ # FIXME this adapter is a work in progress, may change in ways that break weight compat
+ """
+
+ def __init__(
+ self,
+ model_name,
+ embed_dim,
+ image_size=224,
+ pool='avg',
+ proj='linear',
+ proj_bias=False,
+ drop=0.,
+ pretrained=False):
+ super().__init__()
+ if timm is None:
+ raise RuntimeError("Please `pip install timm` to use timm models.")
+
+ self.image_size = to_2tuple(image_size)
+ self.trunk = timm.create_model(model_name, pretrained=pretrained)
+ feat_size = self.trunk.default_cfg.get('pool_size', None)
+ feature_ndim = 1 if not feat_size else 2
+ if pool in ('abs_attn', 'rot_attn'):
+ assert feature_ndim == 2
+ # if attn pooling used, remove both classifier and default pool
+ self.trunk.reset_classifier(0, global_pool='')
+ else:
+ # reset global pool if pool config set, otherwise leave as network default
+ reset_kwargs = dict(global_pool=pool) if pool else {}
+ self.trunk.reset_classifier(0, **reset_kwargs)
+ prev_chs = self.trunk.num_features
+
+ head_layers = OrderedDict()
+ if pool == 'abs_attn':
+ head_layers['pool'] = AbsAttentionPool2d(prev_chs, feat_size=feat_size, out_features=embed_dim)
+ prev_chs = embed_dim
+ elif pool == 'rot_attn':
+ head_layers['pool'] = RotAttentionPool2d(prev_chs, out_features=embed_dim)
+ prev_chs = embed_dim
+ else:
+ assert proj, 'projection layer needed if non-attention pooling is used.'
+
+ # NOTE attention pool ends with a projection layer, so proj should usually be set to '' if such pooling is used
+ if proj == 'linear':
+ head_layers['drop'] = nn.Dropout(drop)
+ head_layers['proj'] = nn.Linear(prev_chs, embed_dim, bias=proj_bias)
+ elif proj == 'mlp':
+ head_layers['mlp'] = Mlp(prev_chs, 2 * embed_dim, embed_dim, drop=drop, bias=(True, proj_bias))
+
+ self.head = nn.Sequential(head_layers)
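+
+ # Illustrative sketch, assuming timm's 'resnet50' is installed/available:
+ #   TimmModel('resnet50', embed_dim=512)  # avg-pooled trunk -> Dropout -> Linear(2048, 512)
+ # i.e. the trunk features are projected down to the CLIP embedding width.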
+
+ def lock(self, unlocked_groups=0, freeze_bn_stats=False):
+ """ lock modules
+ Args:
+ unlocked_groups (int): leave last n layer groups unlocked (default: 0)
+ """
+ if not unlocked_groups:
+ # lock full model
+ for param in self.trunk.parameters():
+ param.requires_grad = False
+ if freeze_bn_stats:
+ freeze_batch_norm_2d(self.trunk)
+ else:
+ # NOTE: partial freeze requires latest timm (master) branch and is subject to change
+ try:
+ # FIXME import here until API stable and in an official release
+ from timm.models.helpers import group_parameters, group_modules
+ except ImportError:
+ raise RuntimeError(
+ 'Please install latest timm `pip install git+https://github.com/rwightman/pytorch-image-models`')
+ matcher = self.trunk.group_matcher()
+ gparams = group_parameters(self.trunk, matcher)
+ max_layer_id = max(gparams.keys())
+ max_layer_id = max_layer_id - unlocked_groups
+ for group_idx in range(max_layer_id + 1):
+ group = gparams[group_idx]
+ for param in group:
+ self.trunk.get_parameter(param).requires_grad = False
+ if freeze_bn_stats:
+ gmodules = group_modules(self.trunk, matcher, reverse=True)
+ gmodules = {k for k, v in gmodules.items() if v <= max_layer_id}
+ freeze_batch_norm_2d(self.trunk, gmodules)
+
+ @torch.jit.ignore
+ def set_grad_checkpointing(self, enable=True):
+ try:
+ self.trunk.set_grad_checkpointing(enable)
+ except Exception as e:
+ logging.warning('grad checkpointing not supported for this timm image tower, continuing without...')
+
+ def forward(self, x):
+ x = self.trunk(x)
+ x = self.head(x)
+ return x
diff --git a/eva_clip/tokenizer.py b/eva_clip/tokenizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..41482f82aebbf197f4ee4e6c07c845a0d69dd7d6
--- /dev/null
+++ b/eva_clip/tokenizer.py
@@ -0,0 +1,201 @@
+""" CLIP tokenizer
+
+Copied from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
+"""
+import gzip
+import html
+import os
+from functools import lru_cache
+from typing import Union, List
+
+import ftfy
+import regex as re
+import torch
+
+# https://stackoverflow.com/q/62691279
+import os
+os.environ["TOKENIZERS_PARALLELISM"] = "false"
+
+
+@lru_cache()
+def default_bpe():
+ return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
+
+
+@lru_cache()
+def bytes_to_unicode():
+ """
+ Returns a mapping from utf-8 bytes to corresponding unicode strings.
+ The reversible bpe codes work on unicode strings.
+ This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
+ When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
+ This is a significant percentage of your normal, say, 32K bpe vocab.
+ To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
+ It also avoids mapping to whitespace/control characters that the bpe code barfs on.
+ """
+ bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
+ cs = bs[:]
+ n = 0
+ for b in range(2**8):
+ if b not in bs:
+ bs.append(b)
+ cs.append(2**8+n)
+ n += 1
+ cs = [chr(n) for n in cs]
+ return dict(zip(bs, cs))
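+
+ # Printable bytes map to themselves (e.g. bytes_to_unicode()[ord('a')] == 'a'), while bytes the
+ # BPE would otherwise choke on are shifted past 255, e.g. the space byte 32 maps to
+ # chr(256 + 32) == 'Ġ'.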
+
+
+def get_pairs(word):
+ """Return set of symbol pairs in a word.
+ Word is represented as tuple of symbols (symbols being variable-length strings).
+ """
+ pairs = set()
+ prev_char = word[0]
+ for char in word[1:]:
+ pairs.add((prev_char, char))
+ prev_char = char
+ return pairs
+
+
+def basic_clean(text):
+ text = ftfy.fix_text(text)
+ text = html.unescape(html.unescape(text))
+ return text.strip()
+
+
+def whitespace_clean(text):
+ text = re.sub(r'\s+', ' ', text)
+ text = text.strip()
+ return text
+
+
+class SimpleTokenizer(object):
+ def __init__(self, bpe_path: str = default_bpe(), special_tokens=None):
+ self.byte_encoder = bytes_to_unicode()
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
+ merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
+ merges = merges[1:49152-256-2+1]
+ merges = [tuple(merge.split()) for merge in merges]
+ vocab = list(bytes_to_unicode().values())
+ vocab = vocab + [v+'</w>' for v in vocab]
+ for merge in merges:
+ vocab.append(''.join(merge))
+ if not special_tokens:
+ special_tokens = ['<start_of_text>', '<end_of_text>']
+ else:
+ special_tokens = ['<start_of_text>', '<end_of_text>'] + special_tokens
+ vocab.extend(special_tokens)
+ self.encoder = dict(zip(vocab, range(len(vocab))))
+ self.decoder = {v: k for k, v in self.encoder.items()}
+ self.bpe_ranks = dict(zip(merges, range(len(merges))))
+ self.cache = {t:t for t in special_tokens}
+ special = "|".join(special_tokens)
+ self.pat = re.compile(special + r"""|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
+
+ self.vocab_size = len(self.encoder)
+ self.all_special_ids = [self.encoder[t] for t in special_tokens]
+
+ def bpe(self, token):
+ if token in self.cache:
+ return self.cache[token]
+ word = tuple(token[:-1]) + ( token[-1] + '</w>',)
+ pairs = get_pairs(word)
+
+ if not pairs:
+ return token+'</w>'
+
+ while True:
+ bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
+ if bigram not in self.bpe_ranks:
+ break
+ first, second = bigram
+ new_word = []
+ i = 0
+ while i < len(word):
+ try:
+ j = word.index(first, i)
+ new_word.extend(word[i:j])
+ i = j
+ except:
+ new_word.extend(word[i:])
+ break
+
+ if word[i] == first and i < len(word)-1 and word[i+1] == second:
+ new_word.append(first+second)
+ i += 2
+ else:
+ new_word.append(word[i])
+ i += 1
+ new_word = tuple(new_word)
+ word = new_word
+ if len(word) == 1:
+ break
+ else:
+ pairs = get_pairs(word)
+ word = ' '.join(word)
+ self.cache[token] = word
+ return word
+
+ def encode(self, text):
+ bpe_tokens = []
+ text = whitespace_clean(basic_clean(text)).lower()
+ for token in re.findall(self.pat, text):
+ token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
+ bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
+ return bpe_tokens
+
+ def decode(self, tokens):
+ text = ''.join([self.decoder[token] for token in tokens])
+ text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
+ return text
+
+
+_tokenizer = SimpleTokenizer()
+
+
+def tokenize(texts: Union[str, List[str]], context_length: int = 77) -> torch.LongTensor:
+ """
+ Returns the tokenized representation of given input string(s)
+
+ Parameters
+ ----------
+ texts : Union[str, List[str]]
+ An input string or a list of input strings to tokenize
+ context_length : int
+ The context length to use; all CLIP models use 77 as the context length
+
+ Returns
+ -------
+ A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
+ """
+ if isinstance(texts, str):
+ texts = [texts]
+
+ sot_token = _tokenizer.encoder["<start_of_text>"]
+ eot_token = _tokenizer.encoder["<end_of_text>"]
+ all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
+ result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
+
+ for i, tokens in enumerate(all_tokens):
+ if len(tokens) > context_length:
+ tokens = tokens[:context_length] # Truncate
+ tokens[-1] = eot_token
+ result[i, :len(tokens)] = torch.tensor(tokens)
+
+ return result
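+
+ # Example: tokenize(["a diagram", "a dog"]) returns a LongTensor of shape [2, 77]; each row is
+ # <start_of_text>, the BPE token ids, <end_of_text>, then zero padding (over-long inputs are
+ # truncated and re-terminated with <end_of_text>).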
+
+
+class HFTokenizer:
+ "HuggingFace tokenizer wrapper"
+ def __init__(self, tokenizer_name:str):
+ from transformers import AutoTokenizer
+ self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
+
+ def __call__(self, texts:Union[str, List[str]], context_length:int=77) -> torch.Tensor:
+ # same cleaning as for default tokenizer, except lowercasing
+ # adding lower (for case-sensitive tokenizers) will make it more robust but less sensitive to nuance
+ if isinstance(texts, str):
+ texts = [texts]
+ texts = [whitespace_clean(basic_clean(text)) for text in texts]
+ input_ids = self.tokenizer(texts, return_tensors='pt', max_length=context_length, padding='max_length', truncation=True).input_ids
+ return input_ids
diff --git a/eva_clip/transform.py b/eva_clip/transform.py
new file mode 100644
index 0000000000000000000000000000000000000000..39f3e4cf6cf9985131ae2ef254b59540904b02e7
--- /dev/null
+++ b/eva_clip/transform.py
@@ -0,0 +1,103 @@
+from typing import Optional, Sequence, Tuple
+
+import torch
+import torch.nn as nn
+import torchvision.transforms.functional as F
+
+from torchvision.transforms import Normalize, Compose, RandomResizedCrop, InterpolationMode, ToTensor, Resize, \
+ CenterCrop
+
+from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
+
+
+class ResizeMaxSize(nn.Module):
+
+ def __init__(self, max_size, interpolation=InterpolationMode.BICUBIC, fn='max', fill=0):
+ super().__init__()
+ if not isinstance(max_size, int):
+ raise TypeError(f"Size should be int. Got {type(max_size)}")
+ self.max_size = max_size
+ self.interpolation = interpolation
+ self.fn = min if fn == 'min' else max
+ self.fill = fill
+
+ def forward(self, img):
+ if isinstance(img, torch.Tensor):
+ height, width = img.shape[:2]
+ else:
+ width, height = img.size
+ scale = self.max_size / float(max(height, width))
+ if scale != 1.0:
+ new_size = tuple(round(dim * scale) for dim in (height, width))
+ img = F.resize(img, new_size, self.interpolation)
+ pad_h = self.max_size - new_size[0]
+ pad_w = self.max_size - new_size[1]
+ img = F.pad(img, padding=[pad_w//2, pad_h//2, pad_w - pad_w//2, pad_h - pad_h//2], fill=self.fill)
+ return img
+
+
+def _convert_to_rgb(image):
+ return image.convert('RGB')
+
+
+# class CatGen(nn.Module):
+# def __init__(self, num=4):
+# self.num = num
+# def mixgen_batch(image, text):
+# batch_size = image.shape[0]
+# index = np.random.permutation(batch_size)
+
+# cat_images = []
+# for i in range(batch_size):
+# # image mixup
+# image[i,:] = lam * image[i,:] + (1 - lam) * image[index[i],:]
+# # text concat
+# text[i] = tokenizer((str(text[i]) + " " + str(text[index[i]])))[0]
+# text = torch.stack(text)
+# return image, text
+
+
+def image_transform(
+ image_size: int,
+ is_train: bool,
+ mean: Optional[Tuple[float, ...]] = None,
+ std: Optional[Tuple[float, ...]] = None,
+ resize_longest_max: bool = False,
+ fill_color: int = 0,
+):
+ mean = mean or OPENAI_DATASET_MEAN
+ if not isinstance(mean, (list, tuple)):
+ mean = (mean,) * 3
+
+ std = std or OPENAI_DATASET_STD
+ if not isinstance(std, (list, tuple)):
+ std = (std,) * 3
+
+ if isinstance(image_size, (list, tuple)) and image_size[0] == image_size[1]:
+ # for square size, pass size as int so that Resize() uses aspect preserving shortest edge
+ image_size = image_size[0]
+
+ normalize = Normalize(mean=mean, std=std)
+ if is_train:
+ return Compose([
+ RandomResizedCrop(image_size, scale=(0.9, 1.0), interpolation=InterpolationMode.BICUBIC),
+ _convert_to_rgb,
+ ToTensor(),
+ normalize,
+ ])
+ else:
+ if resize_longest_max:
+ transforms = [
+ ResizeMaxSize(image_size, fill=fill_color)
+ ]
+ else:
+ transforms = [
+ Resize(image_size, interpolation=InterpolationMode.BICUBIC),
+ CenterCrop(image_size),
+ ]
+ transforms.extend([
+ _convert_to_rgb,
+ ToTensor(),
+ normalize,
+ ])
+ return Compose(transforms)
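+
+ # Minimal usage sketch (hypothetical file name, OpenAI mean/std defaults):
+ #   preprocess = image_transform(224, is_train=False)
+ #   x = preprocess(Image.open('photo.jpg'))  # torch.Tensor of shape [3, 224, 224]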
diff --git a/eva_clip/transformer.py b/eva_clip/transformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..33e89ff7aa8ff60ae65dcfc5d21cf9af4d214510
--- /dev/null
+++ b/eva_clip/transformer.py
@@ -0,0 +1,737 @@
+import os
+import logging
+from collections import OrderedDict
+import math
+from typing import Callable, Optional, Sequence
+import numpy as np
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+try:
+ from timm.models.layers import trunc_normal_
+except:
+ from timm.layers import trunc_normal_
+
+from .rope import VisionRotaryEmbedding, VisionRotaryEmbeddingFast
+from .utils import to_2tuple
+
+if os.getenv('ENV_TYPE') == 'deepspeed':
+ try:
+ import deepspeed
+ from deepspeed.runtime.activation_checkpointing.checkpointing import checkpoint
+ except:
+ print("Please 'pip install deepspeed'")
+ deepspeed = None
+ from torch.utils.checkpoint import checkpoint
+else:
+ from torch.utils.checkpoint import checkpoint
+
+try:
+ import xformers.ops as xops
+except ImportError:
+ xops = None
+ print("Please 'pip install xformers'")
+
+class LayerNormFp32(nn.LayerNorm):
+ """Subclass torch's LayerNorm to handle fp16 (by casting to float32 and back)."""
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ def forward(self, x: torch.Tensor):
+ output = F.layer_norm(
+ x.float(),
+ self.normalized_shape,
+ self.weight.float() if self.weight is not None else None,
+ self.bias.float() if self.bias is not None else None,
+ self.eps,
+ )
+ return output.type_as(x)
+
+
+class LayerNorm(nn.LayerNorm):
+ """Subclass torch's LayerNorm (with cast back to input dtype)."""
+
+ def forward(self, x: torch.Tensor):
+ orig_type = x.dtype
+ x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
+ return x.to(orig_type)
+
+class QuickGELU(nn.Module):
+ # NOTE This is slower than nn.GELU or nn.SiLU and uses more GPU memory
+ def forward(self, x: torch.Tensor):
+ return x * torch.sigmoid(1.702 * x)
+
+
+class LayerScale(nn.Module):
+ def __init__(self, dim, init_values=1e-5, inplace=False):
+ super().__init__()
+ self.inplace = inplace
+ self.gamma = nn.Parameter(init_values * torch.ones(dim))
+
+ def forward(self, x):
+ return x.mul_(self.gamma) if self.inplace else x * self.gamma
+
+class PatchDropout(nn.Module):
+ """
+ https://arxiv.org/abs/2212.00794
+ """
+
+ def __init__(self, prob, exclude_first_token=True):
+ super().__init__()
+ assert 0 <= prob < 1.
+ self.prob = prob
+ self.exclude_first_token = exclude_first_token # exclude CLS token
+ logging.info(f"os.getenv('RoPE')={os.getenv('RoPE')}")
+
+ def forward(self, x):
+ if not self.training or self.prob == 0.:
+ return x
+
+ if self.exclude_first_token:
+ cls_tokens, x = x[:, :1], x[:, 1:]
+ else:
+ cls_tokens = torch.jit.annotate(torch.Tensor, x[:, :1])
+
+ batch = x.size()[0]
+ num_tokens = x.size()[1]
+
+ batch_indices = torch.arange(batch)
+ batch_indices = batch_indices[..., None]
+
+ keep_prob = 1 - self.prob
+ num_patches_keep = max(1, int(num_tokens * keep_prob))
+
+ rand = torch.randn(batch, num_tokens)
+ patch_indices_keep = rand.topk(num_patches_keep, dim=-1).indices
+
+ x = x[batch_indices, patch_indices_keep]
+
+ if self.exclude_first_token:
+ x = torch.cat((cls_tokens, x), dim=1)
+
+ if self.training and os.getenv('RoPE') == '1':
+ return x, patch_indices_keep
+
+ return x
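+
+ # e.g. with prob=0.5 and a 257-token input (CLS + 256 patches), training keeps the CLS token
+ # plus a random 128 of the 256 patch tokens; outside training (or with prob=0) the input is
+ # returned unchanged.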
+
+
+def _in_projection_packed(
+ q: torch.Tensor,
+ k: torch.Tensor,
+ v: torch.Tensor,
+ w: torch.Tensor,
+ b: Optional[torch.Tensor] = None,
+ ):
+ """
+ https://github.com/pytorch/pytorch/blob/db2a237763eb8693a20788be94f8c192e762baa8/torch/nn/functional.py#L4726
+ """
+ E = q.size(-1)
+ if k is v:
+ if q is k:
+ # self-attention
+ return F.linear(q, w, b).chunk(3, dim=-1)
+ else:
+ # encoder-decoder attention
+ w_q, w_kv = w.split([E, E * 2])
+ if b is None:
+ b_q = b_kv = None
+ else:
+ b_q, b_kv = b.split([E, E * 2])
+ return (F.linear(q, w_q, b_q),) + F.linear(k, w_kv, b_kv).chunk(2, dim=-1)
+ else:
+ w_q, w_k, w_v = w.chunk(3)
+ if b is None:
+ b_q = b_k = b_v = None
+ else:
+ b_q, b_k, b_v = b.chunk(3)
+ return F.linear(q, w_q, b_q), F.linear(k, w_k, b_k), F.linear(v, w_v, b_v)
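+
+ # Splits the packed (3*E, E) in-projection into q/k/v: pure self-attention (q is k is v) uses one
+ # fused linear chunked three ways, shared key/value uses an E / 2E split of the weight, and the
+ # general case projects each tensor with its own E-row slice.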
+
+class Attention(nn.Module):
+ def __init__(
+ self,
+ dim,
+ num_heads=8,
+ qkv_bias=True,
+ scaled_cosine=False,
+ scale_heads=False,
+ logit_scale_max=math.log(1. / 0.01),
+ attn_drop=0.,
+ proj_drop=0.,
+ xattn=False,
+ rope=False
+ ):
+ super().__init__()
+ self.scaled_cosine = scaled_cosine
+ self.scale_heads = scale_heads
+ assert dim % num_heads == 0, 'dim should be divisible by num_heads'
+ self.num_heads = num_heads
+ self.head_dim = dim // num_heads
+ self.scale = self.head_dim ** -0.5
+ self.logit_scale_max = logit_scale_max
+
+ # keeping in_proj in this form (instead of nn.Linear) to match weight scheme of original
+ self.in_proj_weight = nn.Parameter(torch.randn((dim * 3, dim)) * self.scale)
+ if qkv_bias:
+ self.in_proj_bias = nn.Parameter(torch.zeros(dim * 3))
+ else:
+ self.in_proj_bias = None
+
+ if self.scaled_cosine:
+ self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))))
+ else:
+ self.logit_scale = None
+ self.attn_drop = nn.Dropout(attn_drop)
+ if self.scale_heads:
+ self.head_scale = nn.Parameter(torch.ones((num_heads, 1, 1)))
+ else:
+ self.head_scale = None
+ self.out_proj = nn.Linear(dim, dim)
+ self.out_drop = nn.Dropout(proj_drop)
+ self.xattn = xattn
+ self.xattn_drop = attn_drop
+ self.rope = rope
+
+ def forward(self, x, attn_mask: Optional[torch.Tensor] = None):
+ L, N, C = x.shape
+ q, k, v = F.linear(x, self.in_proj_weight, self.in_proj_bias).chunk(3, dim=-1)
+ if self.xattn:
+ q = q.contiguous().view(L, N, self.num_heads, -1).transpose(0, 1)
+ k = k.contiguous().view(L, N, self.num_heads, -1).transpose(0, 1)
+ v = v.contiguous().view(L, N, self.num_heads, -1).transpose(0, 1)
+
+ x = xops.memory_efficient_attention(
+ q, k, v,
+ p=self.xattn_drop,
+ scale=self.scale if self.logit_scale is None else None,
+ attn_bias=xops.LowerTriangularMask() if attn_mask is not None else None,
+ )
+ else:
+ q = q.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1)
+ k = k.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1)
+ v = v.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1)
+
+ if self.logit_scale is not None:
+ attn = torch.bmm(F.normalize(q, dim=-1), F.normalize(k, dim=-1).transpose(-1, -2))
+ logit_scale = torch.clamp(self.logit_scale, max=self.logit_scale_max).exp()
+ attn = attn.view(N, self.num_heads, L, L) * logit_scale
+ attn = attn.view(-1, L, L)
+ else:
+ q = q * self.scale
+ attn = torch.bmm(q, k.transpose(-1, -2))
+
+ if attn_mask is not None:
+ if attn_mask.dtype == torch.bool:
+ new_attn_mask = torch.zeros_like(attn_mask, dtype=q.dtype)
+ new_attn_mask.masked_fill_(attn_mask, float("-inf"))
+ attn_mask = new_attn_mask
+ attn += attn_mask
+
+ attn = attn.softmax(dim=-1)
+ attn = self.attn_drop(attn)
+
+ x = torch.bmm(attn, v)
+
+ if self.head_scale is not None:
+ x = x.view(N, self.num_heads, L, C) * self.head_scale
+ x = x.view(-1, L, C)
+ x = x.transpose(0, 1).reshape(L, N, C)
+ x = self.out_proj(x)
+ x = self.out_drop(x)
+ return x
+
+class CustomAttention(nn.Module):
+ def __init__(
+ self,
+ dim,
+ num_heads=8,
+ qkv_bias=True,
+ scaled_cosine=True,
+ scale_heads=False,
+ logit_scale_max=math.log(1. / 0.01),
+ attn_drop=0.,
+ proj_drop=0.,
+ xattn=False
+ ):
+ super().__init__()
+ self.scaled_cosine = scaled_cosine
+ self.scale_heads = scale_heads
+ assert dim % num_heads == 0, 'dim should be divisible by num_heads'
+ self.num_heads = num_heads
+ self.head_dim = dim // num_heads
+ self.scale = self.head_dim ** -0.5
+ self.logit_scale_max = logit_scale_max
+
+ # keeping in_proj in this form (instead of nn.Linear) to match weight scheme of original
+ self.in_proj_weight = nn.Parameter(torch.randn((dim * 3, dim)) * self.scale)
+ if qkv_bias:
+ self.in_proj_bias = nn.Parameter(torch.zeros(dim * 3))
+ else:
+ self.in_proj_bias = None
+
+ if self.scaled_cosine:
+ self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))))
+ else:
+ self.logit_scale = None
+ self.attn_drop = nn.Dropout(attn_drop)
+ if self.scale_heads:
+ self.head_scale = nn.Parameter(torch.ones((num_heads, 1, 1)))
+ else:
+ self.head_scale = None
+ self.out_proj = nn.Linear(dim, dim)
+ self.out_drop = nn.Dropout(proj_drop)
+ self.xattn = xattn
+ self.xattn_drop = attn_drop
+
+ def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
+ q, k, v = _in_projection_packed(query, key, value, self.in_proj_weight, self.in_proj_bias)
+ N_q, B_q, C_q = q.shape
+ N_k, B_k, C_k = k.shape
+ N_v, B_v, C_v = v.shape
+ if self.xattn:
+ # B, N, C -> B, N, num_heads, C
+ q = q.permute(1, 0, 2).reshape(B_q, N_q, self.num_heads, -1)
+ k = k.permute(1, 0, 2).reshape(B_k, N_k, self.num_heads, -1)
+ v = v.permute(1, 0, 2).reshape(B_v, N_v, self.num_heads, -1)
+
+ x = xops.memory_efficient_attention(
+ q, k, v,
+ p=self.xattn_drop,
+ scale=self.scale if self.logit_scale is None else None,
+ attn_bias=xops.LowerTriangularMask() if attn_mask is not None else None
+ )
+ else:
+ # B*H, L, C
+ q = q.contiguous().view(N_q, B_q * self.num_heads, -1).transpose(0, 1)
+ k = k.contiguous().view(N_k, B_k * self.num_heads, -1).transpose(0, 1)
+ v = v.contiguous().view(N_v, B_v * self.num_heads, -1).transpose(0, 1)
+
+ if self.logit_scale is not None:
+ # B*H, N_q, N_k
+ attn = torch.bmm(F.normalize(q, dim=-1), F.normalize(k, dim=-1).transpose(-1, -2))
+ logit_scale = torch.clamp(self.logit_scale, max=self.logit_scale_max).exp()
+ attn = attn.view(B_q, self.num_heads, N_q, N_k) * logit_scale
+ attn = attn.view(-1, N_q, N_k)
+ else:
+ q = q * self.scale
+ attn = torch.bmm(q, k.transpose(-1, -2))
+
+ if attn_mask is not None:
+ if attn_mask.dtype == torch.bool:
+ new_attn_mask = torch.zeros_like(attn_mask, dtype=q.dtype)
+ new_attn_mask.masked_fill_(attn_mask, float("-inf"))
+ attn_mask = new_attn_mask
+ attn += attn_mask
+
+ attn = attn.softmax(dim=-1)
+ attn = self.attn_drop(attn)
+
+ x = torch.bmm(attn, v)
+
+ if self.head_scale is not None:
+ x = x.view(B_q, self.num_heads, N_q, C_q) * self.head_scale
+ x = x.view(-1, N_q, C_q)
+ x = x.transpose(0, 1).reshape(N_q, B_q, C_q)
+ x = self.out_proj(x)
+ x = self.out_drop(x)
+ return x
+
+class CustomResidualAttentionBlock(nn.Module):
+ def __init__(
+ self,
+ d_model: int,
+ n_head: int,
+ mlp_ratio: float = 4.0,
+ ls_init_value: float = None,
+ act_layer: Callable = nn.GELU,
+ norm_layer: Callable = LayerNorm,
+ scale_cosine_attn: bool = False,
+ scale_heads: bool = False,
+ scale_attn: bool = False,
+ scale_fc: bool = False,
+ cross_attn: bool = False,
+ xattn: bool = False,
+ ):
+ super().__init__()
+
+ self.ln_1 = norm_layer(d_model)
+ self.ln_1_k = norm_layer(d_model) if cross_attn else self.ln_1
+ self.ln_1_v = norm_layer(d_model) if cross_attn else self.ln_1
+ self.attn = CustomAttention(
+ d_model, n_head,
+ qkv_bias=True,
+ attn_drop=0.,
+ proj_drop=0.,
+ scaled_cosine=scale_cosine_attn,
+ scale_heads=scale_heads,
+ xattn=xattn
+ )
+
+ self.ln_attn = norm_layer(d_model) if scale_attn else nn.Identity()
+ self.ls_1 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()
+
+ self.ln_2 = norm_layer(d_model)
+ mlp_width = int(d_model * mlp_ratio)
+ self.mlp = nn.Sequential(OrderedDict([
+ ("c_fc", nn.Linear(d_model, mlp_width)),
+ ('ln', norm_layer(mlp_width) if scale_fc else nn.Identity()),
+ ("gelu", act_layer()),
+ ("c_proj", nn.Linear(mlp_width, d_model))
+ ]))
+
+ self.ls_2 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()
+
+ def forward(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
+ q = q + self.ls_1(self.ln_attn(self.attn(self.ln_1(q), self.ln_1_k(k), self.ln_1_v(v), attn_mask=attn_mask)))
+ q = q + self.ls_2(self.mlp(self.ln_2(q)))
+ return q
+
+class CustomTransformer(nn.Module):
+ def __init__(
+ self,
+ width: int,
+ layers: int,
+ heads: int,
+ mlp_ratio: float = 4.0,
+ ls_init_value: float = None,
+ act_layer: Callable = nn.GELU,
+ norm_layer: Callable = LayerNorm,
+ scale_cosine_attn: bool = True,
+ scale_heads: bool = False,
+ scale_attn: bool = False,
+ scale_fc: bool = False,
+ cross_attn: bool = False,
+ xattn: bool = False,
+ ):
+ super().__init__()
+ self.width = width
+ self.layers = layers
+ self.grad_checkpointing = False
+ self.xattn = xattn
+
+ self.resblocks = nn.ModuleList([
+ CustomResidualAttentionBlock(
+ width,
+ heads,
+ mlp_ratio,
+ ls_init_value=ls_init_value,
+ act_layer=act_layer,
+ norm_layer=norm_layer,
+ scale_cosine_attn=scale_cosine_attn,
+ scale_heads=scale_heads,
+ scale_attn=scale_attn,
+ scale_fc=scale_fc,
+ cross_attn=cross_attn,
+ xattn=xattn)
+ for _ in range(layers)
+ ])
+
+ def get_cast_dtype(self) -> torch.dtype:
+ return self.resblocks[0].mlp.c_fc.weight.dtype
+
+ def forward(self, q: torch.Tensor, k: torch.Tensor = None, v: torch.Tensor = None, attn_mask: Optional[torch.Tensor] = None):
+ if k is None and v is None:
+ k = v = q
+ for r in self.resblocks:
+ if self.grad_checkpointing and not torch.jit.is_scripting():
+ q = checkpoint(r, q, k, v, attn_mask)
+ else:
+ q = r(q, k, v, attn_mask=attn_mask)
+ return q
+
+
+class ResidualAttentionBlock(nn.Module):
+ def __init__(
+ self,
+ d_model: int,
+ n_head: int,
+ mlp_ratio: float = 4.0,
+ ls_init_value: float = None,
+ act_layer: Callable = nn.GELU,
+ norm_layer: Callable = LayerNorm,
+ xattn: bool = False,
+ ):
+ super().__init__()
+
+ self.ln_1 = norm_layer(d_model)
+ if xattn:
+ self.attn = Attention(d_model, n_head, xattn=True)
+ else:
+ self.attn = nn.MultiheadAttention(d_model, n_head)
+ self.ls_1 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()
+
+ self.ln_2 = norm_layer(d_model)
+ mlp_width = int(d_model * mlp_ratio)
+ self.mlp = nn.Sequential(OrderedDict([
+ ("c_fc", nn.Linear(d_model, mlp_width)),
+ ("gelu", act_layer()),
+ ("c_proj", nn.Linear(mlp_width, d_model))
+ ]))
+
+ self.ls_2 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()
+ self.xattn = xattn
+
+ def attention(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
+ attn_mask = attn_mask.to(x.dtype) if attn_mask is not None else None
+ if self.xattn:
+ return self.attn(x, attn_mask=attn_mask)
+ return self.attn(x, x, x, need_weights=False, attn_mask=attn_mask)[0]
+
+ def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
+ x = x + self.ls_1(self.attention(self.ln_1(x), attn_mask=attn_mask))
+ x = x + self.ls_2(self.mlp(self.ln_2(x)))
+ return x
+
+class Transformer(nn.Module):
+ def __init__(
+ self,
+ width: int,
+ layers: int,
+ heads: int,
+ mlp_ratio: float = 4.0,
+ ls_init_value: float = None,
+ act_layer: Callable = nn.GELU,
+ norm_layer: Callable = LayerNorm,
+ xattn: bool = False,
+ ):
+ super().__init__()
+ self.width = width
+ self.layers = layers
+ self.grad_checkpointing = False
+
+ self.resblocks = nn.ModuleList([
+ ResidualAttentionBlock(
+ width, heads, mlp_ratio, ls_init_value=ls_init_value, act_layer=act_layer, norm_layer=norm_layer, xattn=xattn)
+ for _ in range(layers)
+ ])
+
+ def get_cast_dtype(self) -> torch.dtype:
+ return self.resblocks[0].mlp.c_fc.weight.dtype
+
+ def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
+ for r in self.resblocks:
+ if self.grad_checkpointing and not torch.jit.is_scripting():
+ x = checkpoint(r, x, attn_mask)
+ else:
+ x = r(x, attn_mask=attn_mask)
+ return x
+
+
+class VisionTransformer(nn.Module):
+ def __init__(
+ self,
+ image_size: int,
+ patch_size: int,
+ width: int,
+ layers: int,
+ heads: int,
+ mlp_ratio: float,
+ ls_init_value: float = None,
+ patch_dropout: float = 0.,
+ global_average_pool: bool = False,
+ output_dim: int = 512,
+ act_layer: Callable = nn.GELU,
+ norm_layer: Callable = LayerNorm,
+ xattn: bool = False,
+ ):
+ super().__init__()
+ self.image_size = to_2tuple(image_size)
+ self.patch_size = to_2tuple(patch_size)
+ self.grid_size = (self.image_size[0] // self.patch_size[0], self.image_size[1] // self.patch_size[1])
+ self.output_dim = output_dim
+ self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
+
+ scale = width ** -0.5
+ self.class_embedding = nn.Parameter(scale * torch.randn(width))
+ self.positional_embedding = nn.Parameter(scale * torch.randn(self.grid_size[0] * self.grid_size[1] + 1, width))
+
+ # setting a patch_dropout of 0. would mean it is disabled and this function would be the identity fn
+ self.patch_dropout = PatchDropout(patch_dropout) if patch_dropout > 0. else nn.Identity()
+ self.ln_pre = norm_layer(width)
+
+ self.transformer = Transformer(
+ width,
+ layers,
+ heads,
+ mlp_ratio,
+ ls_init_value=ls_init_value,
+ act_layer=act_layer,
+ norm_layer=norm_layer,
+ xattn=xattn
+ )
+
+ self.global_average_pool = global_average_pool
+ self.ln_post = norm_layer(width)
+ self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
+
+ def lock(self, unlocked_groups=0, freeze_bn_stats=False):
+ for param in self.parameters():
+ param.requires_grad = False
+
+ if unlocked_groups != 0:
+ groups = [
+ [
+ self.conv1,
+ self.class_embedding,
+ self.positional_embedding,
+ self.ln_pre,
+ ],
+ *self.transformer.resblocks[:-1],
+ [
+ self.transformer.resblocks[-1],
+ self.ln_post,
+ ],
+ self.proj,
+ ]
+
+ def _unlock(x):
+ if isinstance(x, Sequence):
+ for g in x:
+ _unlock(g)
+ else:
+ if isinstance(x, torch.nn.Parameter):
+ x.requires_grad = True
+ else:
+ for p in x.parameters():
+ p.requires_grad = True
+
+ _unlock(groups[-unlocked_groups:])
+
+ def get_num_layers(self):
+ return self.transformer.layers
+
+ @torch.jit.ignore
+ def set_grad_checkpointing(self, enable=True):
+ self.transformer.grad_checkpointing = enable
+
+ @torch.jit.ignore
+ def no_weight_decay(self):
+ return {'positional_embedding', 'class_embedding'}
+
+ def forward(self, x: torch.Tensor, return_all_features: bool=False):
+ x = self.conv1(x) # shape = [*, width, grid, grid]
+ x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
+ x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
+ x = torch.cat(
+ [self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device),
+ x], dim=1) # shape = [*, grid ** 2 + 1, width]
+ x = x + self.positional_embedding.to(x.dtype)
+
+ # a patch_dropout of 0. would mean it is disabled and this function would do nothing but return what was passed in
+ x = self.patch_dropout(x)
+ x = self.ln_pre(x)
+
+ x = x.permute(1, 0, 2) # NLD -> LND
+ x = self.transformer(x)
+ x = x.permute(1, 0, 2) # LND -> NLD
+
+ if not return_all_features:
+ if self.global_average_pool:
+ x = x.mean(dim=1) #x = x[:,1:,:].mean(dim=1)
+ else:
+ x = x[:, 0]
+
+ x = self.ln_post(x)
+
+ if self.proj is not None:
+ x = x @ self.proj
+
+ return x
+
+
+class TextTransformer(nn.Module):
+ def __init__(
+ self,
+ context_length: int = 77,
+ vocab_size: int = 49408,
+ width: int = 512,
+ heads: int = 8,
+ layers: int = 12,
+ ls_init_value: float = None,
+ output_dim: int = 512,
+ act_layer: Callable = nn.GELU,
+ norm_layer: Callable = LayerNorm,
+ xattn: bool= False,
+ attn_mask: bool = True
+ ):
+ super().__init__()
+ self.context_length = context_length
+ self.vocab_size = vocab_size
+ self.width = width
+ self.output_dim = output_dim
+
+ self.token_embedding = nn.Embedding(vocab_size, width)
+ self.positional_embedding = nn.Parameter(torch.empty(self.context_length, width))
+ self.transformer = Transformer(
+ width=width,
+ layers=layers,
+ heads=heads,
+ ls_init_value=ls_init_value,
+ act_layer=act_layer,
+ norm_layer=norm_layer,
+ xattn=xattn
+ )
+
+ self.xattn = xattn
+ self.ln_final = norm_layer(width)
+ self.text_projection = nn.Parameter(torch.empty(width, output_dim))
+
+ if attn_mask:
+ self.register_buffer('attn_mask', self.build_attention_mask(), persistent=False)
+ else:
+ self.attn_mask = None
+
+ self.init_parameters()
+
+ def init_parameters(self):
+ nn.init.normal_(self.token_embedding.weight, std=0.02)
+ nn.init.normal_(self.positional_embedding, std=0.01)
+
+ proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
+ attn_std = self.transformer.width ** -0.5
+ fc_std = (2 * self.transformer.width) ** -0.5
+ for block in self.transformer.resblocks:
+ nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
+ nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
+ nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
+ nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
+
+ if self.text_projection is not None:
+ nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)
+
+ @torch.jit.ignore
+ def set_grad_checkpointing(self, enable=True):
+ self.transformer.grad_checkpointing = enable
+
+ @torch.jit.ignore
+ def no_weight_decay(self):
+ # return {'positional_embedding', 'token_embedding'}
+ return {'positional_embedding'}
+
+ def get_num_layers(self):
+ return self.transformer.layers
+
+ def build_attention_mask(self):
+ # lazily create causal attention mask, with full attention between the vision tokens
+ # pytorch uses additive attention mask; fill with -inf
+ mask = torch.empty(self.context_length, self.context_length)
+ mask.fill_(float("-inf"))
+ mask.triu_(1) # zero out the lower diagonal
+ return mask
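+
+ # e.g. for context_length = 3 the additive mask is
+ # [[0., -inf, -inf],
+ #  [0.,   0., -inf],
+ #  [0.,   0.,   0.]]
+ # so each position can only attend to itself and earlier tokens.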
+
+ def forward(self, text, return_all_features: bool=False):
+ cast_dtype = self.transformer.get_cast_dtype()
+ x = self.token_embedding(text).to(cast_dtype) # [batch_size, n_ctx, d_model]
+
+ x = x + self.positional_embedding.to(cast_dtype)
+ x = x.permute(1, 0, 2) # NLD -> LND
+ x = self.transformer(x, attn_mask=self.attn_mask)
+ # x = self.transformer(x) # no attention mask is applied
+ x = x.permute(1, 0, 2) # LND -> NLD
+ x = self.ln_final(x)
+
+ if not return_all_features:
+ # x.shape = [batch_size, n_ctx, transformer.width]
+ # take features from the eot embedding (eot_token is the highest number in each sequence)
+ x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
+ return x
diff --git a/eva_clip/utils.py b/eva_clip/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..bdc5a7a451fdf8911ebbc816afbd2664ff348836
--- /dev/null
+++ b/eva_clip/utils.py
@@ -0,0 +1,326 @@
+from itertools import repeat
+import collections.abc
+import logging
+import math
+import numpy as np
+
+import torch
+from torch import nn as nn
+from torchvision.ops.misc import FrozenBatchNorm2d
+import torch.nn.functional as F
+
+# open CLIP
+def resize_clip_pos_embed(state_dict, model, interpolation: str = 'bicubic', seq_dim=1):
+ # Rescale the grid of position embeddings when loading from state_dict
+ old_pos_embed = state_dict.get('visual.positional_embedding', None)
+ if old_pos_embed is None or not hasattr(model.visual, 'grid_size'):
+ return
+ grid_size = to_2tuple(model.visual.grid_size)
+ extra_tokens = 1 # FIXME detect different token configs (ie no class token, or more)
+ new_seq_len = grid_size[0] * grid_size[1] + extra_tokens
+ if new_seq_len == old_pos_embed.shape[0]:
+ return
+
+ if extra_tokens:
+ pos_emb_tok, pos_emb_img = old_pos_embed[:extra_tokens], old_pos_embed[extra_tokens:]
+ else:
+ pos_emb_tok, pos_emb_img = None, old_pos_embed
+ old_grid_size = to_2tuple(int(math.sqrt(len(pos_emb_img))))
+
+ logging.info('Resizing position embedding grid-size from %s to %s', old_grid_size, grid_size)
+ pos_emb_img = pos_emb_img.reshape(1, old_grid_size[0], old_grid_size[1], -1).permute(0, 3, 1, 2)
+ pos_emb_img = F.interpolate(
+ pos_emb_img,
+ size=grid_size,
+ mode=interpolation,
+ align_corners=True,
+ )
+ pos_emb_img = pos_emb_img.permute(0, 2, 3, 1).reshape(1, grid_size[0] * grid_size[1], -1)[0]
+ if pos_emb_tok is not None:
+ new_pos_embed = torch.cat([pos_emb_tok, pos_emb_img], dim=0)
+ else:
+ new_pos_embed = pos_emb_img
+ state_dict['visual.positional_embedding'] = new_pos_embed
+
+
+def resize_visual_pos_embed(state_dict, model, interpolation: str = 'bicubic', seq_dim=1):
+ # Rescale the grid of position embeddings when loading from state_dict
+ old_pos_embed = state_dict.get('positional_embedding', None)
+ if old_pos_embed is None or not hasattr(model.visual, 'grid_size'):
+ return
+ grid_size = to_2tuple(model.visual.grid_size)
+ extra_tokens = 1 # FIXME detect different token configs (ie no class token, or more)
+ new_seq_len = grid_size[0] * grid_size[1] + extra_tokens
+ if new_seq_len == old_pos_embed.shape[0]:
+ return
+
+ if extra_tokens:
+ pos_emb_tok, pos_emb_img = old_pos_embed[:extra_tokens], old_pos_embed[extra_tokens:]
+ else:
+ pos_emb_tok, pos_emb_img = None, old_pos_embed
+ old_grid_size = to_2tuple(int(math.sqrt(len(pos_emb_img))))
+
+ logging.info('Resizing position embedding grid-size from %s to %s', old_grid_size, grid_size)
+ pos_emb_img = pos_emb_img.reshape(1, old_grid_size[0], old_grid_size[1], -1).permute(0, 3, 1, 2)
+ pos_emb_img = F.interpolate(
+ pos_emb_img,
+ size=grid_size,
+ mode=interpolation,
+ align_corners=True,
+ )
+ pos_emb_img = pos_emb_img.permute(0, 2, 3, 1).reshape(1, grid_size[0] * grid_size[1], -1)[0]
+ if pos_emb_tok is not None:
+ new_pos_embed = torch.cat([pos_emb_tok, pos_emb_img], dim=0)
+ else:
+ new_pos_embed = pos_emb_img
+ state_dict['positional_embedding'] = new_pos_embed
+
+def resize_evaclip_pos_embed(state_dict, model, interpolation: str = 'bicubic', seq_dim=1):
+ all_keys = list(state_dict.keys())
+ # interpolate position embedding
+ if 'visual.pos_embed' in state_dict:
+ pos_embed_checkpoint = state_dict['visual.pos_embed']
+ embedding_size = pos_embed_checkpoint.shape[-1]
+ num_patches = model.visual.patch_embed.num_patches
+ num_extra_tokens = model.visual.pos_embed.shape[-2] - num_patches
+ # height (== width) for the checkpoint position embedding
+ orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
+ # height (== width) for the new position embedding
+ new_size = int(num_patches ** 0.5)
+ # class_token and dist_token are kept unchanged
+ if orig_size != new_size:
+ print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
+ extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
+ # only the position tokens are interpolated
+ pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
+ pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
+ pos_tokens = torch.nn.functional.interpolate(
+ pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
+ pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
+ new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
+ state_dict['visual.pos_embed'] = new_pos_embed
+
+ patch_embed_proj = state_dict['visual.patch_embed.proj.weight']
+ patch_size = model.visual.patch_embed.patch_size
+ state_dict['visual.patch_embed.proj.weight'] = torch.nn.functional.interpolate(
+ patch_embed_proj.float(), size=patch_size, mode='bicubic', align_corners=False)
+
+
+def resize_eva_pos_embed(state_dict, model, interpolation: str = 'bicubic', seq_dim=1):
+ all_keys = list(state_dict.keys())
+ # interpolate position embedding
+ if 'pos_embed' in state_dict:
+ pos_embed_checkpoint = state_dict['pos_embed']
+ embedding_size = pos_embed_checkpoint.shape[-1]
+ num_patches = model.visual.patch_embed.num_patches
+ num_extra_tokens = model.visual.pos_embed.shape[-2] - num_patches
+ # height (== width) for the checkpoint position embedding
+ orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
+ # height (== width) for the new position embedding
+ new_size = int(num_patches ** 0.5)
+ # class_token and dist_token are kept unchanged
+ if orig_size != new_size:
+ print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
+ extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
+ # only the position tokens are interpolated
+ pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
+ pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
+ pos_tokens = torch.nn.functional.interpolate(
+ pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
+ pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
+ new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
+ state_dict['pos_embed'] = new_pos_embed
+
+ patch_embed_proj = state_dict['patch_embed.proj.weight']
+ patch_size = model.visual.patch_embed.patch_size
+ state_dict['patch_embed.proj.weight'] = torch.nn.functional.interpolate(
+ patch_embed_proj.float(), size=patch_size, mode='bicubic', align_corners=False)
+
+
+def resize_rel_pos_embed(state_dict, model, interpolation: str = 'bicubic', seq_dim=1):
+ all_keys = list(state_dict.keys())
+ for key in all_keys:
+ if "relative_position_index" in key:
+ state_dict.pop(key)
+
+ if "relative_position_bias_table" in key:
+ rel_pos_bias = state_dict[key]
+ src_num_pos, num_attn_heads = rel_pos_bias.size()
+ dst_num_pos, _ = model.visual.state_dict()[key].size()
+ dst_patch_shape = model.visual.patch_embed.patch_shape
+ if dst_patch_shape[0] != dst_patch_shape[1]:
+ raise NotImplementedError()
+ num_extra_tokens = dst_num_pos - (dst_patch_shape[0] * 2 - 1) * (dst_patch_shape[1] * 2 - 1)
+ src_size = int((src_num_pos - num_extra_tokens) ** 0.5)
+ dst_size = int((dst_num_pos - num_extra_tokens) ** 0.5)
+ if src_size != dst_size:
+ print("Position interpolate for %s from %dx%d to %dx%d" % (
+ key, src_size, src_size, dst_size, dst_size))
+ extra_tokens = rel_pos_bias[-num_extra_tokens:, :]
+ rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :]
+
+ def geometric_progression(a, r, n):
+ return a * (1.0 - r ** n) / (1.0 - r)
+
+ left, right = 1.01, 1.5
+ while right - left > 1e-6:
+ q = (left + right) / 2.0
+ gp = geometric_progression(1, q, src_size // 2)
+ if gp > dst_size // 2:
+ right = q
+ else:
+ left = q
+
+ # if q > 1.090307:
+ # q = 1.090307
+
+ dis = []
+ cur = 1
+ for i in range(src_size // 2):
+ dis.append(cur)
+ cur += q ** (i + 1)
+
+ r_ids = [-_ for _ in reversed(dis)]
+
+ x = r_ids + [0] + dis
+ y = r_ids + [0] + dis
+
+ t = dst_size // 2.0
+ dx = np.arange(-t, t + 0.1, 1.0)
+ dy = np.arange(-t, t + 0.1, 1.0)
+
+ print("Original positions = %s" % str(x))
+ print("Target positions = %s" % str(dx))
+
+ all_rel_pos_bias = []
+
+ for i in range(num_attn_heads):
+ z = rel_pos_bias[:, i].view(src_size, src_size).float().numpy()
+ # NOTE: interp2d is provided by SciPy (deprecated in recent SciPy releases);
+ # torch.nn.functional has no interp2d
+ from scipy import interpolate as scipy_interpolate
+ f = scipy_interpolate.interp2d(x, y, z, kind='cubic')
+ all_rel_pos_bias.append(
+ torch.Tensor(f(dx, dy)).contiguous().view(-1, 1).to(rel_pos_bias.device))
+
+ rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1)
+
+ new_rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), dim=0)
+ state_dict[key] = new_rel_pos_bias
+
+ # interpolate position embedding
+ if 'pos_embed' in state_dict:
+ pos_embed_checkpoint = state_dict['pos_embed']
+ embedding_size = pos_embed_checkpoint.shape[-1]
+ num_patches = model.visual.patch_embed.num_patches
+ num_extra_tokens = model.visual.pos_embed.shape[-2] - num_patches
+ # height (== width) for the checkpoint position embedding
+ orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
+ # height (== width) for the new position embedding
+ new_size = int(num_patches ** 0.5)
+ # class_token and dist_token are kept unchanged
+ if orig_size != new_size:
+ print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
+ extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
+ # only the position tokens are interpolated
+ pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
+ pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
+ pos_tokens = torch.nn.functional.interpolate(
+ pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
+ pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
+ new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
+ state_dict['pos_embed'] = new_pos_embed
+
+ patch_embed_proj = state_dict['patch_embed.proj.weight']
+ patch_size = model.visual.patch_embed.patch_size
+ state_dict['patch_embed.proj.weight'] = torch.nn.functional.interpolate(
+ patch_embed_proj.float(), size=patch_size, mode='bicubic', align_corners=False)
+
+
+def freeze_batch_norm_2d(module, module_match={}, name=''):
+ """
+ Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`. If `module` is
+ itself an instance of either `BatchNorm2d` or `SyncBatchNorm`, it is converted into `FrozenBatchNorm2d` and
+ returned. Otherwise, the module is walked recursively and submodules are converted in place.
+
+ Args:
+ module (torch.nn.Module): Any PyTorch module.
+ module_match (dict): Dictionary of full module names to freeze (all if empty)
+ name (str): Full module name (prefix)
+
+ Returns:
+ torch.nn.Module: Resulting module
+
+ Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762
+ """
+ res = module
+ is_match = True
+ if module_match:
+ is_match = name in module_match
+ if is_match and isinstance(module, (nn.modules.batchnorm.BatchNorm2d, nn.modules.batchnorm.SyncBatchNorm)):
+ res = FrozenBatchNorm2d(module.num_features)
+ res.num_features = module.num_features
+ res.affine = module.affine
+ if module.affine:
+ res.weight.data = module.weight.data.clone().detach()
+ res.bias.data = module.bias.data.clone().detach()
+ res.running_mean.data = module.running_mean.data
+ res.running_var.data = module.running_var.data
+ res.eps = module.eps
+ else:
+ for child_name, child in module.named_children():
+ full_child_name = '.'.join([name, child_name]) if name else child_name
+ new_child = freeze_batch_norm_2d(child, module_match, full_child_name)
+ if new_child is not child:
+ res.add_module(child_name, new_child)
+ return res
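+
+ # Usage sketch (hypothetical backbone): every BatchNorm2d/SyncBatchNorm is swapped for a
+ # FrozenBatchNorm2d carrying the same affine parameters and running statistics, e.g.
+ #   backbone = freeze_batch_norm_2d(torchvision.models.resnet50(weights=None))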
+
+
+# From PyTorch internals
+def _ntuple(n):
+ def parse(x):
+ if isinstance(x, collections.abc.Iterable):
+ return x
+ return tuple(repeat(x, n))
+ return parse
+
+
+to_1tuple = _ntuple(1)
+to_2tuple = _ntuple(2)
+to_3tuple = _ntuple(3)
+to_4tuple = _ntuple(4)
+to_ntuple = lambda n, x: _ntuple(n)(x)
+
+
+def is_logging(args):
+ def is_global_master(args):
+ return args.rank == 0
+
+ def is_local_master(args):
+ return args.local_rank == 0
+
+ def is_master(args, local=False):
+ return is_local_master(args) if local else is_global_master(args)
+ return is_master
+
+
+class AllGather(torch.autograd.Function):
+ """An autograd function that performs allgather on a tensor.
+ Performs all_gather operation on the provided tensors.
+ *** Warning ***: torch.distributed.all_gather has no gradient.
+ """
+
+ @staticmethod
+ def forward(ctx, tensor, rank, world_size):
+ tensors_gather = [torch.empty_like(tensor) for _ in range(world_size)]
+ torch.distributed.all_gather(tensors_gather, tensor)
+ ctx.rank = rank
+ ctx.batch_size = tensor.shape[0]
+ return torch.cat(tensors_gather, 0)
+
+ @staticmethod
+ def backward(ctx, grad_output):
+ return (
+ grad_output[ctx.batch_size * ctx.rank: ctx.batch_size * (ctx.rank + 1)],
+ None,
+ None
+ )
+
+allgather = AllGather.apply
\ No newline at end of file
diff --git a/example_inputs/unsplash/baruk-granda-cfLL_jHQ-Iw-unsplash.jpg b/example_inputs/unsplash/baruk-granda-cfLL_jHQ-Iw-unsplash.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..a33c8ee56b056f8eff86138d6e7700055325d0df
--- /dev/null
+++ b/example_inputs/unsplash/baruk-granda-cfLL_jHQ-Iw-unsplash.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b1e54dc1ef8c74b864bba035db463e5641f2e1bfd0c5c4822bd68ad784deef42
+size 692842
diff --git a/example_inputs/unsplash/gus-tu-njana-Mf4MN7MZqcE-unsplash.jpg b/example_inputs/unsplash/gus-tu-njana-Mf4MN7MZqcE-unsplash.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..cfa1d423b9f1959b9608784fd9d5f40cffe2bd2e
--- /dev/null
+++ b/example_inputs/unsplash/gus-tu-njana-Mf4MN7MZqcE-unsplash.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:18bbe5f775610ed3f0bb2bd34317b6b8343b6cd2e48491f1534b973d9b33603d
+size 219870
diff --git a/example_inputs/unsplash/lhon-karwan-11tbHtK5STE-unsplash.jpg b/example_inputs/unsplash/lhon-karwan-11tbHtK5STE-unsplash.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..554d4014fc5fb526c08fa926a3e3809daf52ea49
--- /dev/null
+++ b/example_inputs/unsplash/lhon-karwan-11tbHtK5STE-unsplash.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:56a5d3914c99e0d0afa963e461b71289dd1dcc17d410db0add513d9e1b09e913
+size 960171
diff --git a/example_inputs/unsplash/masoud-razeghi--qsrZhXPius-unsplash.jpg b/example_inputs/unsplash/masoud-razeghi--qsrZhXPius-unsplash.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..c884d6a596804b6782759de7e93aa743888acb42
--- /dev/null
+++ b/example_inputs/unsplash/masoud-razeghi--qsrZhXPius-unsplash.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4bf6490c8ec93bdf5916e7dccb8e82db15edba5688a28c80a7a41323b4e834dc
+size 423211
diff --git a/example_inputs/unsplash/rahmat-alizada-7PwFKOgyoKo-unsplash.jpg b/example_inputs/unsplash/rahmat-alizada-7PwFKOgyoKo-unsplash.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..7dc0813c7f1e57c07ec462bf336eb0a526fe48b6
--- /dev/null
+++ b/example_inputs/unsplash/rahmat-alizada-7PwFKOgyoKo-unsplash.jpg
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:29dbe9cb093f7ff15021c94f816a4be88dce996c449f7b8203144c3d46647604
+size 2040909
diff --git a/flux/__init__.py b/flux/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..43c365a49d6980e88acba10ef3069f110a59644a
--- /dev/null
+++ b/flux/__init__.py
@@ -0,0 +1,11 @@
+try:
+ from ._version import version as __version__ # type: ignore
+ from ._version import version_tuple
+except ImportError:
+ __version__ = "unknown (no version information available)"
+ version_tuple = (0, 0, "unknown", "noinfo")
+
+from pathlib import Path
+
+PACKAGE = __package__.replace("_", "-")
+PACKAGE_ROOT = Path(__file__).parent
diff --git a/flux/image_utils.py b/flux/image_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..f3ab7eb8d91bd4629214fc73d7ffd23da81885a4
--- /dev/null
+++ b/flux/image_utils.py
@@ -0,0 +1,210 @@
+from PIL import Image, ImageDraw, ImageFont
+import os
+import torch
+import glob
+import matplotlib.pyplot as plt
+
+def read_images_in_path(path, size = (512,512)):
+ image_paths = []
+ for filename in os.listdir(path):
+ if filename.endswith(".png") or filename.endswith(".jpg") or filename.endswith(".jpeg"):
+ image_path = os.path.join(path, filename)
+ image_paths.append(image_path)
+ image_paths = sorted(image_paths)
+ return [Image.open(image_path).convert("RGB").resize(size) for image_path in image_paths]
+
+def concatenate_images(image_lists, return_list = False):
+ num_rows = len(image_lists[0])
+ num_columns = len(image_lists)
+ image_width = image_lists[0][0].width
+ image_height = image_lists[0][0].height
+
+ grid_width = num_columns * image_width
+ grid_height = num_rows * image_height if not return_list else image_height
+ if not return_list:
+ grid_image = [Image.new('RGB', (grid_width, grid_height))]
+ else:
+ grid_image = [Image.new('RGB', (grid_width, grid_height)) for i in range(num_rows)]
+
+ for i in range(num_rows):
+ row_index = i if return_list else 0
+ for j in range(num_columns):
+ image = image_lists[j][i]
+ x_offset = j * image_width
+ y_offset = i * image_height if not return_list else 0
+ grid_image[row_index].paste(image, (x_offset, y_offset))
+
+ return grid_image if return_list else grid_image[0]
+
+def concatenate_images_single(image_lists):
+ num_columns = len(image_lists)
+ image_width = image_lists[0].width
+ image_height = image_lists[0].height
+
+ grid_width = num_columns * image_width
+ grid_height = image_height
+ grid_image = Image.new('RGB', (grid_width, grid_height))
+
+ for j in range(num_columns):
+ image = image_lists[j]
+ x_offset = j * image_width
+ y_offset = 0
+ grid_image.paste(image, (x_offset, y_offset))
+
+ return grid_image
+
+def get_captions_for_images(images, device):
+ from transformers import Blip2Processor, Blip2ForConditionalGeneration
+
+ processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
+ model = Blip2ForConditionalGeneration.from_pretrained(
+ "Salesforce/blip2-opt-2.7b", load_in_8bit=True, device_map={"": 0}, torch_dtype=torch.float16
+    )
+
+ res = []
+
+ for image in images:
+ inputs = processor(images=image, return_tensors="pt").to(device, torch.float16)
+
+ generated_ids = model.generate(**inputs)
+ generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
+ res.append(generated_text)
+
+ del processor
+ del model
+
+ return res
+
+def find_and_plot_images(directory, output_file, recursive=True, figsize=(15, 15), image_formats=("*.png", "*.jpg", "*.jpeg", "*.bmp", "*.tiff")):
+ """
+ Finds all images in the specified directory (optionally recursively)
+ and saves them in a single figure with their filenames.
+
+ Parameters:
+ directory (str): Path to the directory.
+ output_file (str): Path to save the resulting figure (e.g., 'output.png').
+ recursive (bool): Whether to search directories recursively.
+ figsize (tuple): Size of the resulting figure.
+ image_formats (tuple): Image file formats to look for.
+
+ Returns:
+ None
+ """
+ # Gather all image file paths
+ pattern = "**/" if recursive else ""
+ images = []
+ for fmt in image_formats:
+ images.extend(glob.glob(os.path.join(directory, pattern + fmt), recursive=recursive))
+
+ images = [image for image in images if "noise.jpg" not in image and "results.jpg" not in image] # Filter out noise and result images
+ # move "original" to the front, followed by "reconstruction" and then the rest
+ images = sorted(
+ images,
+ key=lambda x: (not x.endswith("original.jpg"), not x.endswith("reconstruction.jpg"), x)
+ )
+
+ if not images:
+ print("No images found!")
+ return
+
+ # Create a figure
+ num_images = len(images)
+    cols = num_images  # one column per image (all images on a single row)
+ rows = (num_images + cols - 1) // cols # Calculate number of rows
+ fig, axs = plt.subplots(rows, cols, figsize=figsize)
+ axs = axs.flatten() if num_images > 1 else [axs] # Flatten axes for single image case
+
+ for i, image_path in enumerate(images):
+ # Open and plot image
+ img = Image.open(image_path)
+ axs[i].imshow(img)
+ axs[i].axis('off') # Remove axes
+ axs[i].set_title(os.path.basename(image_path), fontsize=8) # Add filename
+
+ # Hide any remaining empty axes
+ for j in range(i + 1, len(axs)):
+ axs[j].axis('off')
+
+ plt.tight_layout()
+ plt.savefig(output_file, bbox_inches='tight', dpi=300) # Save the figure to the file
+ plt.close(fig) # Close the figure to free up memory
+ print(f"Figure saved to {output_file}")
+
+
+def add_label_to_image(image, label):
+ """
+ Adds a label to the lower-right corner of an image.
+
+ Args:
+ image (PIL.Image): Image to add the label to.
+ label (str): Text to add as a label.
+
+ Returns:
+ PIL.Image: Image with the added label.
+ """
+ # Create a drawing context
+ draw = ImageDraw.Draw(image)
+
+ # Define font and size
+ font_size = int(min(image.size) * 0.05) # Adjust font size based on image dimensions
+ try:
+ font = ImageFont.truetype("fonts/arial.ttf", font_size) # Replace with a font path if needed
+ except IOError:
+ font = ImageFont.load_default() # Fallback to default font if arial.ttf is not found
+
+ # Measure text size using textbbox
+ text_bbox = draw.textbbox((0, 0), label, font=font) # (left, top, right, bottom)
+ text_width = text_bbox[2] - text_bbox[0]
+ text_height = text_bbox[3] - text_bbox[1]
+
+ # Position the text in the lower-right corner with some padding
+ padding = 10
+ position = (image.width - text_width - padding, image.height - text_height - padding)
+
+ # Add a semi-transparent background for the label
+ draw.rectangle(
+ [
+ (position[0] - padding, position[1] - padding),
+ (position[0] + text_width + padding, position[1] + text_height + padding)
+ ],
+ fill=(0, 0, 0, 150) # Black with transparency
+ )
+
+ # Draw the label
+ draw.text(position, label, fill="white", font=font)
+
+ return image
+
+def crop_center_square_and_resize(img, size, output_path=None):
+ """
+ Crops the center of an image to make it square.
+
+ Args:
+ img (PIL.Image): Image to crop.
+ output_path (str, optional): Path to save the cropped image. If None, the cropped image is not saved.
+
+ Returns:
+ Image: The cropped square image.
+ """
+ width, height = img.size
+ # Determine the shorter side
+ side_length = min(width, height)
+ # Calculate the cropping box
+ left = (width - side_length) // 2
+ top = (height - side_length) // 2
+ right = left + side_length
+ bottom = top + side_length
+ # Crop the image
+ cropped_img = img.crop((left, top, right, bottom))
+ # Resize the image
+ cropped_img = cropped_img.resize(size)
+
+ # Save the cropped image if output path is specified
+ if output_path:
+ cropped_img.save(output_path)
+
+ return cropped_img
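+
+
+if __name__ == "__main__":
+    # Illustrative sketch (not part of the library API): exercises the helpers
+    # above on a synthetic image so no input files are required.
+    demo = Image.new("RGB", (640, 480), "gray")
+    square = crop_center_square_and_resize(demo, (256, 256))
+    labeled = add_label_to_image(square, "example")
+    strip = concatenate_images_single([labeled, labeled])
+    print(square.size, strip.size)  # (256, 256) (512, 256)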
diff --git a/flux/math.py b/flux/math.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c76bdc7f20b694a559f74a617adb14c19fbb3d0
--- /dev/null
+++ b/flux/math.py
@@ -0,0 +1,31 @@
+import torch
+from einops import rearrange
+from torch import Tensor
+
+
+def attention(q: Tensor, k: Tensor, v: Tensor, pe: Tensor) -> Tensor:
+ if pe is not None:
+ q, k = apply_rope(q, k, pe)
+
+ x = torch.nn.functional.scaled_dot_product_attention(q, k, v)
+ x = rearrange(x, "B H L D -> B L (H D)")
+
+ return x
+
+
+def rope(pos: Tensor, dim: int, theta: int) -> Tensor:
+ assert dim % 2 == 0
+ scale = torch.arange(0, dim, 2, dtype=torch.float64, device=pos.device) / dim
+ omega = 1.0 / (theta**scale)
+ out = torch.einsum("...n,d->...nd", pos, omega)
+ out = torch.stack([torch.cos(out), -torch.sin(out), torch.sin(out), torch.cos(out)], dim=-1)
+ out = rearrange(out, "b n d (i j) -> b n d i j", i=2, j=2)
+ return out.float()
+
+
+def apply_rope(xq: Tensor, xk: Tensor, freqs_cis: Tensor) -> tuple[Tensor, Tensor]:
+ xq_ = xq.float().reshape(*xq.shape[:-1], -1, 1, 2)
+ xk_ = xk.float().reshape(*xk.shape[:-1], -1, 1, 2)
+ xq_out = freqs_cis[..., 0] * xq_[..., 0] + freqs_cis[..., 1] * xq_[..., 1]
+ xk_out = freqs_cis[..., 0] * xk_[..., 0] + freqs_cis[..., 1] * xk_[..., 1]
+ return xq_out.reshape(*xq.shape).type_as(xq), xk_out.reshape(*xk.shape).type_as(xk)
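+
+
+if __name__ == "__main__":
+    # Hedged shape sketch: positions -> rotary frequencies -> attention.
+    # The sizes below are made up for illustration; the only requirements are
+    # that dim is even and that the rope dim matches the per-head dim of q/k/v.
+    B, H, L, D = 1, 2, 8, 16
+    pos = torch.arange(L, dtype=torch.float64)[None, :]  # (1, L) token positions
+    pe = rope(pos, D, 10_000).unsqueeze(1)               # (1, 1, L, D/2, 2, 2)
+    q, k, v = (torch.randn(B, H, L, D) for _ in range(3))
+    out = attention(q, k, v, pe=pe)                      # (B, L, H * D)
+    print(pe.shape, out.shape)  # torch.Size([1, 1, 8, 8, 2, 2]) torch.Size([1, 8, 32])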
diff --git a/flux/model.py b/flux/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e62eb11cbd7c2095f72a591469c92844de1b2d0
--- /dev/null
+++ b/flux/model.py
@@ -0,0 +1,165 @@
+from dataclasses import dataclass
+
+import torch
+from torch import Tensor, nn
+
+from flux.modules.layers import (
+ DoubleStreamBlock,
+ EmbedND,
+ LastLayer,
+ MLPEmbedder,
+ SingleStreamBlock,
+ timestep_embedding,
+)
+
+DEVICE = torch.device("cuda")
+
+@dataclass
+class FluxParams:
+ in_channels: int
+ vec_in_dim: int
+ context_in_dim: int
+ hidden_size: int
+ mlp_ratio: float
+ num_heads: int
+ depth: int
+ depth_single_blocks: int
+ axes_dim: list[int]
+ theta: int
+ qkv_bias: bool
+ guidance_embed: bool
+
+
+class Flux(nn.Module):
+ """
+ Transformer model for flow matching on sequences.
+ """
+
+ def __init__(self, params: FluxParams):
+ super().__init__()
+
+ self.params = params
+ self.in_channels = params.in_channels
+ self.out_channels = self.in_channels
+ if params.hidden_size % params.num_heads != 0:
+ raise ValueError(
+ f"Hidden size {params.hidden_size} must be divisible by num_heads {params.num_heads}"
+ )
+ pe_dim = params.hidden_size // params.num_heads
+ if sum(params.axes_dim) != pe_dim:
+ raise ValueError(f"Got {params.axes_dim} but expected positional dim {pe_dim}")
+ self.hidden_size = params.hidden_size
+ self.num_heads = params.num_heads
+ self.pe_embedder = EmbedND(dim=pe_dim, theta=params.theta, axes_dim=params.axes_dim)
+ self.img_in = nn.Linear(self.in_channels, self.hidden_size, bias=True)
+ self.time_in = MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size)
+ self.vector_in = MLPEmbedder(params.vec_in_dim, self.hidden_size)
+ self.guidance_in = (
+ MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size) if params.guidance_embed else nn.Identity()
+ )
+ self.txt_in = nn.Linear(params.context_in_dim, self.hidden_size)
+
+ self.double_blocks = nn.ModuleList(
+ [
+ DoubleStreamBlock(
+ self.hidden_size,
+ self.num_heads,
+ mlp_ratio=params.mlp_ratio,
+ qkv_bias=params.qkv_bias,
+ ).to(torch.bfloat16)
+ for _ in range(params.depth)
+ ]
+ )
+
+ self.single_blocks = nn.ModuleList(
+ [
+ SingleStreamBlock(self.hidden_size, self.num_heads, mlp_ratio=params.mlp_ratio).to(torch.bfloat16)
+ for _ in range(params.depth_single_blocks)
+ ]
+ )
+
+ self.final_layer = LastLayer(self.hidden_size, 1, self.out_channels)
+
+ self.pulid_ca = None
+ self.pulid_double_interval = 2
+ self.pulid_single_interval = 4
+
+ def forward(
+ self,
+ img: Tensor,
+ img_ids: Tensor,
+ txt: Tensor,
+ txt_ids: Tensor,
+ timesteps: Tensor,
+ y: Tensor,
+ guidance: Tensor = None,
+ id: Tensor = None,
+ id_weight: float = 1.0,
+ aggressive_offload: bool = False,
+ ) -> Tensor:
+ if img.ndim != 3 or txt.ndim != 3:
+ raise ValueError("Input img and txt tensors must have 3 dimensions.")
+
+ # running on sequences img
+ img = self.img_in(img)
+ vec = self.time_in(timestep_embedding(timesteps, 256))
+ if self.params.guidance_embed:
+ if guidance is None:
+ raise ValueError("Didn't get guidance strength for guidance distilled model.")
+ vec = vec + self.guidance_in(timestep_embedding(guidance, 256))
+ vec = vec + self.vector_in(y)
+ txt = self.txt_in(txt)
+
+ ids = torch.cat((txt_ids, img_ids), dim=1)
+ pe = self.pe_embedder(ids)
+
+ ca_idx = 0
+ if aggressive_offload:
+ self.double_blocks = self.double_blocks.to(DEVICE)
+ for i, block in enumerate(self.double_blocks):
+ img, txt = block(img=img, txt=txt, vec=vec, pe=pe)
+
+ if i % self.pulid_double_interval == 0 and id is not None:
+ img = img + id_weight * self.pulid_ca[ca_idx](id, img)
+ ca_idx += 1
+ if aggressive_offload:
+ self.double_blocks.cpu()
+
+ img = torch.cat((txt, img), 1)
+ if aggressive_offload:
+            # put half of the single blocks on the GPU
+ for i in range(len(self.single_blocks) // 2):
+ self.single_blocks[i] = self.single_blocks[i].to(DEVICE)
+ for i, block in enumerate(self.single_blocks):
+ if aggressive_offload and i == len(self.single_blocks)//2:
+                # move the first half of the single blocks to the CPU and the last half to the GPU
+ for j in range(len(self.single_blocks) // 2):
+ self.single_blocks[j].cpu()
+ for j in range(len(self.single_blocks) // 2, len(self.single_blocks)):
+ self.single_blocks[j] = self.single_blocks[j].to(DEVICE)
+ x = block(img, vec=vec, pe=pe)
+ real_img, txt = x[:, txt.shape[1]:, ...], x[:, :txt.shape[1], ...]
+
+ if i % self.pulid_single_interval == 0 and id is not None:
+ real_img = real_img + id_weight * self.pulid_ca[ca_idx](id, real_img)
+ ca_idx += 1
+
+ img = torch.cat((txt, real_img), 1)
+ if aggressive_offload:
+ self.single_blocks.cpu()
+ img = img[:, txt.shape[1] :, ...]
+
+ img = self.final_layer(img, vec) # (N, T, patch_size ** 2 * out_channels)
+ return img
+
+ def components_to_gpu(self):
+ # everything but double_blocks, single_blocks
+ self.img_in.to(DEVICE)
+ self.time_in.to(DEVICE)
+ self.guidance_in.to(DEVICE)
+ self.vector_in.to(DEVICE)
+ self.txt_in.to(DEVICE)
+ self.pe_embedder.to(DEVICE)
+ self.final_layer.to(DEVICE)
+ if self.pulid_ca:
+ self.pulid_ca.to(DEVICE)
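+
+
+if __name__ == "__main__":
+    # Hedged shape-check sketch with deliberately tiny, made-up hyperparameters
+    # (the real configs live in flux/util.py). Runs on CPU in bfloat16 and only
+    # verifies that tensors of the expected shapes flow through the model.
+    params = FluxParams(
+        in_channels=16, vec_in_dim=8, context_in_dim=32, hidden_size=64,
+        mlp_ratio=4.0, num_heads=4, depth=1, depth_single_blocks=1,
+        axes_dim=[4, 6, 6], theta=10_000, qkv_bias=True, guidance_embed=False,
+    )
+    model = Flux(params).to(torch.bfloat16)
+    img = torch.randn(1, 16, params.in_channels, dtype=torch.bfloat16)    # packed latent tokens
+    txt = torch.randn(1, 4, params.context_in_dim, dtype=torch.bfloat16)  # text token features
+    img_ids = torch.zeros(1, 16, 3, dtype=torch.float64)
+    txt_ids = torch.zeros(1, 4, 3, dtype=torch.float64)
+    y = torch.randn(1, params.vec_in_dim, dtype=torch.bfloat16)           # pooled text vector
+    t = torch.full((1,), 0.5, dtype=torch.bfloat16)
+    out = model(img=img, img_ids=img_ids, txt=txt, txt_ids=txt_ids, timesteps=t, y=y)
+    print(out.shape)  # torch.Size([1, 16, 16])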
diff --git a/flux/modules/__init__.py b/flux/modules/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/flux/modules/autoencoder.py b/flux/modules/autoencoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d40adda922ff193e227765d774283a8ab62681a
--- /dev/null
+++ b/flux/modules/autoencoder.py
@@ -0,0 +1,317 @@
+from dataclasses import dataclass
+
+import torch
+from einops import rearrange
+from torch import Tensor, nn
+
+
+@dataclass
+class AutoEncoderParams:
+ resolution: int
+ in_channels: int
+ ch: int
+ out_ch: int
+ ch_mult: list[int]
+ num_res_blocks: int
+ z_channels: int
+ scale_factor: float
+ shift_factor: float
+
+
+def swish(x: Tensor) -> Tensor:
+ return x * torch.sigmoid(x)
+
+
+class AttnBlock(nn.Module):
+ def __init__(self, in_channels: int):
+ super().__init__()
+ self.in_channels = in_channels
+
+ self.norm = nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
+
+ self.q = nn.Conv2d(in_channels, in_channels, kernel_size=1)
+ self.k = nn.Conv2d(in_channels, in_channels, kernel_size=1)
+ self.v = nn.Conv2d(in_channels, in_channels, kernel_size=1)
+ self.proj_out = nn.Conv2d(in_channels, in_channels, kernel_size=1)
+
+ def attention(self, h_: Tensor) -> Tensor:
+ h_ = self.norm(h_)
+ q = self.q(h_)
+ k = self.k(h_)
+ v = self.v(h_)
+
+ b, c, h, w = q.shape
+ q = rearrange(q, "b c h w -> b 1 (h w) c").contiguous()
+ k = rearrange(k, "b c h w -> b 1 (h w) c").contiguous()
+ v = rearrange(v, "b c h w -> b 1 (h w) c").contiguous()
+ h_ = nn.functional.scaled_dot_product_attention(q, k, v)
+
+ return rearrange(h_, "b 1 (h w) c -> b c h w", h=h, w=w, c=c, b=b)
+
+ def forward(self, x: Tensor) -> Tensor:
+ return x + self.proj_out(self.attention(x))
+
+
+class ResnetBlock(nn.Module):
+ def __init__(self, in_channels: int, out_channels: int):
+ super().__init__()
+ self.in_channels = in_channels
+ out_channels = in_channels if out_channels is None else out_channels
+ self.out_channels = out_channels
+
+ self.norm1 = nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
+ self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
+ self.norm2 = nn.GroupNorm(num_groups=32, num_channels=out_channels, eps=1e-6, affine=True)
+ self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
+ if self.in_channels != self.out_channels:
+ self.nin_shortcut = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
+
+ def forward(self, x):
+ h = x
+ h = self.norm1(h)
+ h = swish(h)
+ h = self.conv1(h)
+
+ h = self.norm2(h)
+ h = swish(h)
+ h = self.conv2(h)
+
+ if self.in_channels != self.out_channels:
+ x = self.nin_shortcut(x)
+
+ return x + h
+
+
+class Downsample(nn.Module):
+ def __init__(self, in_channels: int):
+ super().__init__()
+ # no asymmetric padding in torch conv, must do it ourselves
+ self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0)
+
+ def forward(self, x: Tensor):
+ pad = (0, 1, 0, 1)
+ x = nn.functional.pad(x, pad, mode="constant", value=0)
+ x = self.conv(x)
+ return x
+
+
+class Upsample(nn.Module):
+ def __init__(self, in_channels: int):
+ super().__init__()
+ self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
+
+ def forward(self, x: Tensor):
+ x = nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
+ x = self.conv(x)
+ return x
+
+
+class Encoder(nn.Module):
+ def __init__(
+ self,
+ resolution: int,
+ in_channels: int,
+ ch: int,
+ ch_mult: list[int],
+ num_res_blocks: int,
+ z_channels: int,
+ ):
+ super().__init__()
+ self.ch = ch
+ self.num_resolutions = len(ch_mult)
+ self.num_res_blocks = num_res_blocks
+ self.resolution = resolution
+ self.in_channels = in_channels
+ # downsampling
+ self.conv_in = nn.Conv2d(in_channels, self.ch, kernel_size=3, stride=1, padding=1)
+
+ curr_res = resolution
+ in_ch_mult = (1,) + tuple(ch_mult)
+ self.in_ch_mult = in_ch_mult
+ self.down = nn.ModuleList()
+ block_in = self.ch
+ for i_level in range(self.num_resolutions):
+ block = nn.ModuleList()
+ attn = nn.ModuleList()
+ block_in = ch * in_ch_mult[i_level]
+ block_out = ch * ch_mult[i_level]
+ for _ in range(self.num_res_blocks):
+ block.append(ResnetBlock(in_channels=block_in, out_channels=block_out))
+ block_in = block_out
+ down = nn.Module()
+ down.block = block
+ down.attn = attn
+ if i_level != self.num_resolutions - 1:
+ down.downsample = Downsample(block_in)
+ curr_res = curr_res // 2
+ self.down.append(down)
+
+ # middle
+ self.mid = nn.Module()
+ self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in)
+ self.mid.attn_1 = AttnBlock(block_in)
+ self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in)
+
+ # end
+ self.norm_out = nn.GroupNorm(num_groups=32, num_channels=block_in, eps=1e-6, affine=True)
+ self.conv_out = nn.Conv2d(block_in, 2 * z_channels, kernel_size=3, stride=1, padding=1)
+
+ def forward(self, x: Tensor) -> Tensor:
+ # downsampling
+ hs = [self.conv_in(x)]
+ for i_level in range(self.num_resolutions):
+ for i_block in range(self.num_res_blocks):
+ h = self.down[i_level].block[i_block](hs[-1])
+ if len(self.down[i_level].attn) > 0:
+ h = self.down[i_level].attn[i_block](h)
+ hs.append(h)
+ if i_level != self.num_resolutions - 1:
+ hs.append(self.down[i_level].downsample(hs[-1]))
+
+ # middle
+ h = hs[-1]
+ h = self.mid.block_1(h)
+ h = self.mid.attn_1(h)
+ h = self.mid.block_2(h)
+ # end
+ h = self.norm_out(h)
+ h = swish(h)
+ h = self.conv_out(h)
+ return h
+
+
+class Decoder(nn.Module):
+ def __init__(
+ self,
+ ch: int,
+ out_ch: int,
+ ch_mult: list[int],
+ num_res_blocks: int,
+ in_channels: int,
+ resolution: int,
+ z_channels: int,
+ ):
+ super().__init__()
+ self.ch = ch
+ self.num_resolutions = len(ch_mult)
+ self.num_res_blocks = num_res_blocks
+ self.resolution = resolution
+ self.in_channels = in_channels
+ self.ffactor = 2 ** (self.num_resolutions - 1)
+
+ # compute in_ch_mult, block_in and curr_res at lowest res
+ block_in = ch * ch_mult[self.num_resolutions - 1]
+ curr_res = resolution // 2 ** (self.num_resolutions - 1)
+ self.z_shape = (1, z_channels, curr_res, curr_res)
+
+ # z to block_in
+ self.conv_in = nn.Conv2d(z_channels, block_in, kernel_size=3, stride=1, padding=1)
+
+ # middle
+ self.mid = nn.Module()
+ self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in)
+ self.mid.attn_1 = AttnBlock(block_in)
+ self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in)
+
+ # upsampling
+ self.up = nn.ModuleList()
+ for i_level in reversed(range(self.num_resolutions)):
+ block = nn.ModuleList()
+ attn = nn.ModuleList()
+ block_out = ch * ch_mult[i_level]
+ for _ in range(self.num_res_blocks + 1):
+ block.append(ResnetBlock(in_channels=block_in, out_channels=block_out))
+ block_in = block_out
+ up = nn.Module()
+ up.block = block
+ up.attn = attn
+ if i_level != 0:
+ up.upsample = Upsample(block_in)
+ curr_res = curr_res * 2
+ self.up.insert(0, up) # prepend to get consistent order
+
+ # end
+ self.norm_out = nn.GroupNorm(num_groups=32, num_channels=block_in, eps=1e-6, affine=True)
+ self.conv_out = nn.Conv2d(block_in, out_ch, kernel_size=3, stride=1, padding=1)
+
+ def forward(self, z: Tensor) -> Tensor:
+ # z to block_in
+ h = self.conv_in(z)
+
+ # middle
+ h = self.mid.block_1(h)
+ h = self.mid.attn_1(h)
+ h = self.mid.block_2(h)
+
+ # upsampling
+ for i_level in reversed(range(self.num_resolutions)):
+ for i_block in range(self.num_res_blocks + 1):
+ h = self.up[i_level].block[i_block](h)
+ if len(self.up[i_level].attn) > 0:
+ h = self.up[i_level].attn[i_block](h)
+ if i_level != 0:
+ h = self.up[i_level].upsample(h)
+
+ # end
+ h = self.norm_out(h)
+ h = swish(h)
+ h = self.conv_out(h)
+ return h
+
+
+class DiagonalGaussian(nn.Module):
+ def __init__(self, sample: bool = True, chunk_dim: int = 1):
+ super().__init__()
+ self.sample = sample
+ self.chunk_dim = chunk_dim
+
+ def forward(self, z: Tensor) -> Tensor:
+ mean, logvar = torch.chunk(z, 2, dim=self.chunk_dim)
+ if self.sample:
+ std = torch.exp(0.5 * logvar)
+ return mean + std * torch.randn_like(mean)
+ else:
+ return mean
+
+
+class AutoEncoder(nn.Module):
+ def __init__(self, params: AutoEncoderParams):
+ super().__init__()
+ self.encoder = Encoder(
+ resolution=params.resolution,
+ in_channels=params.in_channels,
+ ch=params.ch,
+ ch_mult=params.ch_mult,
+ num_res_blocks=params.num_res_blocks,
+ z_channels=params.z_channels,
+ )
+ self.decoder = Decoder(
+ resolution=params.resolution,
+ in_channels=params.in_channels,
+ ch=params.ch,
+ out_ch=params.out_ch,
+ ch_mult=params.ch_mult,
+ num_res_blocks=params.num_res_blocks,
+ z_channels=params.z_channels,
+ )
+ self.reg = DiagonalGaussian()
+
+ self.scale_factor = params.scale_factor
+ self.shift_factor = params.shift_factor
+
+ def encode(self, x: Tensor) -> Tensor:
+ z = self.reg(self.encoder(x))
+ z = self.scale_factor * (z - self.shift_factor)
+ return z
+
+ def encode_no_sampling(self, x: Tensor) -> Tensor:
+ z, _ = torch.chunk(self.encoder(x), 2, dim=1)
+ z = self.scale_factor * (z - self.shift_factor)
+ return z
+
+ def decode(self, z: Tensor) -> Tensor:
+ z = z / self.scale_factor + self.shift_factor
+ return self.decoder(z)
+
+ def forward(self, x: Tensor) -> Tensor:
+ return self.decode(self.encode(x))
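+
+
+if __name__ == "__main__":
+    # Hedged roundtrip sketch with tiny, made-up hyperparameters (the real ones
+    # are defined in flux/util.py). Channel counts are multiples of 32 so the
+    # GroupNorm(32, ...) layers are valid.
+    params = AutoEncoderParams(
+        resolution=64, in_channels=3, ch=32, out_ch=3, ch_mult=[1, 2],
+        num_res_blocks=1, z_channels=4, scale_factor=0.3611, shift_factor=0.1159,
+    )
+    ae = AutoEncoder(params)
+    x = torch.randn(1, 3, 64, 64)
+    z = ae.encode(x)      # (1, z_channels, 32, 32): one downsample for a 2-level ch_mult
+    recon = ae.decode(z)  # (1, out_ch, 64, 64)
+    print(z.shape, recon.shape)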
diff --git a/flux/modules/conditioner.py b/flux/modules/conditioner.py
new file mode 100644
index 0000000000000000000000000000000000000000..e60297e45813862ffdf03b79fd8fbe5b4a17029d
--- /dev/null
+++ b/flux/modules/conditioner.py
@@ -0,0 +1,37 @@
+from torch import Tensor, nn
+from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5Tokenizer
+
+
+class HFEmbedder(nn.Module):
+ def __init__(self, version: str, max_length: int, **hf_kwargs):
+ super().__init__()
+ self.is_clip = version.startswith("openai")
+ self.max_length = max_length
+ self.output_key = "pooler_output" if self.is_clip else "last_hidden_state"
+
+ if self.is_clip:
+ self.tokenizer: CLIPTokenizer = CLIPTokenizer.from_pretrained(version, max_length=max_length)
+ self.hf_module: CLIPTextModel = CLIPTextModel.from_pretrained(version, **hf_kwargs)
+ else:
+ self.tokenizer: T5Tokenizer = T5Tokenizer.from_pretrained(version, max_length=max_length)
+ self.hf_module: T5EncoderModel = T5EncoderModel.from_pretrained(version, **hf_kwargs)
+
+ self.hf_module = self.hf_module.eval().requires_grad_(False)
+
+ def forward(self, text: list[str]) -> Tensor:
+ batch_encoding = self.tokenizer(
+ text,
+ truncation=True,
+ max_length=self.max_length,
+ return_length=False,
+ return_overflowing_tokens=False,
+ padding="max_length",
+ return_tensors="pt",
+ )
+
+ outputs = self.hf_module(
+ input_ids=batch_encoding["input_ids"].to(self.hf_module.device),
+ attention_mask=None,
+ output_hidden_states=False,
+ )
+ return outputs[self.output_key]
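+
+
+if __name__ == "__main__":
+    # Hedged usage sketch: downloads the CLIP text encoder from the Hub on the
+    # first run, so treat it as documentation rather than a test. The CLIP
+    # variant returns the pooled embedding; a T5 version would return the
+    # per-token last hidden states instead.
+    clip = HFEmbedder("openai/clip-vit-large-patch14", max_length=77)
+    pooled = clip(["a photo of a cat"])
+    print(pooled.shape)  # torch.Size([1, 768])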
diff --git a/flux/modules/layers.py b/flux/modules/layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..fc36b059dd82101ab1a8ba8f42b2ca9d9df2836a
--- /dev/null
+++ b/flux/modules/layers.py
@@ -0,0 +1,253 @@
+import math
+from dataclasses import dataclass
+
+import torch
+from einops import rearrange
+from torch import Tensor, nn
+
+from flux.math import attention, rope
+
+
+class EmbedND(nn.Module):
+ def __init__(self, dim: int, theta: int, axes_dim: list[int]):
+ super().__init__()
+ self.dim = dim
+ self.theta = theta
+ self.axes_dim = axes_dim
+
+ def forward(self, ids: Tensor) -> Tensor:
+ n_axes = ids.shape[-1]
+ emb = torch.cat(
+ [rope(ids[..., i], self.axes_dim[i], self.theta) for i in range(n_axes)],
+ dim=-3,
+ )
+
+ return emb.unsqueeze(1)
+
+
+def timestep_embedding(t: Tensor, dim, max_period=10000, time_factor: float = 1000.0):
+ """
+ Create sinusoidal timestep embeddings.
+ :param t: a 1-D Tensor of N indices, one per batch element.
+ These may be fractional.
+ :param dim: the dimension of the output.
+ :param max_period: controls the minimum frequency of the embeddings.
+ :return: an (N, D) Tensor of positional embeddings.
+ """
+ t = time_factor * t
+ half = dim // 2
+ freqs = torch.exp(-math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half).to(
+ t.device
+ )
+
+ args = t[:, None].float() * freqs[None]
+ embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
+ if dim % 2:
+ embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
+ if torch.is_floating_point(t):
+ embedding = embedding.to(t)
+ return embedding
+
+
+class MLPEmbedder(nn.Module):
+ def __init__(self, in_dim: int, hidden_dim: int):
+ super().__init__()
+ self.in_layer = nn.Linear(in_dim, hidden_dim, bias=True)
+ self.silu = nn.SiLU()
+ self.out_layer = nn.Linear(hidden_dim, hidden_dim, bias=True)
+
+ def forward(self, x: Tensor) -> Tensor:
+ return self.out_layer(self.silu(self.in_layer(x)))
+
+
+class RMSNorm(torch.nn.Module):
+ def __init__(self, dim: int):
+ super().__init__()
+ self.scale = nn.Parameter(torch.ones(dim))
+
+ def forward(self, x: Tensor):
+ x_dtype = x.dtype
+ x = x.float()
+ rrms = torch.rsqrt(torch.mean(x**2, dim=-1, keepdim=True) + 1e-6)
+ return (x * rrms).to(dtype=x_dtype) * self.scale
+
+
+class QKNorm(torch.nn.Module):
+ def __init__(self, dim: int):
+ super().__init__()
+ self.query_norm = RMSNorm(dim)
+ self.key_norm = RMSNorm(dim)
+
+ def forward(self, q: Tensor, k: Tensor, v: Tensor) -> tuple[Tensor, Tensor]:
+ q = self.query_norm(q)
+ k = self.key_norm(k)
+ return q.to(v), k.to(v)
+
+
+class SelfAttention(nn.Module):
+ def __init__(self, dim: int, num_heads: int = 8, qkv_bias: bool = False):
+ super().__init__()
+ self.num_heads = num_heads
+ head_dim = dim // num_heads
+
+ self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
+ self.norm = QKNorm(head_dim)
+ self.proj = nn.Linear(dim, dim)
+
+ def forward(self, x: Tensor, pe: Tensor) -> Tensor:
+ qkv = self.qkv(x)
+ q, k, v = rearrange(qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
+ q, k = self.norm(q, k, v)
+ x = attention(q, k, v, pe=pe)
+ x = self.proj(x)
+ return x
+
+
+@dataclass
+class ModulationOut:
+ shift: Tensor
+ scale: Tensor
+ gate: Tensor
+
+
+class Modulation(nn.Module):
+ def __init__(self, dim: int, double: bool):
+ super().__init__()
+ self.is_double = double
+ self.multiplier = 6 if double else 3
+ self.lin = nn.Linear(dim, self.multiplier * dim, bias=True)
+
+ def forward(self, vec: Tensor) -> tuple[ModulationOut, ModulationOut]:
+ out = self.lin(nn.functional.silu(vec))[:, None, :].chunk(self.multiplier, dim=-1)
+
+ return (
+ ModulationOut(*out[:3]),
+ ModulationOut(*out[3:]) if self.is_double else None,
+ )
+
+
+class DoubleStreamBlock(nn.Module):
+ def __init__(self, hidden_size: int, num_heads: int, mlp_ratio: float, qkv_bias: bool = False):
+ super().__init__()
+
+ mlp_hidden_dim = int(hidden_size * mlp_ratio)
+ self.num_heads = num_heads
+ self.hidden_size = hidden_size
+ self.img_mod = Modulation(hidden_size, double=True)
+ self.img_norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
+ self.img_attn = SelfAttention(dim=hidden_size, num_heads=num_heads, qkv_bias=qkv_bias)
+
+ self.img_norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
+ self.img_mlp = nn.Sequential(
+ nn.Linear(hidden_size, mlp_hidden_dim, bias=True),
+ nn.GELU(approximate="tanh"),
+ nn.Linear(mlp_hidden_dim, hidden_size, bias=True),
+ )
+
+ self.txt_mod = Modulation(hidden_size, double=True)
+ self.txt_norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
+ self.txt_attn = SelfAttention(dim=hidden_size, num_heads=num_heads, qkv_bias=qkv_bias)
+
+ self.txt_norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
+ self.txt_mlp = nn.Sequential(
+ nn.Linear(hidden_size, mlp_hidden_dim, bias=True),
+ nn.GELU(approximate="tanh"),
+ nn.Linear(mlp_hidden_dim, hidden_size, bias=True),
+ )
+
+ def forward(self, img: Tensor, txt: Tensor, vec: Tensor, pe: Tensor) -> tuple[Tensor, Tensor]:
+ img_mod1, img_mod2 = self.img_mod(vec)
+ txt_mod1, txt_mod2 = self.txt_mod(vec)
+
+ # prepare image for attention
+ img_modulated = self.img_norm1(img)
+ img_modulated = (1 + img_mod1.scale) * img_modulated + img_mod1.shift
+ img_qkv = self.img_attn.qkv(img_modulated)
+ img_q, img_k, img_v = rearrange(img_qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
+ img_q, img_k = self.img_attn.norm(img_q, img_k, img_v)
+
+ # prepare txt for attention
+ txt_modulated = self.txt_norm1(txt)
+ txt_modulated = (1 + txt_mod1.scale) * txt_modulated + txt_mod1.shift
+ txt_qkv = self.txt_attn.qkv(txt_modulated)
+ txt_q, txt_k, txt_v = rearrange(txt_qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
+ txt_q, txt_k = self.txt_attn.norm(txt_q, txt_k, txt_v)
+
+ # run actual attention
+ q = torch.cat((txt_q, img_q), dim=2)
+ k = torch.cat((txt_k, img_k), dim=2)
+ v = torch.cat((txt_v, img_v), dim=2)
+
+ attn = attention(q, k, v, pe=pe)
+ txt_attn, img_attn = attn[:, : txt.shape[1]], attn[:, txt.shape[1] :]
+
+        # calculate the img blocks
+ img = img + img_mod1.gate * self.img_attn.proj(img_attn)
+ img = img + img_mod2.gate * self.img_mlp((1 + img_mod2.scale) * self.img_norm2(img) + img_mod2.shift)
+
+        # calculate the txt blocks
+ txt = txt + txt_mod1.gate * self.txt_attn.proj(txt_attn)
+ txt = txt + txt_mod2.gate * self.txt_mlp((1 + txt_mod2.scale) * self.txt_norm2(txt) + txt_mod2.shift)
+ return img, txt
+
+
+class SingleStreamBlock(nn.Module):
+ """
+ A DiT block with parallel linear layers as described in
+ https://arxiv.org/abs/2302.05442 and adapted modulation interface.
+ """
+
+ def __init__(
+ self,
+ hidden_size: int,
+ num_heads: int,
+ mlp_ratio: float = 4.0,
+ qk_scale: float = None,
+ ):
+ super().__init__()
+ self.hidden_dim = hidden_size
+ self.num_heads = num_heads
+ head_dim = hidden_size // num_heads
+ self.scale = qk_scale or head_dim**-0.5
+
+ self.mlp_hidden_dim = int(hidden_size * mlp_ratio)
+ # qkv and mlp_in
+ self.linear1 = nn.Linear(hidden_size, hidden_size * 3 + self.mlp_hidden_dim)
+ # proj and mlp_out
+ self.linear2 = nn.Linear(hidden_size + self.mlp_hidden_dim, hidden_size)
+
+ self.norm = QKNorm(head_dim)
+
+ self.hidden_size = hidden_size
+ self.pre_norm = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
+
+ self.mlp_act = nn.GELU(approximate="tanh")
+ self.modulation = Modulation(hidden_size, double=False)
+
+ def forward(self, x: Tensor, vec: Tensor, pe: Tensor) -> Tensor:
+ mod, _ = self.modulation(vec)
+ x_mod = (1 + mod.scale) * self.pre_norm(x) + mod.shift
+ qkv, mlp = torch.split(self.linear1(x_mod), [3 * self.hidden_size, self.mlp_hidden_dim], dim=-1)
+
+ q, k, v = rearrange(qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
+ q, k = self.norm(q, k, v)
+
+ # compute attention
+ attn = attention(q, k, v, pe=pe)
+ # compute activation in mlp stream, cat again and run second linear layer
+ output = self.linear2(torch.cat((attn, self.mlp_act(mlp)), 2))
+ return x + mod.gate * output
+
+
+class LastLayer(nn.Module):
+ def __init__(self, hidden_size: int, patch_size: int, out_channels: int):
+ super().__init__()
+ self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
+ self.linear = nn.Linear(hidden_size, patch_size * patch_size * out_channels, bias=True)
+ self.adaLN_modulation = nn.Sequential(nn.SiLU(), nn.Linear(hidden_size, 2 * hidden_size, bias=True))
+
+ def forward(self, x: Tensor, vec: Tensor) -> Tensor:
+ shift, scale = self.adaLN_modulation(vec).chunk(2, dim=1)
+ x = (1 + scale[:, None, :]) * self.norm_final(x) + shift[:, None, :]
+ x = self.linear(x)
+ return x
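+
+
+if __name__ == "__main__":
+    # Hedged shape sketch for the conditioning path: sinusoidal timestep
+    # features -> MLP embedding -> modulation (shift/scale/gate). The sizes
+    # are made up for illustration.
+    t = torch.tensor([0.25, 0.75])
+    emb = timestep_embedding(t, 256)            # (2, 256)
+    vec = MLPEmbedder(256, 64)(emb)             # (2, 64)
+    mod, _ = Modulation(64, double=False)(vec)  # single-stream style modulation
+    print(emb.shape, vec.shape, mod.scale.shape)  # (2, 256) (2, 64) (2, 1, 64)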
diff --git a/flux/sampling.py b/flux/sampling.py
new file mode 100644
index 0000000000000000000000000000000000000000..fa056c2ba5736d06d563f41643de3e7c09b4e086
--- /dev/null
+++ b/flux/sampling.py
@@ -0,0 +1,299 @@
+import math
+from typing import Callable
+
+import torch
+from einops import rearrange, repeat
+from torch import Tensor
+from tqdm import tqdm
+
+from .model import Flux
+from .modules.conditioner import HFEmbedder
+
+
+def get_noise(
+ num_samples: int,
+ height: int,
+ width: int,
+ device: torch.device,
+ dtype: torch.dtype,
+ seed: int,
+):
+ return torch.randn(
+ num_samples,
+ 16,
+ # allow for packing
+ 2 * math.ceil(height / 16),
+ 2 * math.ceil(width / 16),
+ device=device,
+ dtype=dtype,
+ generator=torch.Generator(device=device).manual_seed(seed),
+ )
+
+
+def prepare(t5: HFEmbedder, clip: HFEmbedder, img: Tensor, prompt: str) -> dict[str, Tensor]:
+ bs, c, h, w = img.shape
+ if bs == 1 and not isinstance(prompt, str):
+ bs = len(prompt)
+
+ img = rearrange(img, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=2, pw=2)
+ if img.shape[0] == 1 and bs > 1:
+ img = repeat(img, "1 ... -> bs ...", bs=bs)
+
+ img_ids = torch.zeros(h // 2, w // 2, 3)
+ img_ids[..., 1] = img_ids[..., 1] + torch.arange(h // 2)[:, None]
+ img_ids[..., 2] = img_ids[..., 2] + torch.arange(w // 2)[None, :]
+ img_ids = repeat(img_ids, "h w c -> b (h w) c", b=bs)
+ img_ids = img_ids.to(img.dtype)
+ img_ids = img_ids.to(torch.bfloat16)
+
+ if isinstance(prompt, str):
+ prompt = [prompt]
+ txt = t5(prompt)
+ if txt.shape[0] == 1 and bs > 1:
+ txt = repeat(txt, "1 ... -> bs ...", bs=bs)
+ txt_ids = torch.zeros(bs, txt.shape[1], 3).to(txt.dtype)
+ txt_ids = txt_ids.to(torch.bfloat16)
+
+ vec = clip(prompt)
+ if vec.shape[0] == 1 and bs > 1:
+ vec = repeat(vec, "1 ... -> bs ...", bs=bs)
+
+ return {
+ "img": img,
+ "img_ids": img_ids.to(img.device),
+ "txt": txt.to(img.device),
+ "txt_ids": txt_ids.to(img.device),
+ "vec": vec.to(img.device),
+ }
+
+
+def time_shift(mu: float, sigma: float, t: Tensor):
+ return math.exp(mu) / (math.exp(mu) + (1 / t - 1) ** sigma)
+
+
+def get_lin_function(
+ x1: float = 256, y1: float = 0.5, x2: float = 4096, y2: float = 1.15
+) -> Callable[[float], float]:
+ m = (y2 - y1) / (x2 - x1)
+ b = y1 - m * x1
+ return lambda x: m * x + b
+
+
+def get_schedule(
+ num_steps: int,
+ image_seq_len: int,
+ base_shift: float = 0.5,
+ max_shift: float = 1.15,
+ shift: bool = True,
+) -> list[float]:
+ # extra step for zero
+ timesteps = torch.linspace(1, 0, num_steps + 1)
+
+ # shifting the schedule to favor high timesteps for higher signal images
+ if shift:
+        # estimate mu via linear interpolation between the two reference points
+ mu = get_lin_function(y1=base_shift, y2=max_shift)(image_seq_len)
+ timesteps = time_shift(mu, 1.0, timesteps)
+
+ return timesteps.tolist()
+
+def rf_inversion(
+ model: Flux,
+ img: Tensor,
+ img_ids: Tensor,
+ txt: Tensor,
+ txt_ids: Tensor,
+ vec: Tensor,
+ timesteps: list[float],
+ guidance: float = 4.0,
+ id_weight=1.0,
+ id=None,
+ start_step=0,
+ uncond_id=None,
+ true_cfg=1.0,
+ timestep_to_start_cfg=1,
+ neg_txt=None,
+ neg_txt_ids=None,
+ neg_vec=None,
+ aggressive_offload=False,
+ y_1: Tensor = None,
+ gamma: float = 0.5,
+):
+ # reverse the timesteps
+ timesteps = timesteps[::-1]
+ guidance_vec = torch.full((img.shape[0],), guidance, device=img.device, dtype=img.dtype)
+ use_true_cfg = abs(true_cfg - 1.0) > 1e-2
+ for i in tqdm(range(len(timesteps) - 1), desc="Inverting"):
+ t_i = i / len(timesteps)
+ t_vec = torch.full((img.shape[0],), t_i, dtype=img.dtype, device=img.device)
+ pred = model(
+ img=img,
+ img_ids=img_ids,
+ txt=txt,
+ txt_ids=txt_ids,
+ y=vec,
+ timesteps=t_vec,
+ guidance=guidance_vec,
+ id=id if (len(timesteps) - 1 - i) >= start_step else None,
+ id_weight=id_weight,
+ aggressive_offload=aggressive_offload,
+ )
+
+ if use_true_cfg and i >= timestep_to_start_cfg:
+ neg_pred = model(
+ img=img,
+ img_ids=img_ids,
+ txt=neg_txt,
+ txt_ids=neg_txt_ids,
+ y=neg_vec,
+ timesteps=t_vec,
+ guidance=guidance_vec,
+ id=uncond_id if (len(timesteps) - 1 - i) >= start_step else None,
+ id_weight=id_weight,
+ aggressive_offload=aggressive_offload,
+ )
+ pred = neg_pred + true_cfg * (pred - neg_pred)
+
+ assert (1 - t_i) != 0
+ u_t_i_cond = (y_1 - img) / (1 - t_i)
+ pred = pred + gamma * (u_t_i_cond - pred)
+
+ img = img + (timesteps[i+1] - timesteps[i]) * pred
+
+ return img
+
+def rf_denoise(
+ model: Flux,
+ img: Tensor,
+ img_ids: Tensor,
+ txt: Tensor,
+ txt_ids: Tensor,
+ vec: Tensor,
+ timesteps: list[float],
+ guidance: float = 4.0,
+ id_weight=1.0,
+ id=None,
+ start_step=0,
+ uncond_id=None,
+ true_cfg=1.0,
+ timestep_to_start_cfg=1,
+ neg_txt=None,
+ neg_txt_ids=None,
+ neg_vec=None,
+ aggressive_offload=False,
+ y_0: Tensor = None,
+ eta=0.9,
+ s=0,
+ tau=6,
+):
+ guidance_vec = torch.full((img.shape[0],), guidance, device=img.device, dtype=img.dtype)
+ use_true_cfg = abs(true_cfg - 1.0) > 1e-2
+ for i in tqdm(range(len(timesteps) - 1), desc="Denoising"):
+ t_i = i / len(timesteps)
+ t_vec = torch.full((img.shape[0],), 1-t_i, dtype=img.dtype, device=img.device)
+ pred = model(
+ img=img,
+ img_ids=img_ids,
+ txt=txt,
+ txt_ids=txt_ids,
+ y=vec,
+ timesteps=t_vec,
+ guidance=guidance_vec,
+ id=id if i >= start_step else None,
+ id_weight=id_weight,
+ aggressive_offload=aggressive_offload,
+ )
+
+ if use_true_cfg and i >= timestep_to_start_cfg:
+ neg_pred = model(
+ img=img,
+ img_ids=img_ids,
+ txt=neg_txt,
+ txt_ids=neg_txt_ids,
+ y=neg_vec,
+ timesteps=t_vec,
+ guidance=guidance_vec,
+ id=uncond_id if i >= start_step else None,
+ id_weight=id_weight,
+ aggressive_offload=aggressive_offload,
+ )
+ pred = neg_pred + true_cfg * (pred - neg_pred)
+ pred = -pred
+
+ assert (1 - t_i) != 0
+ v_t_cond = (y_0 - img) / (1 - t_i)
+ eta_t = eta if s <= i < tau else 0
+ pred = pred + eta_t * (v_t_cond - pred)
+
+ img = img + (timesteps[i] - timesteps[i+1]) * pred
+
+ return img
+
+def denoise(
+ model: Flux,
+ # model input
+ img: Tensor,
+ img_ids: Tensor,
+ txt: Tensor,
+ txt_ids: Tensor,
+ vec: Tensor,
+ timesteps: list[float],
+ guidance: float = 4.0,
+ id_weight=1.0,
+ id=None,
+ start_step=0,
+ uncond_id=None,
+ true_cfg=1.0,
+ timestep_to_start_cfg=1,
+ neg_txt=None,
+ neg_txt_ids=None,
+ neg_vec=None,
+ aggressive_offload=False,
+):
+ # this is ignored for schnell
+ guidance_vec = torch.full((img.shape[0],), guidance, device=img.device, dtype=img.dtype)
+ use_true_cfg = abs(true_cfg - 1.0) > 1e-2
+ for i, (t_curr, t_prev) in enumerate(zip(timesteps[:-1], timesteps[1:])):
+ t_vec = torch.full((img.shape[0],), t_curr, dtype=img.dtype, device=img.device)
+ pred = model(
+ img=img,
+ img_ids=img_ids,
+ txt=txt,
+ txt_ids=txt_ids,
+ y=vec,
+ timesteps=t_vec,
+ guidance=guidance_vec,
+ id=id if i >= start_step else None,
+ id_weight=id_weight,
+ aggressive_offload=aggressive_offload,
+ )
+
+ if use_true_cfg and i >= timestep_to_start_cfg:
+ neg_pred = model(
+ img=img,
+ img_ids=img_ids,
+ txt=neg_txt,
+ txt_ids=neg_txt_ids,
+ y=neg_vec,
+ timesteps=t_vec,
+ guidance=guidance_vec,
+ id=uncond_id if i >= start_step else None,
+ id_weight=id_weight,
+ aggressive_offload=aggressive_offload,
+ )
+ pred = neg_pred + true_cfg * (pred - neg_pred)
+
+ img = img + (t_prev - t_curr) * pred
+
+ return img
+
+
+def unpack(x: Tensor, height: int, width: int) -> Tensor:
+ return rearrange(
+ x,
+ "b (h w) (c ph pw) -> b c (h ph) (w pw)",
+ h=math.ceil(height / 16),
+ w=math.ceil(width / 16),
+ ph=2,
+ pw=2,
+ )
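+
+
+if __name__ == "__main__":
+    # Hedged sketch of the two pieces above that need no model weights:
+    # (1) the shifted timestep schedule, (2) the pack/unpack roundtrip that
+    # turns latents into token sequences and back.
+    sched = get_schedule(4, image_seq_len=1024)
+    print([round(t, 3) for t in sched])  # five values from 1.0 down to 0.0, shifted toward 1.0
+
+    latent = torch.randn(1, 16, 64, 64)  # latent of a 512x512 image (the AE downsamples by 8)
+    packed = rearrange(latent, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=2, pw=2)
+    restored = unpack(packed, 512, 512)
+    print(packed.shape, torch.equal(latent, restored))  # torch.Size([1, 1024, 64]) True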
diff --git a/flux/util.py b/flux/util.py
new file mode 100644
index 0000000000000000000000000000000000000000..bacdb39f7c6b958f9bed75fe23f1ca52b24d4166
--- /dev/null
+++ b/flux/util.py
@@ -0,0 +1,249 @@
+import json
+import os
+from dataclasses import dataclass
+
+import torch
+from huggingface_hub import hf_hub_download
+from safetensors.torch import load_file as load_sft
+
+from flux.model import Flux, FluxParams
+from flux.modules.autoencoder import AutoEncoder, AutoEncoderParams
+from flux.modules.conditioner import HFEmbedder
+
+
+@dataclass
+class SamplingOptions:
+ prompt: str
+ width: int
+ height: int
+ num_steps: int
+ guidance: float
+ seed: int
+
+
+@dataclass
+class ModelSpec:
+ params: FluxParams
+ ae_params: AutoEncoderParams
+ ckpt_path: str
+ ae_path: str
+ repo_id: str
+ repo_flow: str
+ repo_ae: str
+
+
+configs = {
+ "flux-dev": ModelSpec(
+ repo_id="black-forest-labs/FLUX.1-dev",
+ repo_flow="flux1-dev.safetensors",
+ repo_ae="ae.safetensors",
+ ckpt_path='models/flux1-dev.safetensors',
+ params=FluxParams(
+ in_channels=64,
+ vec_in_dim=768,
+ context_in_dim=4096,
+ hidden_size=3072,
+ mlp_ratio=4.0,
+ num_heads=24,
+ depth=19,
+ depth_single_blocks=38,
+ axes_dim=[16, 56, 56],
+ theta=10_000,
+ qkv_bias=True,
+ guidance_embed=True,
+ ),
+ ae_path='models/ae.safetensors',
+ ae_params=AutoEncoderParams(
+ resolution=256,
+ in_channels=3,
+ ch=128,
+ out_ch=3,
+ ch_mult=[1, 2, 4, 4],
+ num_res_blocks=2,
+ z_channels=16,
+ scale_factor=0.3611,
+ shift_factor=0.1159,
+ ),
+ ),
+ "flux-schnell": ModelSpec(
+ repo_id="black-forest-labs/FLUX.1-schnell",
+ repo_flow="flux1-schnell.safetensors",
+ repo_ae="ae.safetensors",
+ ckpt_path=os.getenv("FLUX_SCHNELL"),
+ params=FluxParams(
+ in_channels=64,
+ vec_in_dim=768,
+ context_in_dim=4096,
+ hidden_size=3072,
+ mlp_ratio=4.0,
+ num_heads=24,
+ depth=19,
+ depth_single_blocks=38,
+ axes_dim=[16, 56, 56],
+ theta=10_000,
+ qkv_bias=True,
+ guidance_embed=False,
+ ),
+ ae_path=os.getenv("AE"),
+ ae_params=AutoEncoderParams(
+ resolution=256,
+ in_channels=3,
+ ch=128,
+ out_ch=3,
+ ch_mult=[1, 2, 4, 4],
+ num_res_blocks=2,
+ z_channels=16,
+ scale_factor=0.3611,
+ shift_factor=0.1159,
+ ),
+ ),
+}
+
+
+def print_load_warning(missing: list[str], unexpected: list[str]) -> None:
+ if len(missing) > 0 and len(unexpected) > 0:
+ print(f"Got {len(missing)} missing keys:\n\t" + "\n\t".join(missing))
+ print("\n" + "-" * 79 + "\n")
+ print(f"Got {len(unexpected)} unexpected keys:\n\t" + "\n\t".join(unexpected))
+ elif len(missing) > 0:
+ print(f"Got {len(missing)} missing keys:\n\t" + "\n\t".join(missing))
+ elif len(unexpected) > 0:
+ print(f"Got {len(unexpected)} unexpected keys:\n\t" + "\n\t".join(unexpected))
+
+
+def load_flow_model(name: str, device: str = "cuda", hf_download: bool = True):
+ # Loading Flux
+ print("Init model")
+ ckpt_path = configs[name].ckpt_path
+ if (
+ not os.path.exists(ckpt_path)
+ and configs[name].repo_id is not None
+ and configs[name].repo_flow is not None
+ and hf_download
+ ):
+ ckpt_path = hf_hub_download(configs[name].repo_id, configs[name].repo_flow, local_dir='models')
+
+    # Initialize the model on the 'meta' device, which doesn't allocate real memory
+ with torch.device('meta'):
+ model = Flux(configs[name].params)
+ model = model.to_empty(device=device)
+
+ if ckpt_path is not None:
+ print("Loading checkpoint")
+ # Load the state dictionary directly to the desired device
+ sd = load_sft(ckpt_path, device=str(device))
+ # Load the state dictionary into the model
+ missing, unexpected = model.load_state_dict(sd, strict=False)
+ print_load_warning(missing, unexpected)
+ model.to(torch.bfloat16)
+ return model
+
+# from XLabs-AI https://github.com/XLabs-AI/x-flux/blob/1f8ef54972105ad9062be69fe6b7f841bce02a08/src/flux/util.py#L330
+def load_flow_model_quintized(
+ name: str,
+ device: str = "cuda",
+ hf_download: bool = True,
+ cache_path: str = None,
+):
+ """
+ Loads (or downloads) a FLUX-fp8 checkpoint, performs quantization once,
+ and caches the quantized model to disk. Future calls load from cache.
+
+    :param name: model name key in configs (e.g. "flux-dev")
+ :param device: Torch device string ("cuda" or "cpu")
+ :param hf_download: Whether to download from HF if local ckpt is missing
+ :param cache_path: Filepath for cached quantized model
+ :return: A quantized FLUX model on the specified device.
+ """
+ if cache_path is None:
+ cache_path = os.path.join(os.path.expanduser("~"), ".cache/flux_dev_fp8_quantized_model.pth")
+
+ # 1) Check if we already have a cached, quantized model
+ if os.path.exists(cache_path):
+ print(f"Loading cached quantized model from '{cache_path}'...")
+ model = torch.load(cache_path, map_location=device)
+ return model.to(device)
+
+ # 2) If no cache, build and quantize for the first time.
+ print("No cached model found. Initializing + quantizing from scratch.")
+
+ # (A) Download or specify checkpoint paths
+ ckpt_path = "models/flux-dev-fp8.safetensors"
+ if not os.path.exists(ckpt_path) and hf_download:
+ print("Downloading model checkpoint from HF...")
+ ckpt_path = hf_hub_download("XLabs-AI/flux-dev-fp8", "flux-dev-fp8.safetensors")
+ print("Model downloaded to:", ckpt_path)
+
+ json_path = hf_hub_download("XLabs-AI/flux-dev-fp8", "flux_dev_quantization_map.json")
+
+ # (B) Build the unquantized model
+ print("Initializing model in bfloat16...")
+ model = Flux(configs[name].params).to(torch.bfloat16)
+
+ # (C) Load the unquantized weights
+ print("Loading unquantized checkpoint to CPU...")
+ sd = load_sft(ckpt_path, device="cpu") # CPU load
+
+ # (D) Load quantization map
+ with open(json_path, "r") as f:
+ quantization_map = json.load(f)
+
+ # (E) Quantize
+ print("Starting quantization process...")
+ from optimum.quanto import requantize
+ requantize(model, sd, quantization_map, device=device)
+ print("Quantization complete.")
+
+ # (F) Cache the fully quantized model to disk
+ print(f"Saving the quantized model to '{cache_path}'...")
+ torch.save(model, cache_path)
+ print("Model saved. Future runs will load from cache.")
+
+ return model.to(device)
+
+
+def load_t5(device: str = "cuda", max_length: int = 512) -> HFEmbedder:
+ # max length 64, 128, 256 and 512 should work (if your sequence is short enough)
+ return HFEmbedder("xlabs-ai/xflux_text_encoders", max_length=max_length, torch_dtype=torch.bfloat16).to(device)
+
+
+def load_clip(device: str = "cuda") -> HFEmbedder:
+ return HFEmbedder("openai/clip-vit-large-patch14", max_length=77, torch_dtype=torch.bfloat16).to(device)
+
+
+def load_ae(name: str, device: str = "cuda", hf_download: bool = True) -> AutoEncoder:
+ ckpt_path = configs[name].ae_path
+ if (
+ not os.path.exists(ckpt_path)
+ and configs[name].repo_id is not None
+ and configs[name].repo_ae is not None
+ and hf_download
+ ):
+ ckpt_path = hf_hub_download(configs[name].repo_id, configs[name].repo_ae, local_dir='models')
+
+ # Loading the autoencoder
+ print("Init AE")
+ with torch.device(device):
+ ae = AutoEncoder(configs[name].ae_params)
+
+ if ckpt_path is not None:
+ sd = load_sft(ckpt_path, device=str(device))
+ missing, unexpected = ae.load_state_dict(sd, strict=False)
+ print_load_warning(missing, unexpected)
+ return ae
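+
+
+if __name__ == "__main__":
+    # Hedged end-to-end loading sketch. It downloads multi-GB checkpoints from
+    # the Hub on the first run (FLUX.1-dev is gated, so a Hugging Face token may
+    # be required) and needs a CUDA GPU with enough memory, so treat it as
+    # documentation rather than a test.
+    device = "cuda"
+    t5 = load_t5(device, max_length=128)
+    clip = load_clip(device)
+    flow = load_flow_model("flux-dev", device=device)
+    ae = load_ae("flux-dev", device=device)
+    print(type(flow).__name__, type(ae).__name__)  # Flux AutoEncoder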
diff --git a/fonts/arial.ttf b/fonts/arial.ttf
new file mode 100644
index 0000000000000000000000000000000000000000..7ff88f22869126cc992030f18e0eeff65ec8bbac
Binary files /dev/null and b/fonts/arial.ttf differ
diff --git a/pulid/attention_processor.py b/pulid/attention_processor.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb4f9da179952101e745861c0b112d1e592b2c84
--- /dev/null
+++ b/pulid/attention_processor.py
@@ -0,0 +1,422 @@
+# modified from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+NUM_ZERO = 0
+ORTHO = False
+ORTHO_v2 = False
+
+
+class AttnProcessor(nn.Module):
+ def __init__(self):
+ super().__init__()
+
+ def __call__(
+ self,
+ attn,
+ hidden_states,
+ encoder_hidden_states=None,
+ attention_mask=None,
+ temb=None,
+ id_embedding=None,
+ id_scale=1.0,
+ ):
+ residual = hidden_states
+
+ if attn.spatial_norm is not None:
+ hidden_states = attn.spatial_norm(hidden_states, temb)
+
+ input_ndim = hidden_states.ndim
+
+ if input_ndim == 4:
+ batch_size, channel, height, width = hidden_states.shape
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
+
+ batch_size, sequence_length, _ = (
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
+ )
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
+
+ if attn.group_norm is not None:
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
+
+ query = attn.to_q(hidden_states)
+
+ if encoder_hidden_states is None:
+ encoder_hidden_states = hidden_states
+ elif attn.norm_cross:
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
+
+ key = attn.to_k(encoder_hidden_states)
+ value = attn.to_v(encoder_hidden_states)
+
+ query = attn.head_to_batch_dim(query)
+ key = attn.head_to_batch_dim(key)
+ value = attn.head_to_batch_dim(value)
+
+ attention_probs = attn.get_attention_scores(query, key, attention_mask)
+ hidden_states = torch.bmm(attention_probs, value)
+ hidden_states = attn.batch_to_head_dim(hidden_states)
+
+ # linear proj
+ hidden_states = attn.to_out[0](hidden_states)
+ # dropout
+ hidden_states = attn.to_out[1](hidden_states)
+
+ if input_ndim == 4:
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
+
+ if attn.residual_connection:
+ hidden_states = hidden_states + residual
+
+ hidden_states = hidden_states / attn.rescale_output_factor
+
+ return hidden_states
+
+
+class IDAttnProcessor(nn.Module):
+ r"""
+    Attention processor for ID-Adapter.
+ Args:
+ hidden_size (`int`):
+ The hidden size of the attention layer.
+ cross_attention_dim (`int`):
+ The number of channels in the `encoder_hidden_states`.
+ scale (`float`, defaults to 1.0):
+ the weight scale of image prompt.
+ """
+
+ def __init__(self, hidden_size, cross_attention_dim=None):
+ super().__init__()
+ self.id_to_k = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
+ self.id_to_v = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
+
+ def __call__(
+ self,
+ attn,
+ hidden_states,
+ encoder_hidden_states=None,
+ attention_mask=None,
+ temb=None,
+ id_embedding=None,
+ id_scale=1.0,
+ ):
+ residual = hidden_states
+
+ if attn.spatial_norm is not None:
+ hidden_states = attn.spatial_norm(hidden_states, temb)
+
+ input_ndim = hidden_states.ndim
+
+ if input_ndim == 4:
+ batch_size, channel, height, width = hidden_states.shape
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
+
+ batch_size, sequence_length, _ = (
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
+ )
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
+
+ if attn.group_norm is not None:
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
+
+ query = attn.to_q(hidden_states)
+
+ if encoder_hidden_states is None:
+ encoder_hidden_states = hidden_states
+ elif attn.norm_cross:
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
+
+ key = attn.to_k(encoder_hidden_states)
+ value = attn.to_v(encoder_hidden_states)
+
+ query = attn.head_to_batch_dim(query)
+ key = attn.head_to_batch_dim(key)
+ value = attn.head_to_batch_dim(value)
+
+ attention_probs = attn.get_attention_scores(query, key, attention_mask)
+ hidden_states = torch.bmm(attention_probs, value)
+ hidden_states = attn.batch_to_head_dim(hidden_states)
+
+ # for id-adapter
+ if id_embedding is not None:
+ if NUM_ZERO == 0:
+ id_key = self.id_to_k(id_embedding)
+ id_value = self.id_to_v(id_embedding)
+ else:
+ zero_tensor = torch.zeros(
+ (id_embedding.size(0), NUM_ZERO, id_embedding.size(-1)),
+ dtype=id_embedding.dtype,
+ device=id_embedding.device,
+ )
+ id_key = self.id_to_k(torch.cat((id_embedding, zero_tensor), dim=1))
+ id_value = self.id_to_v(torch.cat((id_embedding, zero_tensor), dim=1))
+
+ id_key = attn.head_to_batch_dim(id_key).to(query.dtype)
+ id_value = attn.head_to_batch_dim(id_value).to(query.dtype)
+
+ id_attention_probs = attn.get_attention_scores(query, id_key, None)
+ id_hidden_states = torch.bmm(id_attention_probs, id_value)
+ id_hidden_states = attn.batch_to_head_dim(id_hidden_states)
+
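+            # When ORTHO is enabled, the ID branch is first projected onto the
+            # base hidden states (per channel, summing over the sequence
+            # dimension) and only the orthogonal residual is injected:
+            #   orthogonal = id_h - (<h, id_h> / <h, h>) * h
+            # so the component already parallel to the original features is
+            # left untouched.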
+ if not ORTHO:
+ hidden_states = hidden_states + id_scale * id_hidden_states
+ else:
+ projection = (
+ torch.sum((hidden_states * id_hidden_states), dim=-2, keepdim=True)
+ / torch.sum((hidden_states * hidden_states), dim=-2, keepdim=True)
+ * hidden_states
+ )
+ orthogonal = id_hidden_states - projection
+ hidden_states = hidden_states + id_scale * orthogonal
+
+ # linear proj
+ hidden_states = attn.to_out[0](hidden_states)
+ # dropout
+ hidden_states = attn.to_out[1](hidden_states)
+
+ if input_ndim == 4:
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
+
+ if attn.residual_connection:
+ hidden_states = hidden_states + residual
+
+ hidden_states = hidden_states / attn.rescale_output_factor
+
+ return hidden_states
+
+
+class AttnProcessor2_0(nn.Module):
+ r"""
+ Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0).
+ """
+
+ def __init__(self):
+ super().__init__()
+ if not hasattr(F, "scaled_dot_product_attention"):
+ raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.")
+
+ def __call__(
+ self,
+ attn,
+ hidden_states,
+ encoder_hidden_states=None,
+ attention_mask=None,
+ temb=None,
+ id_embedding=None,
+ id_scale=1.0,
+ ):
+ residual = hidden_states
+
+ if attn.spatial_norm is not None:
+ hidden_states = attn.spatial_norm(hidden_states, temb)
+
+ input_ndim = hidden_states.ndim
+
+ if input_ndim == 4:
+ batch_size, channel, height, width = hidden_states.shape
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
+
+ batch_size, sequence_length, _ = (
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
+ )
+
+ if attention_mask is not None:
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
+ # scaled_dot_product_attention expects attention_mask shape to be
+ # (batch, heads, source_length, target_length)
+ attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
+
+ if attn.group_norm is not None:
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
+
+ query = attn.to_q(hidden_states)
+
+ if encoder_hidden_states is None:
+ encoder_hidden_states = hidden_states
+ elif attn.norm_cross:
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
+
+ key = attn.to_k(encoder_hidden_states)
+ value = attn.to_v(encoder_hidden_states)
+
+ inner_dim = key.shape[-1]
+ head_dim = inner_dim // attn.heads
+
+ query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+
+ key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+ value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+
+ # the output of sdp = (batch, num_heads, seq_len, head_dim)
+ hidden_states = F.scaled_dot_product_attention(
+ query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
+ )
+
+ hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
+ hidden_states = hidden_states.to(query.dtype)
+
+ # linear proj
+ hidden_states = attn.to_out[0](hidden_states)
+ # dropout
+ hidden_states = attn.to_out[1](hidden_states)
+
+ if input_ndim == 4:
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
+
+ if attn.residual_connection:
+ hidden_states = hidden_states + residual
+
+ hidden_states = hidden_states / attn.rescale_output_factor
+
+ return hidden_states
+
+
+class IDAttnProcessor2_0(torch.nn.Module):
+ r"""
+    Attention processor for the ID-Adapter (PyTorch 2.0).
+ Args:
+ hidden_size (`int`):
+ The hidden size of the attention layer.
+ cross_attention_dim (`int`):
+ The number of channels in the `encoder_hidden_states`.
+ """
+
+ def __init__(self, hidden_size, cross_attention_dim=None):
+ super().__init__()
+ if not hasattr(F, "scaled_dot_product_attention"):
+            raise ImportError("IDAttnProcessor2_0 requires PyTorch 2.0; please upgrade PyTorch to use it.")
+
+ self.id_to_k = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
+ self.id_to_v = nn.Linear(cross_attention_dim or hidden_size, hidden_size, bias=False)
+
+ def __call__(
+ self,
+ attn,
+ hidden_states,
+ encoder_hidden_states=None,
+ attention_mask=None,
+ temb=None,
+ id_embedding=None,
+ id_scale=1.0,
+ ):
+ residual = hidden_states
+
+ if attn.spatial_norm is not None:
+ hidden_states = attn.spatial_norm(hidden_states, temb)
+
+ input_ndim = hidden_states.ndim
+
+ if input_ndim == 4:
+ batch_size, channel, height, width = hidden_states.shape
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
+
+ batch_size, sequence_length, _ = (
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
+ )
+
+ if attention_mask is not None:
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
+ # scaled_dot_product_attention expects attention_mask shape to be
+ # (batch, heads, source_length, target_length)
+ attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
+
+ if attn.group_norm is not None:
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
+
+ query = attn.to_q(hidden_states)
+
+ if encoder_hidden_states is None:
+ encoder_hidden_states = hidden_states
+ elif attn.norm_cross:
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
+
+ key = attn.to_k(encoder_hidden_states)
+ value = attn.to_v(encoder_hidden_states)
+
+ inner_dim = key.shape[-1]
+ head_dim = inner_dim // attn.heads
+
+ query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+
+ key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+ value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+
+ # the output of sdp = (batch, num_heads, seq_len, head_dim)
+ hidden_states = F.scaled_dot_product_attention(
+ query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
+ )
+
+ hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
+ hidden_states = hidden_states.to(query.dtype)
+
+ # for id embedding
+ if id_embedding is not None:
+ if NUM_ZERO == 0:
+ id_key = self.id_to_k(id_embedding).to(query.dtype)
+ id_value = self.id_to_v(id_embedding).to(query.dtype)
+ else:
+ zero_tensor = torch.zeros(
+ (id_embedding.size(0), NUM_ZERO, id_embedding.size(-1)),
+ dtype=id_embedding.dtype,
+ device=id_embedding.device,
+ )
+ id_key = self.id_to_k(torch.cat((id_embedding, zero_tensor), dim=1)).to(query.dtype)
+ id_value = self.id_to_v(torch.cat((id_embedding, zero_tensor), dim=1)).to(query.dtype)
+
+ id_key = id_key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+ id_value = id_value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
+
+ # the output of sdp = (batch, num_heads, seq_len, head_dim)
+ id_hidden_states = F.scaled_dot_product_attention(
+ query, id_key, id_value, attn_mask=None, dropout_p=0.0, is_causal=False
+ )
+
+ id_hidden_states = id_hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
+ id_hidden_states = id_hidden_states.to(query.dtype)
+
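+            # Merge the ID branch into the base attention output. ORTHO removes the component of
+            # the ID features that is parallel to the base hidden states (projected over the
+            # sequence dimension) before adding them, while ORTHO_v2 reweights that parallel
+            # component by how much each query attends to the first five ID tokens.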
+ if not ORTHO and not ORTHO_v2:
+ hidden_states = hidden_states + id_scale * id_hidden_states
+ elif ORTHO_v2:
+ orig_dtype = hidden_states.dtype
+ hidden_states = hidden_states.to(torch.float32)
+ id_hidden_states = id_hidden_states.to(torch.float32)
+ attn_map = query @ id_key.transpose(-2, -1)
+ attn_mean = attn_map.softmax(dim=-1).mean(dim=1)
+ attn_mean = attn_mean[:, :, :5].sum(dim=-1, keepdim=True)
+ projection = (
+ torch.sum((hidden_states * id_hidden_states), dim=-2, keepdim=True)
+ / torch.sum((hidden_states * hidden_states), dim=-2, keepdim=True)
+ * hidden_states
+ )
+ orthogonal = id_hidden_states + (attn_mean - 1) * projection
+ hidden_states = hidden_states + id_scale * orthogonal
+ hidden_states = hidden_states.to(orig_dtype)
+ else:
+ orig_dtype = hidden_states.dtype
+ hidden_states = hidden_states.to(torch.float32)
+ id_hidden_states = id_hidden_states.to(torch.float32)
+ projection = (
+ torch.sum((hidden_states * id_hidden_states), dim=-2, keepdim=True)
+ / torch.sum((hidden_states * hidden_states), dim=-2, keepdim=True)
+ * hidden_states
+ )
+ orthogonal = id_hidden_states - projection
+ hidden_states = hidden_states + id_scale * orthogonal
+ hidden_states = hidden_states.to(orig_dtype)
+
+ # linear proj
+ hidden_states = attn.to_out[0](hidden_states)
+ # dropout
+ hidden_states = attn.to_out[1](hidden_states)
+
+ if input_ndim == 4:
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
+
+ if attn.residual_connection:
+ hidden_states = hidden_states + residual
+
+ hidden_states = hidden_states / attn.rescale_output_factor
+
+ return hidden_states
diff --git a/pulid/encoders.py b/pulid/encoders.py
new file mode 100644
index 0000000000000000000000000000000000000000..47dff3dbcb0167beb09aa9df8c4ffdc560042a9f
--- /dev/null
+++ b/pulid/encoders.py
@@ -0,0 +1,64 @@
+import torch
+import torch.nn as nn
+
+
+class IDEncoder(nn.Module):
+ def __init__(self, width=1280, context_dim=2048, num_token=5):
+ super().__init__()
+ self.num_token = num_token
+ self.context_dim = context_dim
+ h1 = min((context_dim * num_token) // 4, 1024)
+ h2 = min((context_dim * num_token) // 2, 1024)
+ self.body = nn.Sequential(
+ nn.Linear(width, h1),
+ nn.LayerNorm(h1),
+ nn.LeakyReLU(),
+ nn.Linear(h1, h2),
+ nn.LayerNorm(h2),
+ nn.LeakyReLU(),
+ nn.Linear(h2, context_dim * num_token),
+ )
+
+ for i in range(5):
+ setattr(
+ self,
+ f'mapping_{i}',
+ nn.Sequential(
+ nn.Linear(1024, 1024),
+ nn.LayerNorm(1024),
+ nn.LeakyReLU(),
+ nn.Linear(1024, 1024),
+ nn.LayerNorm(1024),
+ nn.LeakyReLU(),
+ nn.Linear(1024, context_dim),
+ ),
+ )
+
+ setattr(
+ self,
+ f'mapping_patch_{i}',
+ nn.Sequential(
+ nn.Linear(1024, 1024),
+ nn.LayerNorm(1024),
+ nn.LeakyReLU(),
+ nn.Linear(1024, 1024),
+ nn.LayerNorm(1024),
+ nn.LeakyReLU(),
+ nn.Linear(1024, context_dim),
+ ),
+ )
+
+ def forward(self, x, y):
+ # x shape [N, C]
+ x = self.body(x)
+ x = x.reshape(-1, self.num_token, self.context_dim)
+
+ hidden_states = ()
+ for i, emb in enumerate(y):
+ hidden_state = getattr(self, f'mapping_{i}')(emb[:, :1]) + getattr(self, f'mapping_patch_{i}')(
+ emb[:, 1:]
+ ).mean(dim=1, keepdim=True)
+ hidden_states += (hidden_state,)
+ hidden_states = torch.cat(hidden_states, dim=1)
+
+ return torch.cat([x, hidden_states], dim=1)
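+
+
+if __name__ == "__main__":
+    # A minimal shape sketch, not part of the PuLID API: with the defaults, x is the fused
+    # 1280-d id vector and y is a list of five multi-scale ViT hidden states, assumed here to
+    # be [N, 577, 1024] each (577 is an illustrative token count). The encoder returns the 5
+    # projected id tokens plus one token per scale -> [N, 10, 2048].
+    enc = IDEncoder()
+    x = torch.randn(2, 1280)
+    y = [torch.randn(2, 577, 1024) for _ in range(5)]
+    print(enc(x, y).shape)  # torch.Size([2, 10, 2048])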
diff --git a/pulid/encoders_transformer.py b/pulid/encoders_transformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..afbdaf36e6cc90fd080c405ded56423f1eaeae11
--- /dev/null
+++ b/pulid/encoders_transformer.py
@@ -0,0 +1,209 @@
+import math
+
+import torch
+import torch.nn as nn
+
+
+# FFN
+def FeedForward(dim, mult=4):
+ inner_dim = int(dim * mult)
+ return nn.Sequential(
+ nn.LayerNorm(dim),
+ nn.Linear(dim, inner_dim, bias=False),
+ nn.GELU(),
+ nn.Linear(inner_dim, dim, bias=False),
+ )
+
+
+def reshape_tensor(x, heads):
+ bs, length, width = x.shape
+ # (bs, length, width) --> (bs, length, n_heads, dim_per_head)
+ x = x.view(bs, length, heads, -1)
+ # (bs, length, n_heads, dim_per_head) --> (bs, n_heads, length, dim_per_head)
+ x = x.transpose(1, 2)
+    # (bs, n_heads, length, dim_per_head) stays (bs, n_heads, length, dim_per_head); the reshape just makes the layout explicit
+ x = x.reshape(bs, heads, length, -1)
+ return x
+
+
+class PerceiverAttentionCA(nn.Module):
+ def __init__(self, *, dim=3072, dim_head=128, heads=16, kv_dim=2048):
+ super().__init__()
+ self.scale = dim_head ** -0.5
+ self.dim_head = dim_head
+ self.heads = heads
+ inner_dim = dim_head * heads
+
+ self.norm1 = nn.LayerNorm(dim if kv_dim is None else kv_dim)
+ self.norm2 = nn.LayerNorm(dim)
+
+ self.to_q = nn.Linear(dim, inner_dim, bias=False)
+ self.to_kv = nn.Linear(dim if kv_dim is None else kv_dim, inner_dim * 2, bias=False)
+ self.to_out = nn.Linear(inner_dim, dim, bias=False)
+
+ def forward(self, x, latents):
+ """
+ Args:
+ x (torch.Tensor): image features
+ shape (b, n1, D)
+            latents (torch.Tensor): latent features
+ shape (b, n2, D)
+ """
+ x = self.norm1(x)
+ latents = self.norm2(latents)
+
+ b, seq_len, _ = latents.shape
+
+ q = self.to_q(latents)
+ k, v = self.to_kv(x).chunk(2, dim=-1)
+
+ q = reshape_tensor(q, self.heads)
+ k = reshape_tensor(k, self.heads)
+ v = reshape_tensor(v, self.heads)
+
+ # attention
+ scale = 1 / math.sqrt(math.sqrt(self.dim_head))
+ weight = (q * scale) @ (k * scale).transpose(-2, -1) # More stable with f16 than dividing afterwards
+ weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
+ out = weight @ v
+
+ out = out.permute(0, 2, 1, 3).reshape(b, seq_len, -1)
+
+ return self.to_out(out)
+
+
+class PerceiverAttention(nn.Module):
+ def __init__(self, *, dim, dim_head=64, heads=8, kv_dim=None):
+ super().__init__()
+ self.scale = dim_head ** -0.5
+ self.dim_head = dim_head
+ self.heads = heads
+ inner_dim = dim_head * heads
+
+ self.norm1 = nn.LayerNorm(dim if kv_dim is None else kv_dim)
+ self.norm2 = nn.LayerNorm(dim)
+
+ self.to_q = nn.Linear(dim, inner_dim, bias=False)
+ self.to_kv = nn.Linear(dim if kv_dim is None else kv_dim, inner_dim * 2, bias=False)
+ self.to_out = nn.Linear(inner_dim, dim, bias=False)
+
+ def forward(self, x, latents):
+ """
+ Args:
+ x (torch.Tensor): image features
+ shape (b, n1, D)
+            latents (torch.Tensor): latent features
+ shape (b, n2, D)
+ """
+ x = self.norm1(x)
+ latents = self.norm2(latents)
+
+ b, seq_len, _ = latents.shape
+
+ q = self.to_q(latents)
+ kv_input = torch.cat((x, latents), dim=-2)
+ k, v = self.to_kv(kv_input).chunk(2, dim=-1)
+
+ q = reshape_tensor(q, self.heads)
+ k = reshape_tensor(k, self.heads)
+ v = reshape_tensor(v, self.heads)
+
+ # attention
+ scale = 1 / math.sqrt(math.sqrt(self.dim_head))
+ weight = (q * scale) @ (k * scale).transpose(-2, -1) # More stable with f16 than dividing afterwards
+ weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
+ out = weight @ v
+
+ out = out.permute(0, 2, 1, 3).reshape(b, seq_len, -1)
+
+ return self.to_out(out)
+
+
+class IDFormer(nn.Module):
+ """
+    - perceiver-resampler-like architecture (compared with the previous MLP-like IDEncoder)
+    - the id embedding (generated by arcface) is concatenated with learnable query tokens to form the latents
+    - the latents attend to each other and interact with the vit features through cross-attention
+    - the vit features are multi-scale and fed into IDFormer in order; with the default depth of 10, each scale
+      corresponds to two IDFormer layers (see the shape sketch at the end of this file)
+ """
+ def __init__(
+ self,
+ dim=1024,
+ depth=10,
+ dim_head=64,
+ heads=16,
+ num_id_token=5,
+ num_queries=32,
+ output_dim=2048,
+ ff_mult=4,
+ ):
+ super().__init__()
+
+ self.num_id_token = num_id_token
+ self.dim = dim
+ self.num_queries = num_queries
+ assert depth % 5 == 0
+ self.depth = depth // 5
+ scale = dim ** -0.5
+
+ self.latents = nn.Parameter(torch.randn(1, num_queries, dim) * scale)
+ self.proj_out = nn.Parameter(scale * torch.randn(dim, output_dim))
+
+ self.layers = nn.ModuleList([])
+ for _ in range(depth):
+ self.layers.append(
+ nn.ModuleList(
+ [
+ PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads),
+ FeedForward(dim=dim, mult=ff_mult),
+ ]
+ )
+ )
+
+ for i in range(5):
+ setattr(
+ self,
+ f'mapping_{i}',
+ nn.Sequential(
+ nn.Linear(1024, 1024),
+ nn.LayerNorm(1024),
+ nn.LeakyReLU(),
+ nn.Linear(1024, 1024),
+ nn.LayerNorm(1024),
+ nn.LeakyReLU(),
+ nn.Linear(1024, dim),
+ ),
+ )
+
+ self.id_embedding_mapping = nn.Sequential(
+ nn.Linear(1280, 1024),
+ nn.LayerNorm(1024),
+ nn.LeakyReLU(),
+ nn.Linear(1024, 1024),
+ nn.LayerNorm(1024),
+ nn.LeakyReLU(),
+ nn.Linear(1024, dim * num_id_token),
+ )
+
+ def forward(self, x, y):
+
+ latents = self.latents.repeat(x.size(0), 1, 1)
+
+        # "duotu" roughly means "multiple images": x is [N, num_imgs, 1280] when several reference faces are stacked along dim 1
+        num_duotu = x.shape[1] if x.ndim == 3 else 1
+
+ x = self.id_embedding_mapping(x)
+ x = x.reshape(-1, self.num_id_token * num_duotu, self.dim)
+
+ latents = torch.cat((latents, x), dim=1)
+
+ for i in range(5):
+ vit_feature = getattr(self, f'mapping_{i}')(y[i])
+ ctx_feature = torch.cat((x, vit_feature), dim=1)
+ for attn, ff in self.layers[i * self.depth: (i + 1) * self.depth]:
+ latents = attn(ctx_feature, latents) + latents
+ latents = ff(latents) + latents
+
+ latents = latents[:, :self.num_queries]
+ latents = latents @ self.proj_out
+ return latents
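+
+
+if __name__ == "__main__":
+    # A minimal shape sketch, not part of the PuLID API: x is the fused id vector [N, 1280]
+    # (or [N, num_imgs, 1280] for several reference faces) and y holds five multi-scale ViT
+    # hidden states, assumed here to be [N, 577, 1024] each. The 32 learnable queries are
+    # projected to output_dim=2048, matching the kv_dim of PerceiverAttentionCA.
+    idformer = IDFormer()
+    x = torch.randn(2, 1280)
+    y = [torch.randn(2, 577, 1024) for _ in range(5)]
+    print(idformer(x, y).shape)  # torch.Size([2, 32, 2048])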
diff --git a/pulid/pipeline.py b/pulid/pipeline.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e83a3e221fb5e302812ab5b5a7aed42d8a295b6
--- /dev/null
+++ b/pulid/pipeline.py
@@ -0,0 +1,228 @@
+import gc
+
+import cv2
+import insightface
+import torch
+import torch.nn as nn
+from diffusers import (
+ DPMSolverMultistepScheduler,
+ StableDiffusionXLPipeline,
+ UNet2DConditionModel,
+)
+from facexlib.parsing import init_parsing_model
+from facexlib.utils.face_restoration_helper import FaceRestoreHelper
+from huggingface_hub import hf_hub_download, snapshot_download
+from insightface.app import FaceAnalysis
+from safetensors.torch import load_file
+from torchvision.transforms import InterpolationMode
+from torchvision.transforms.functional import normalize, resize
+
+from eva_clip import create_model_and_transforms
+from eva_clip.constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
+from pulid.encoders import IDEncoder
+from pulid.utils import img2tensor, is_torch2_available, tensor2img
+
+if is_torch2_available():
+ from pulid.attention_processor import AttnProcessor2_0 as AttnProcessor
+ from pulid.attention_processor import IDAttnProcessor2_0 as IDAttnProcessor
+else:
+ from pulid.attention_processor import AttnProcessor, IDAttnProcessor
+
+
+class PuLIDPipeline:
+ def __init__(self, *args, **kwargs):
+ super().__init__()
+ self.device = 'cuda'
+ sdxl_base_repo = 'stabilityai/stable-diffusion-xl-base-1.0'
+ sdxl_lightning_repo = 'ByteDance/SDXL-Lightning'
+ self.sdxl_base_repo = sdxl_base_repo
+
+ # load base model
+ unet = UNet2DConditionModel.from_config(sdxl_base_repo, subfolder='unet').to(self.device, torch.float16)
+ unet.load_state_dict(
+ load_file(
+ hf_hub_download(sdxl_lightning_repo, 'sdxl_lightning_4step_unet.safetensors'), device=self.device
+ )
+ )
+ self.hack_unet_attn_layers(unet)
+ self.pipe = StableDiffusionXLPipeline.from_pretrained(
+ sdxl_base_repo, unet=unet, torch_dtype=torch.float16, variant="fp16"
+ ).to(self.device)
+ self.pipe.watermark = None
+
+ # scheduler
+ self.pipe.scheduler = DPMSolverMultistepScheduler.from_config(
+ self.pipe.scheduler.config, timestep_spacing="trailing"
+ )
+
+ # ID adapters
+ self.id_adapter = IDEncoder().to(self.device)
+
+ # preprocessors
+ # face align and parsing
+ self.face_helper = FaceRestoreHelper(
+ upscale_factor=1,
+ face_size=512,
+ crop_ratio=(1, 1),
+ det_model='retinaface_resnet50',
+ save_ext='png',
+ device=self.device,
+ )
+ self.face_helper.face_parse = None
+ self.face_helper.face_parse = init_parsing_model(model_name='bisenet', device=self.device)
+ # clip-vit backbone
+ model, _, _ = create_model_and_transforms('EVA02-CLIP-L-14-336', 'eva_clip', force_custom_clip=True)
+ model = model.visual
+ self.clip_vision_model = model.to(self.device)
+ eva_transform_mean = getattr(self.clip_vision_model, 'image_mean', OPENAI_DATASET_MEAN)
+ eva_transform_std = getattr(self.clip_vision_model, 'image_std', OPENAI_DATASET_STD)
+ if not isinstance(eva_transform_mean, (list, tuple)):
+ eva_transform_mean = (eva_transform_mean,) * 3
+ if not isinstance(eva_transform_std, (list, tuple)):
+ eva_transform_std = (eva_transform_std,) * 3
+ self.eva_transform_mean = eva_transform_mean
+ self.eva_transform_std = eva_transform_std
+ # antelopev2
+ snapshot_download('DIAMONIK7777/antelopev2', local_dir='models/antelopev2')
+ self.app = FaceAnalysis(
+ name='antelopev2', root='.', providers=['CUDAExecutionProvider', 'CPUExecutionProvider']
+ )
+ self.app.prepare(ctx_id=0, det_size=(640, 640))
+ self.handler_ante = insightface.model_zoo.get_model('models/antelopev2/glintr100.onnx')
+ self.handler_ante.prepare(ctx_id=0)
+
+ gc.collect()
+ torch.cuda.empty_cache()
+
+ self.load_pretrain()
+
+ # other configs
+ self.debug_img_list = []
+
+ def hack_unet_attn_layers(self, unet):
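+        # Replace every cross-attention processor with an IDAttnProcessor so the `id_embedding`
+        # and `id_scale` passed through cross_attention_kwargs are injected; self-attention
+        # (attn1) layers keep a plain AttnProcessor. The processors are collected in an
+        # nn.ModuleList so their id_to_k/id_to_v weights can be loaded by load_pretrain().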
+ id_adapter_attn_procs = {}
+ for name, _ in unet.attn_processors.items():
+ cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
+ if name.startswith("mid_block"):
+ hidden_size = unet.config.block_out_channels[-1]
+ elif name.startswith("up_blocks"):
+ block_id = int(name[len("up_blocks.")])
+ hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
+ elif name.startswith("down_blocks"):
+ block_id = int(name[len("down_blocks.")])
+ hidden_size = unet.config.block_out_channels[block_id]
+ if cross_attention_dim is not None:
+ id_adapter_attn_procs[name] = IDAttnProcessor(
+ hidden_size=hidden_size,
+ cross_attention_dim=cross_attention_dim,
+ ).to(unet.device)
+ else:
+ id_adapter_attn_procs[name] = AttnProcessor()
+ unet.set_attn_processor(id_adapter_attn_procs)
+ self.id_adapter_attn_layers = nn.ModuleList(unet.attn_processors.values())
+
+ def load_pretrain(self):
+ hf_hub_download('guozinan/PuLID', 'pulid_v1.bin', local_dir='models')
+ ckpt_path = 'models/pulid_v1.bin'
+ state_dict = torch.load(ckpt_path, map_location='cpu')
+ state_dict_dict = {}
+ for k, v in state_dict.items():
+ module = k.split('.')[0]
+ state_dict_dict.setdefault(module, {})
+ new_k = k[len(module) + 1 :]
+ state_dict_dict[module][new_k] = v
+
+ for module in state_dict_dict:
+ print(f'loading from {module}')
+ getattr(self, module).load_state_dict(state_dict_dict[module], strict=True)
+
+ def to_gray(self, img):
+ x = 0.299 * img[:, 0:1] + 0.587 * img[:, 1:2] + 0.114 * img[:, 2:3]
+ x = x.repeat(1, 3, 1, 1)
+ return x
+
+ def get_id_embedding(self, image):
+ """
+ Args:
+ image: numpy rgb image, range [0, 255]
+ """
+ self.face_helper.clean_all()
+ image_bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
+ # get antelopev2 embedding
+ face_info = self.app.get(image_bgr)
+ if len(face_info) > 0:
+ face_info = sorted(face_info, key=lambda x: (x['bbox'][2] - x['bbox'][0]) * (x['bbox'][3] - x['bbox'][1]))[
+ -1
+            ]  # only use the largest detected face
+ id_ante_embedding = face_info['embedding']
+ self.debug_img_list.append(
+ image[
+ int(face_info['bbox'][1]) : int(face_info['bbox'][3]),
+ int(face_info['bbox'][0]) : int(face_info['bbox'][2]),
+ ]
+ )
+ else:
+ id_ante_embedding = None
+
+ # using facexlib to detect and align face
+ self.face_helper.read_image(image_bgr)
+ self.face_helper.get_face_landmarks_5(only_center_face=True)
+ self.face_helper.align_warp_face()
+ if len(self.face_helper.cropped_faces) == 0:
+ raise RuntimeError('facexlib align face fail')
+ align_face = self.face_helper.cropped_faces[0]
+        # in case insightface didn't detect a face
+        if id_ante_embedding is None:
+            print('failed to detect a face with insightface, extracting the embedding from the aligned face instead')
+ id_ante_embedding = self.handler_ante.get_feat(align_face)
+
+ id_ante_embedding = torch.from_numpy(id_ante_embedding).to(self.device)
+ if id_ante_embedding.ndim == 1:
+ id_ante_embedding = id_ante_embedding.unsqueeze(0)
+
+ # parsing
+ input = img2tensor(align_face, bgr2rgb=True).unsqueeze(0) / 255.0
+ input = input.to(self.device)
+ parsing_out = self.face_helper.face_parse(normalize(input, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]))[0]
+ parsing_out = parsing_out.argmax(dim=1, keepdim=True)
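+        # labels below are treated as background / non-identity regions; those pixels are
+        # painted white and the remaining face region is converted to gray before EVA-CLIP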
+ bg_label = [0, 16, 18, 7, 8, 9, 14, 15]
+ bg = sum(parsing_out == i for i in bg_label).bool()
+ white_image = torch.ones_like(input)
+ # only keep the face features
+ face_features_image = torch.where(bg, white_image, self.to_gray(input))
+ self.debug_img_list.append(tensor2img(face_features_image, rgb2bgr=False))
+
+ # transform img before sending to eva-clip-vit
+ face_features_image = resize(face_features_image, self.clip_vision_model.image_size, InterpolationMode.BICUBIC)
+ face_features_image = normalize(face_features_image, self.eva_transform_mean, self.eva_transform_std)
+ id_cond_vit, id_vit_hidden = self.clip_vision_model(
+ face_features_image, return_all_features=False, return_hidden=True, shuffle=False
+ )
+ id_cond_vit_norm = torch.norm(id_cond_vit, 2, 1, True)
+ id_cond_vit = torch.div(id_cond_vit, id_cond_vit_norm)
+
+ id_cond = torch.cat([id_ante_embedding, id_cond_vit], dim=-1)
+ id_uncond = torch.zeros_like(id_cond)
+ id_vit_hidden_uncond = []
+ for layer_idx in range(0, len(id_vit_hidden)):
+ id_vit_hidden_uncond.append(torch.zeros_like(id_vit_hidden[layer_idx]))
+
+ id_embedding = self.id_adapter(id_cond, id_vit_hidden)
+ uncond_id_embedding = self.id_adapter(id_uncond, id_vit_hidden_uncond)
+
+ # return id_embedding
+ return torch.cat((uncond_id_embedding, id_embedding), dim=0)
+
+ def inference(self, prompt, size, prompt_n='', image_embedding=None, id_scale=1.0, guidance_scale=1.2, steps=4):
+ images = self.pipe(
+ prompt=prompt,
+ negative_prompt=prompt_n,
+ num_images_per_prompt=size[0],
+ height=size[1],
+ width=size[2],
+ num_inference_steps=steps,
+ guidance_scale=guidance_scale,
+ cross_attention_kwargs={'id_embedding': image_embedding, 'id_scale': id_scale},
+ ).images
+
+ return images
diff --git a/pulid/pipeline_flux.py b/pulid/pipeline_flux.py
new file mode 100644
index 0000000000000000000000000000000000000000..706514cbef319d3822c2f2fc52bff1bc9383f29b
--- /dev/null
+++ b/pulid/pipeline_flux.py
@@ -0,0 +1,194 @@
+import gc
+
+import cv2
+import insightface
+import torch
+import torch.nn as nn
+from facexlib.parsing import init_parsing_model
+from facexlib.utils.face_restoration_helper import FaceRestoreHelper
+from huggingface_hub import hf_hub_download, snapshot_download
+from insightface.app import FaceAnalysis
+from safetensors.torch import load_file
+from torchvision.transforms import InterpolationMode
+from torchvision.transforms.functional import normalize, resize
+
+from eva_clip import create_model_and_transforms
+from eva_clip.constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
+from pulid.encoders_transformer import IDFormer, PerceiverAttentionCA
+from pulid.utils import img2tensor, tensor2img
+
+
+class PuLIDPipeline(nn.Module):
+ def __init__(self, dit, device, weight_dtype=torch.bfloat16, onnx_provider='gpu', *args, **kwargs):
+ super().__init__()
+ self.device = device
+ self.weight_dtype = weight_dtype
+ double_interval = 2
+ single_interval = 4
+
+ # init encoder
+ self.pulid_encoder = IDFormer().to(self.device, self.weight_dtype)
+
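+        # FLUX.1 has 19 double-stream and 38 single-stream blocks; one PerceiverAttentionCA is
+        # created for every `double_interval`-th double block and every `single_interval`-th
+        # single block, and the whole list is attached to the DiT below.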
+ num_ca = 19 // double_interval + 38 // single_interval
+ if 19 % double_interval != 0:
+ num_ca += 1
+ if 38 % single_interval != 0:
+ num_ca += 1
+ self.pulid_ca = nn.ModuleList([
+ PerceiverAttentionCA().to(self.device, self.weight_dtype) for _ in range(num_ca)
+ ])
+
+ dit.pulid_ca = self.pulid_ca
+ dit.pulid_double_interval = double_interval
+ dit.pulid_single_interval = single_interval
+
+ # preprocessors
+ # face align and parsing
+ self.face_helper = FaceRestoreHelper(
+ upscale_factor=1,
+ face_size=512,
+ crop_ratio=(1, 1),
+ det_model='retinaface_resnet50',
+ save_ext='png',
+ device=self.device,
+ )
+ self.face_helper.face_parse = None
+ self.face_helper.face_parse = init_parsing_model(model_name='bisenet', device=self.device)
+ # clip-vit backbone
+ model, _, _ = create_model_and_transforms('EVA02-CLIP-L-14-336', 'eva_clip', force_custom_clip=True)
+ model = model.visual
+ self.clip_vision_model = model.to(self.device, dtype=self.weight_dtype)
+ eva_transform_mean = getattr(self.clip_vision_model, 'image_mean', OPENAI_DATASET_MEAN)
+ eva_transform_std = getattr(self.clip_vision_model, 'image_std', OPENAI_DATASET_STD)
+ if not isinstance(eva_transform_mean, (list, tuple)):
+ eva_transform_mean = (eva_transform_mean,) * 3
+ if not isinstance(eva_transform_std, (list, tuple)):
+ eva_transform_std = (eva_transform_std,) * 3
+ self.eva_transform_mean = eva_transform_mean
+ self.eva_transform_std = eva_transform_std
+ # antelopev2
+ snapshot_download('DIAMONIK7777/antelopev2', local_dir='models/antelopev2')
+ providers = ['CPUExecutionProvider'] if onnx_provider == 'cpu' \
+ else ['CUDAExecutionProvider', 'CPUExecutionProvider']
+ self.app = FaceAnalysis(name='antelopev2', root='.', providers=providers)
+ self.app.prepare(ctx_id=0, det_size=(640, 640))
+ self.handler_ante = insightface.model_zoo.get_model('models/antelopev2/glintr100.onnx',
+ providers=providers)
+ self.handler_ante.prepare(ctx_id=0)
+
+ gc.collect()
+ torch.cuda.empty_cache()
+
+ # self.load_pretrain()
+
+ # other configs
+ self.debug_img_list = []
+
+ def components_to_device(self, device):
+ # everything but pulid_ca
+ self.face_helper.face_det = self.face_helper.face_det.to(device)
+ self.face_helper.face_parse = self.face_helper.face_parse.to(device)
+ self.clip_vision_model = self.clip_vision_model.to(device)
+ self.pulid_encoder = self.pulid_encoder.to(device)
+
+ def load_pretrain(self, pretrain_path=None, version='v0.9.0'):
+ hf_hub_download('guozinan/PuLID', f'pulid_flux_{version}.safetensors', local_dir='models')
+ ckpt_path = f'models/pulid_flux_{version}.safetensors'
+ if pretrain_path is not None:
+ ckpt_path = pretrain_path
+ state_dict = load_file(ckpt_path)
+ state_dict_dict = {}
+ for k, v in state_dict.items():
+ module = k.split('.')[0]
+ state_dict_dict.setdefault(module, {})
+ new_k = k[len(module) + 1:]
+ state_dict_dict[module][new_k] = v
+
+ for module in state_dict_dict:
+ print(f'loading from {module}')
+ getattr(self, module).load_state_dict(state_dict_dict[module], strict=True)
+
+ del state_dict
+ del state_dict_dict
+
+ def to_gray(self, img):
+ x = 0.299 * img[:, 0:1] + 0.587 * img[:, 1:2] + 0.114 * img[:, 2:3]
+ x = x.repeat(1, 3, 1, 1)
+ return x
+
+ @torch.no_grad()
+ def get_id_embedding(self, image, cal_uncond=False):
+ """
+ Args:
+ image: numpy rgb image, range [0, 255]
+ """
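+        # Overall flow: the arcface id vector and the EVA-CLIP CLS feature are concatenated
+        # into the 1280-d `id_cond`, which together with the multi-scale ViT hidden states is
+        # fed to IDFormer to produce the id tokens consumed by the PerceiverAttentionCA layers.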
+ self.face_helper.clean_all()
+ self.debug_img_list = []
+ image_bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
+ # get antelopev2 embedding
+ face_info = self.app.get(image_bgr)
+ if len(face_info) > 0:
+ face_info = sorted(face_info, key=lambda x: (x['bbox'][2] - x['bbox'][0]) * (x['bbox'][3] - x['bbox'][1]))[
+ -1
+            ]  # only use the largest detected face
+ id_ante_embedding = face_info['embedding']
+ self.debug_img_list.append(
+ image[
+ int(face_info['bbox'][1]) : int(face_info['bbox'][3]),
+ int(face_info['bbox'][0]) : int(face_info['bbox'][2]),
+ ]
+ )
+ else:
+ id_ante_embedding = None
+
+ # using facexlib to detect and align face
+ self.face_helper.read_image(image_bgr)
+ self.face_helper.get_face_landmarks_5(only_center_face=True)
+ self.face_helper.align_warp_face()
+ if len(self.face_helper.cropped_faces) == 0:
+ raise RuntimeError('facexlib align face fail')
+ align_face = self.face_helper.cropped_faces[0]
+        # in case insightface didn't detect a face
+        if id_ante_embedding is None:
+            print('failed to detect a face with insightface, extracting the embedding from the aligned face instead')
+ id_ante_embedding = self.handler_ante.get_feat(align_face)
+
+ id_ante_embedding = torch.from_numpy(id_ante_embedding).to(self.device, self.weight_dtype)
+ if id_ante_embedding.ndim == 1:
+ id_ante_embedding = id_ante_embedding.unsqueeze(0)
+
+ # parsing
+ input = img2tensor(align_face, bgr2rgb=True).unsqueeze(0) / 255.0
+ input = input.to(self.device)
+ parsing_out = self.face_helper.face_parse(normalize(input, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]))[0]
+ parsing_out = parsing_out.argmax(dim=1, keepdim=True)
+ bg_label = [0, 16, 18, 7, 8, 9, 14, 15]
+ bg = sum(parsing_out == i for i in bg_label).bool()
+ white_image = torch.ones_like(input)
+ # only keep the face features
+ face_features_image = torch.where(bg, white_image, self.to_gray(input))
+ self.debug_img_list.append(tensor2img(face_features_image, rgb2bgr=False))
+
+ # transform img before sending to eva-clip-vit
+ face_features_image = resize(face_features_image, self.clip_vision_model.image_size, InterpolationMode.BICUBIC)
+ face_features_image = normalize(face_features_image, self.eva_transform_mean, self.eva_transform_std)
+ id_cond_vit, id_vit_hidden = self.clip_vision_model(
+ face_features_image.to(self.weight_dtype), return_all_features=False, return_hidden=True, shuffle=False
+ )
+ id_cond_vit_norm = torch.norm(id_cond_vit, 2, 1, True)
+ id_cond_vit = torch.div(id_cond_vit, id_cond_vit_norm)
+
+ id_cond = torch.cat([id_ante_embedding, id_cond_vit], dim=-1)
+
+ id_embedding = self.pulid_encoder(id_cond, id_vit_hidden)
+
+ if not cal_uncond:
+ return id_embedding, None
+
+ id_uncond = torch.zeros_like(id_cond)
+ id_vit_hidden_uncond = []
+ for layer_idx in range(0, len(id_vit_hidden)):
+ id_vit_hidden_uncond.append(torch.zeros_like(id_vit_hidden[layer_idx]))
+ uncond_id_embedding = self.pulid_encoder(id_uncond, id_vit_hidden_uncond)
+
+ return id_embedding, uncond_id_embedding
diff --git a/pulid/pipeline_v1_1.py b/pulid/pipeline_v1_1.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ca68cf11f509a2168c6a075a56f73e1ecf0a074
--- /dev/null
+++ b/pulid/pipeline_v1_1.py
@@ -0,0 +1,324 @@
+import gc
+
+import cv2
+import insightface
+import numpy as np
+import torch
+import torch.nn as nn
+from basicsr.utils import img2tensor, tensor2img
+from diffusers import DPMSolverMultistepScheduler, StableDiffusionXLPipeline
+from facexlib.parsing import init_parsing_model
+from facexlib.utils.face_restoration_helper import FaceRestoreHelper
+
+from huggingface_hub import hf_hub_download, snapshot_download
+from insightface.app import FaceAnalysis
+from safetensors.torch import load_file
+from torchvision.transforms import InterpolationMode
+from torchvision.transforms.functional import normalize, resize
+
+from eva_clip import create_model_and_transforms
+from eva_clip.constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
+from pulid.encoders_transformer import IDFormer
+from pulid.utils import is_torch2_available, sample_dpmpp_2m, sample_dpmpp_sde
+
+if is_torch2_available():
+ from pulid.attention_processor import AttnProcessor2_0 as AttnProcessor
+ from pulid.attention_processor import IDAttnProcessor2_0 as IDAttnProcessor
+else:
+ from pulid.attention_processor import AttnProcessor, IDAttnProcessor
+
+
+class PuLIDPipeline:
+ def __init__(self, sdxl_repo='Lykon/dreamshaper-xl-lightning', sampler='dpmpp_sde', *args, **kwargs):
+ super().__init__()
+ self.device = 'cuda'
+
+ # load base model
+ self.pipe = StableDiffusionXLPipeline.from_pretrained(sdxl_repo, torch_dtype=torch.float16, variant="fp16").to(
+ self.device
+ )
+ self.pipe.watermark = None
+ self.hack_unet_attn_layers(self.pipe.unet)
+
+ # scheduler
+ self.pipe.scheduler = DPMSolverMultistepScheduler.from_config(self.pipe.scheduler.config)
+
+ # ID adapters
+ self.id_adapter = IDFormer().to(self.device)
+
+ # preprocessors
+ # face align and parsing
+ self.face_helper = FaceRestoreHelper(
+ upscale_factor=1,
+ face_size=512,
+ crop_ratio=(1, 1),
+ det_model='retinaface_resnet50',
+ save_ext='png',
+ device=self.device,
+ )
+ self.face_helper.face_parse = None
+ self.face_helper.face_parse = init_parsing_model(model_name='bisenet', device=self.device)
+ # clip-vit backbone
+ model, _, _ = create_model_and_transforms('EVA02-CLIP-L-14-336', 'eva_clip', force_custom_clip=True)
+ model = model.visual
+ self.clip_vision_model = model.to(self.device)
+ eva_transform_mean = getattr(self.clip_vision_model, 'image_mean', OPENAI_DATASET_MEAN)
+ eva_transform_std = getattr(self.clip_vision_model, 'image_std', OPENAI_DATASET_STD)
+ if not isinstance(eva_transform_mean, (list, tuple)):
+ eva_transform_mean = (eva_transform_mean,) * 3
+ if not isinstance(eva_transform_std, (list, tuple)):
+ eva_transform_std = (eva_transform_std,) * 3
+ self.eva_transform_mean = eva_transform_mean
+ self.eva_transform_std = eva_transform_std
+ # antelopev2
+ snapshot_download('DIAMONIK7777/antelopev2', local_dir='models/antelopev2')
+ self.app = FaceAnalysis(
+ name='antelopev2', root='.', providers=['CUDAExecutionProvider', 'CPUExecutionProvider']
+ )
+ self.app.prepare(ctx_id=0, det_size=(640, 640))
+ self.handler_ante = insightface.model_zoo.get_model('models/antelopev2/glintr100.onnx')
+ self.handler_ante.prepare(ctx_id=0)
+
+ gc.collect()
+ torch.cuda.empty_cache()
+
+ self.load_pretrain()
+
+ # other configs
+ self.debug_img_list = []
+
+ # karras schedule related code, borrow from lllyasviel/Omost
+ linear_start = 0.00085
+ linear_end = 0.012
+ timesteps = 1000
+ betas = torch.linspace(linear_start**0.5, linear_end**0.5, timesteps, dtype=torch.float64) ** 2
+ alphas = 1.0 - betas
+ alphas_cumprod = torch.tensor(np.cumprod(alphas, axis=0), dtype=torch.float32)
+
+ self.sigmas = ((1 - alphas_cumprod) / alphas_cumprod) ** 0.5
+ self.log_sigmas = self.sigmas.log()
+ self.sigma_data = 1.0
+
+ if sampler == 'dpmpp_sde':
+ self.sampler = sample_dpmpp_sde
+ elif sampler == 'dpmpp_2m':
+ self.sampler = sample_dpmpp_2m
+ else:
+ raise NotImplementedError(f'sampler {sampler} not implemented')
+
+ @property
+ def sigma_min(self):
+ return self.sigmas[0]
+
+ @property
+ def sigma_max(self):
+ return self.sigmas[-1]
+
+ def timestep(self, sigma):
+ log_sigma = sigma.log()
+ dists = log_sigma.to(self.log_sigmas.device) - self.log_sigmas[:, None]
+ return dists.abs().argmin(dim=0).view(sigma.shape).to(sigma.device)
+
+ def get_sigmas_karras(self, n, rho=7.0):
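+        # Karras et al. schedule: interpolate sigma**(1/rho) linearly from sigma_max down to
+        # sigma_min, raise back to the rho-th power, and append a final sigma of 0.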
+ ramp = torch.linspace(0, 1, n)
+ min_inv_rho = self.sigma_min ** (1 / rho)
+ max_inv_rho = self.sigma_max ** (1 / rho)
+ sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
+ return torch.cat([sigmas, sigmas.new_zeros([1])])
+
+ def hack_unet_attn_layers(self, unet):
+ id_adapter_attn_procs = {}
+ for name, _ in unet.attn_processors.items():
+ cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
+ if name.startswith("mid_block"):
+ hidden_size = unet.config.block_out_channels[-1]
+ elif name.startswith("up_blocks"):
+ block_id = int(name[len("up_blocks.")])
+ hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
+ elif name.startswith("down_blocks"):
+ block_id = int(name[len("down_blocks.")])
+ hidden_size = unet.config.block_out_channels[block_id]
+ if cross_attention_dim is not None:
+ id_adapter_attn_procs[name] = IDAttnProcessor(
+ hidden_size=hidden_size,
+ cross_attention_dim=cross_attention_dim,
+ ).to(unet.device)
+ else:
+ id_adapter_attn_procs[name] = AttnProcessor()
+ unet.set_attn_processor(id_adapter_attn_procs)
+ self.id_adapter_attn_layers = nn.ModuleList(unet.attn_processors.values())
+
+ def load_pretrain(self):
+ hf_hub_download('guozinan/PuLID', 'pulid_v1.1.safetensors', local_dir='models')
+ ckpt_path = 'models/pulid_v1.1.safetensors'
+ state_dict = load_file(ckpt_path)
+ state_dict_dict = {}
+ for k, v in state_dict.items():
+ module = k.split('.')[0]
+ state_dict_dict.setdefault(module, {})
+ new_k = k[len(module) + 1 :]
+ state_dict_dict[module][new_k] = v
+
+ for module in state_dict_dict:
+ print(f'loading from {module}')
+ getattr(self, module).load_state_dict(state_dict_dict[module], strict=True)
+
+ def to_gray(self, img):
+ x = 0.299 * img[:, 0:1] + 0.587 * img[:, 1:2] + 0.114 * img[:, 2:3]
+ x = x.repeat(1, 3, 1, 1)
+ return x
+
+ def get_id_embedding(self, image_list):
+ """
+ Args:
+ image in image_list: numpy rgb image, range [0, 255]
+ """
+ id_cond_list = []
+ id_vit_hidden_list = []
+ for ii, image in enumerate(image_list):
+ self.face_helper.clean_all()
+ image_bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
+ # get antelopev2 embedding
+ face_info = self.app.get(image_bgr)
+ if len(face_info) > 0:
+ face_info = sorted(
+ face_info, key=lambda x: (x['bbox'][2] - x['bbox'][0]) * (x['bbox'][3] - x['bbox'][1])
+ )[
+ -1
+                ]  # only use the largest detected face
+ id_ante_embedding = face_info['embedding']
+ self.debug_img_list.append(
+ image[
+ int(face_info['bbox'][1]) : int(face_info['bbox'][3]),
+ int(face_info['bbox'][0]) : int(face_info['bbox'][2]),
+ ]
+ )
+ else:
+ id_ante_embedding = None
+
+ # using facexlib to detect and align face
+ self.face_helper.read_image(image_bgr)
+ self.face_helper.get_face_landmarks_5(only_center_face=True)
+ self.face_helper.align_warp_face()
+ if len(self.face_helper.cropped_faces) == 0:
+ raise RuntimeError('facexlib align face fail')
+ align_face = self.face_helper.cropped_faces[0]
+            # in case insightface didn't detect a face
+            if id_ante_embedding is None:
+                print('failed to detect a face with insightface, extracting the embedding from the aligned face instead')
+ id_ante_embedding = self.handler_ante.get_feat(align_face)
+
+ id_ante_embedding = torch.from_numpy(id_ante_embedding).to(self.device)
+ if id_ante_embedding.ndim == 1:
+ id_ante_embedding = id_ante_embedding.unsqueeze(0)
+
+ # parsing
+ input = img2tensor(align_face, bgr2rgb=True).unsqueeze(0) / 255.0
+ input = input.to(self.device)
+ parsing_out = self.face_helper.face_parse(normalize(input, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]))[
+ 0
+ ]
+ parsing_out = parsing_out.argmax(dim=1, keepdim=True)
+ bg_label = [0, 16, 18, 7, 8, 9, 14, 15]
+ bg = sum(parsing_out == i for i in bg_label).bool()
+ white_image = torch.ones_like(input)
+ # only keep the face features
+ face_features_image = torch.where(bg, white_image, self.to_gray(input))
+ self.debug_img_list.append(tensor2img(face_features_image, rgb2bgr=False))
+
+ # transform img before sending to eva-clip-vit
+ face_features_image = resize(
+ face_features_image, self.clip_vision_model.image_size, InterpolationMode.BICUBIC
+ )
+ face_features_image = normalize(face_features_image, self.eva_transform_mean, self.eva_transform_std)
+ id_cond_vit, id_vit_hidden = self.clip_vision_model(
+ face_features_image, return_all_features=False, return_hidden=True, shuffle=False
+ )
+ id_cond_vit_norm = torch.norm(id_cond_vit, 2, 1, True)
+ id_cond_vit = torch.div(id_cond_vit, id_cond_vit_norm)
+
+ id_cond = torch.cat([id_ante_embedding, id_cond_vit], dim=-1)
+
+ id_cond_list.append(id_cond)
+ id_vit_hidden_list.append(id_vit_hidden)
+
+ id_uncond = torch.zeros_like(id_cond_list[0])
+ id_vit_hidden_uncond = []
+ for layer_idx in range(0, len(id_vit_hidden_list[0])):
+ id_vit_hidden_uncond.append(torch.zeros_like(id_vit_hidden_list[0][layer_idx]))
+
+ id_cond = torch.stack(id_cond_list, dim=1)
+ id_vit_hidden = id_vit_hidden_list[0]
+ for i in range(1, len(image_list)):
+ for j, x in enumerate(id_vit_hidden_list[i]):
+ id_vit_hidden[j] = torch.cat([id_vit_hidden[j], x], dim=1)
+ id_embedding = self.id_adapter(id_cond, id_vit_hidden)
+ uncond_id_embedding = self.id_adapter(id_uncond, id_vit_hidden_uncond)
+
+ # return id_embedding
+ return uncond_id_embedding, id_embedding
+
+ def __call__(self, x, sigma, **extra_args):
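+        # k-diffusion style denoiser: rescale x into the space the UNet expects, run the
+        # positive and negative branches, apply classifier-free guidance, and convert the
+        # noise prediction back into a denoised sample for the sample_dpmpp_* samplers.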
+ x_ddim_space = x / (sigma[:, None, None, None] ** 2 + self.sigma_data**2) ** 0.5
+ t = self.timestep(sigma)
+ cfg_scale = extra_args['cfg_scale']
+ eps_positive = self.pipe.unet(x_ddim_space, t, return_dict=False, **extra_args['positive'])[0]
+ eps_negative = self.pipe.unet(x_ddim_space, t, return_dict=False, **extra_args['negative'])[0]
+ noise_pred = eps_negative + cfg_scale * (eps_positive - eps_negative)
+ return x - noise_pred * sigma[:, None, None, None]
+
+ def inference(
+ self,
+ prompt,
+ size,
+ prompt_n='',
+ id_embedding=None,
+ uncond_id_embedding=None,
+ id_scale=1.0,
+ guidance_scale=1.2,
+ steps=4,
+ seed=-1,
+ ):
+
+ # sigmas
+ sigmas = self.get_sigmas_karras(steps).to(self.device)
+
+ # latents
+ noise = torch.randn((size[0], 4, size[1] // 8, size[2] // 8), device="cpu", generator=torch.manual_seed(seed))
+ noise = noise.to(dtype=self.pipe.unet.dtype, device=self.device)
+ latents = noise * sigmas[0].to(noise)
+
+ (
+ prompt_embeds,
+ negative_prompt_embeds,
+ pooled_prompt_embeds,
+ negative_pooled_prompt_embeds,
+ ) = self.pipe.encode_prompt(
+ prompt=prompt,
+ negative_prompt=prompt_n,
+ )
+
+ add_time_ids = list((size[1], size[2]) + (0, 0) + (size[1], size[2]))
+ add_time_ids = torch.tensor([add_time_ids], dtype=self.pipe.unet.dtype, device=self.device)
+ add_neg_time_ids = add_time_ids.clone()
+
+ sampler_kwargs = dict(
+ cfg_scale=guidance_scale,
+ positive=dict(
+ encoder_hidden_states=prompt_embeds,
+ added_cond_kwargs={"text_embeds": pooled_prompt_embeds, "time_ids": add_time_ids},
+ cross_attention_kwargs={'id_embedding': id_embedding, 'id_scale': id_scale},
+ ),
+ negative=dict(
+ encoder_hidden_states=negative_prompt_embeds,
+ added_cond_kwargs={"text_embeds": negative_pooled_prompt_embeds, "time_ids": add_neg_time_ids},
+ cross_attention_kwargs={'id_embedding': uncond_id_embedding, 'id_scale': id_scale},
+ ),
+ )
+
+ latents = self.sampler(self, latents, sigmas, extra_args=sampler_kwargs, disable=False)
+ latents = latents.to(dtype=self.pipe.vae.dtype, device=self.device) / self.pipe.vae.config.scaling_factor
+ images = self.pipe.vae.decode(latents).sample
+ images = self.pipe.image_processor.postprocess(images, output_type='pil')
+
+ return images
diff --git a/pulid/utils.py b/pulid/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..5514258b2d767eef9e260fad585ad4be03794573
--- /dev/null
+++ b/pulid/utils.py
@@ -0,0 +1,339 @@
+import importlib
+import math
+import os
+import random
+
+import cv2
+import numpy as np
+import torch
+import torch.nn.functional as F
+import torchsde
+from torchvision.utils import make_grid
+from tqdm.auto import trange
+from transformers import PretrainedConfig
+
+
+def seed_everything(seed):
+ os.environ["PL_GLOBAL_SEED"] = str(seed)
+ random.seed(seed)
+ np.random.seed(seed)
+ torch.manual_seed(seed)
+ torch.cuda.manual_seed_all(seed)
+ torch.backends.cudnn.deterministic = True
+ torch.backends.cudnn.benchmark = False
+
+
+def is_torch2_available():
+ return hasattr(F, "scaled_dot_product_attention")
+
+
+def instantiate_from_config(config):
+ if "target" not in config:
+ if config == '__is_first_stage__' or config == "__is_unconditional__":
+ return None
+ raise KeyError("Expected key `target` to instantiate.")
+ return get_obj_from_str(config["target"])(**config.get("params", {}))
+
+
+def get_obj_from_str(string, reload=False):
+ module, cls = string.rsplit(".", 1)
+ if reload:
+ module_imp = importlib.import_module(module)
+ importlib.reload(module_imp)
+ return getattr(importlib.import_module(module, package=None), cls)
+
+
+def drop_seq_token(seq, drop_rate=0.5):
+ idx = torch.randperm(seq.size(1))
+ num_keep_tokens = int(len(idx) * (1 - drop_rate))
+ idx = idx[:num_keep_tokens]
+ seq = seq[:, idx]
+ return seq
+
+
+def import_model_class_from_model_name_or_path(
+ pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder"
+):
+ text_encoder_config = PretrainedConfig.from_pretrained(
+ pretrained_model_name_or_path, subfolder=subfolder, revision=revision
+ )
+ model_class = text_encoder_config.architectures[0]
+
+ if model_class == "CLIPTextModel":
+ from transformers import CLIPTextModel
+
+ return CLIPTextModel
+ elif model_class == "CLIPTextModelWithProjection": # noqa RET505
+ from transformers import CLIPTextModelWithProjection
+
+ return CLIPTextModelWithProjection
+ else:
+ raise ValueError(f"{model_class} is not supported.")
+
+
+def resize_numpy_image_long(image, resize_long_edge=768):
+ h, w = image.shape[:2]
+ if max(h, w) <= resize_long_edge:
+ return image
+ k = resize_long_edge / max(h, w)
+ h = int(h * k)
+ w = int(w * k)
+ image = cv2.resize(image, (w, h), interpolation=cv2.INTER_LANCZOS4)
+ return image
+
+
+# from basicsr
+def img2tensor(imgs, bgr2rgb=True, float32=True):
+ """Numpy array to tensor.
+
+ Args:
+ imgs (list[ndarray] | ndarray): Input images.
+ bgr2rgb (bool): Whether to change bgr to rgb.
+ float32 (bool): Whether to change to float32.
+
+ Returns:
+ list[tensor] | tensor: Tensor images. If returned results only have
+ one element, just return tensor.
+ """
+
+ def _totensor(img, bgr2rgb, float32):
+ if img.shape[2] == 3 and bgr2rgb:
+ if img.dtype == 'float64':
+ img = img.astype('float32')
+ img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+ img = torch.from_numpy(img.transpose(2, 0, 1))
+ if float32:
+ img = img.float()
+ return img
+
+ if isinstance(imgs, list):
+ return [_totensor(img, bgr2rgb, float32) for img in imgs]
+ return _totensor(imgs, bgr2rgb, float32)
+
+
+def tensor2img(tensor, rgb2bgr=True, out_type=np.uint8, min_max=(0, 1)):
+ """Convert torch Tensors into image numpy arrays.
+
+ After clamping to [min, max], values will be normalized to [0, 1].
+
+ Args:
+ tensor (Tensor or list[Tensor]): Accept shapes:
+ 1) 4D mini-batch Tensor of shape (B x 3/1 x H x W);
+ 2) 3D Tensor of shape (3/1 x H x W);
+ 3) 2D Tensor of shape (H x W).
+ Tensor channel should be in RGB order.
+ rgb2bgr (bool): Whether to change rgb to bgr.
+ out_type (numpy type): output types. If ``np.uint8``, transform outputs
+ to uint8 type with range [0, 255]; otherwise, float type with
+ range [0, 1]. Default: ``np.uint8``.
+ min_max (tuple[int]): min and max values for clamp.
+
+ Returns:
+        (ndarray or list[ndarray]): 3D ndarray of shape (H x W x C) OR 2D ndarray of
+        shape (H x W). The channel order is BGR when ``rgb2bgr`` is True.
+ """
+ if not (torch.is_tensor(tensor) or (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):
+ raise TypeError(f'tensor or list of tensors expected, got {type(tensor)}')
+
+ if torch.is_tensor(tensor):
+ tensor = [tensor]
+ result = []
+ for _tensor in tensor:
+ _tensor = _tensor.squeeze(0).float().detach().cpu().clamp_(*min_max)
+ _tensor = (_tensor - min_max[0]) / (min_max[1] - min_max[0])
+
+ n_dim = _tensor.dim()
+ if n_dim == 4:
+ img_np = make_grid(_tensor, nrow=int(math.sqrt(_tensor.size(0))), normalize=False).numpy()
+ img_np = img_np.transpose(1, 2, 0)
+ if rgb2bgr:
+ img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
+ elif n_dim == 3:
+ img_np = _tensor.numpy()
+ img_np = img_np.transpose(1, 2, 0)
+ if img_np.shape[2] == 1: # gray image
+ img_np = np.squeeze(img_np, axis=2)
+ else:
+ if rgb2bgr:
+ img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
+ elif n_dim == 2:
+ img_np = _tensor.numpy()
+ else:
+ raise TypeError(f'Only support 4D, 3D or 2D tensor. But received with dimension: {n_dim}')
+ if out_type == np.uint8:
+            # Unlike MATLAB, numpy.uint8() will NOT round by default.
+ img_np = (img_np * 255.0).round()
+ img_np = img_np.astype(out_type)
+ result.append(img_np)
+ if len(result) == 1:
+ result = result[0]
+ return result
+
+
+# We didn't find a correct configuration to make the diffusers scheduler align with dpm++2m (karras) in ComfyUI,
+# so we copied the ComfyUI code directly.
+
+
+def append_dims(x, target_dims):
+ """Appends dimensions to the end of a tensor until it has target_dims dimensions."""
+ dims_to_append = target_dims - x.ndim
+ if dims_to_append < 0:
+ raise ValueError(f'input has {x.ndim} dims but target_dims is {target_dims}, which is less')
+ expanded = x[(...,) + (None,) * dims_to_append]
+ # MPS will get inf values if it tries to index into the new axes, but detaching fixes this.
+ # https://github.com/pytorch/pytorch/issues/84364
+ return expanded.detach().clone() if expanded.device.type == 'mps' else expanded
+
+
+def to_d(x, sigma, denoised):
+ """Converts a denoiser output to a Karras ODE derivative."""
+ return (x - denoised) / append_dims(sigma, x.ndim)
+
+
+def get_ancestral_step(sigma_from, sigma_to, eta=1.0):
+ """Calculates the noise level (sigma_down) to step down to and the amount
+ of noise to add (sigma_up) when doing an ancestral sampling step."""
+ if not eta:
+ return sigma_to, 0.0
+ sigma_up = min(sigma_to, eta * (sigma_to**2 * (sigma_from**2 - sigma_to**2) / sigma_from**2) ** 0.5)
+ sigma_down = (sigma_to**2 - sigma_up**2) ** 0.5
+ return sigma_down, sigma_up
+
+
+class BatchedBrownianTree:
+ """A wrapper around torchsde.BrownianTree that enables batches of entropy."""
+
+ def __init__(self, x, t0, t1, seed=None, **kwargs):
+ self.cpu_tree = True
+ if "cpu" in kwargs:
+ self.cpu_tree = kwargs.pop("cpu")
+ t0, t1, self.sign = self.sort(t0, t1)
+ w0 = kwargs.get('w0', torch.zeros_like(x))
+ if seed is None:
+ seed = torch.randint(0, 2**63 - 1, []).item()
+ self.batched = True
+ try:
+ assert len(seed) == x.shape[0]
+ w0 = w0[0]
+ except TypeError:
+ seed = [seed]
+ self.batched = False
+ if self.cpu_tree:
+ self.trees = [torchsde.BrownianTree(t0.cpu(), w0.cpu(), t1.cpu(), entropy=s, **kwargs) for s in seed]
+ else:
+ self.trees = [torchsde.BrownianTree(t0, w0, t1, entropy=s, **kwargs) for s in seed]
+
+ @staticmethod
+ def sort(a, b):
+ return (a, b, 1) if a < b else (b, a, -1)
+
+ def __call__(self, t0, t1):
+ t0, t1, sign = self.sort(t0, t1)
+ if self.cpu_tree:
+ w = torch.stack(
+ [tree(t0.cpu().float(), t1.cpu().float()).to(t0.dtype).to(t0.device) for tree in self.trees]
+ ) * (self.sign * sign)
+ else:
+ w = torch.stack([tree(t0, t1) for tree in self.trees]) * (self.sign * sign)
+
+ return w if self.batched else w[0]
+
+
+class BrownianTreeNoiseSampler:
+ """A noise sampler backed by a torchsde.BrownianTree.
+
+ Args:
+ x (Tensor): The tensor whose shape, device and dtype to use to generate
+ random samples.
+ sigma_min (float): The low end of the valid interval.
+ sigma_max (float): The high end of the valid interval.
+ seed (int or List[int]): The random seed. If a list of seeds is
+ supplied instead of a single integer, then the noise sampler will
+ use one BrownianTree per batch item, each with its own seed.
+ transform (callable): A function that maps sigma to the sampler's
+ internal timestep.
+ """
+
+ def __init__(self, x, sigma_min, sigma_max, seed=None, transform=lambda x: x, cpu=False):
+ self.transform = transform
+ t0, t1 = self.transform(torch.as_tensor(sigma_min)), self.transform(torch.as_tensor(sigma_max))
+ self.tree = BatchedBrownianTree(x, t0, t1, seed, cpu=cpu)
+
+ def __call__(self, sigma, sigma_next):
+ t0, t1 = self.transform(torch.as_tensor(sigma)), self.transform(torch.as_tensor(sigma_next))
+ return self.tree(t0, t1) / (t1 - t0).abs().sqrt()
+
+
+@torch.no_grad()
+def sample_dpmpp_2m(model, x, sigmas, extra_args=None, callback=None, disable=None):
+ """DPM-Solver++(2M)."""
+ extra_args = {} if extra_args is None else extra_args
+ s_in = x.new_ones([x.shape[0]])
+ sigma_fn = lambda t: t.neg().exp()
+ t_fn = lambda sigma: sigma.log().neg()
+ old_denoised = None
+
+ for i in trange(len(sigmas) - 1, disable=disable):
+ denoised = model(x, sigmas[i] * s_in, **extra_args)
+ if callback is not None:
+ callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
+ t, t_next = t_fn(sigmas[i]), t_fn(sigmas[i + 1])
+ h = t_next - t
+ if old_denoised is None or sigmas[i + 1] == 0:
+ x = (sigma_fn(t_next) / sigma_fn(t)) * x - (-h).expm1() * denoised
+ else:
+ h_last = t - t_fn(sigmas[i - 1])
+ r = h_last / h
+ denoised_d = (1 + 1 / (2 * r)) * denoised - (1 / (2 * r)) * old_denoised
+ x = (sigma_fn(t_next) / sigma_fn(t)) * x - (-h).expm1() * denoised_d
+ old_denoised = denoised
+ return x
+
+
+@torch.no_grad()
+def sample_dpmpp_sde(
+ model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1.0, s_noise=1.0, noise_sampler=None, r=1 / 2
+):
+ """DPM-Solver++ (stochastic)."""
+ sigma_min, sigma_max = sigmas[sigmas > 0].min(), sigmas.max()
+    extra_args = {} if extra_args is None else extra_args  # guard before reading the seed below
+    seed = extra_args.get("seed", None)
+    noise_sampler = (
+        BrownianTreeNoiseSampler(x, sigma_min, sigma_max, seed=seed, cpu=False)
+        if noise_sampler is None
+        else noise_sampler
+    )
+ s_in = x.new_ones([x.shape[0]])
+ sigma_fn = lambda t: t.neg().exp()
+ t_fn = lambda sigma: sigma.log().neg()
+
+ for i in trange(len(sigmas) - 1, disable=disable):
+ denoised = model(x, sigmas[i] * s_in, **extra_args)
+ if callback is not None:
+ callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
+ if sigmas[i + 1] == 0:
+ # Euler method
+ d = to_d(x, sigmas[i], denoised)
+ dt = sigmas[i + 1] - sigmas[i]
+ x = x + d * dt
+ else:
+ # DPM-Solver++
+ t, t_next = t_fn(sigmas[i]), t_fn(sigmas[i + 1])
+ h = t_next - t
+ s = t + h * r
+ fac = 1 / (2 * r)
+
+ # Step 1
+ sd, su = get_ancestral_step(sigma_fn(t), sigma_fn(s), eta)
+ s_ = t_fn(sd)
+ x_2 = (sigma_fn(s_) / sigma_fn(t)) * x - (t - s_).expm1() * denoised
+ x_2 = x_2 + noise_sampler(sigma_fn(t), sigma_fn(s)) * s_noise * su
+ denoised_2 = model(x_2, sigma_fn(s) * s_in, **extra_args)
+
+ # Step 2
+ sd, su = get_ancestral_step(sigma_fn(t), sigma_fn(t_next), eta)
+ t_next_ = t_fn(sd)
+ denoised_d = (1 - fac) * denoised + fac * denoised_2
+ x = (sigma_fn(t_next_) / sigma_fn(t)) * x - (t - t_next_).expm1() * denoised_d
+ x = x + noise_sampler(sigma_fn(t), sigma_fn(t_next)) * s_noise * su
+ return x
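+
+
+if __name__ == "__main__":
+    # A minimal sketch, not part of the PuLID API: the samplers expect a callable
+    # model(x, sigma, **extra_args) that returns the denoised prediction. A dummy denoiser
+    # that always predicts zeros drives the sample to zero over the schedule.
+    def dummy_denoiser(x, sigma, **kwargs):
+        return torch.zeros_like(x)
+
+    sigmas = torch.tensor([14.6, 7.0, 3.0, 1.0, 0.0])
+    x = torch.randn(1, 4, 8, 8) * sigmas[0]
+    print(sample_dpmpp_2m(dummy_denoiser, x, sigmas).abs().max())  # tensor(0.)
+
+    # BrownianTreeNoiseSampler returns unit-variance noise increments with the shape of x
+    # (the sigma values used here are illustrative).
+    ns = BrownianTreeNoiseSampler(x, sigma_min=0.03, sigma_max=14.6, seed=42, cpu=True)
+    print(ns(torch.tensor(14.6), torch.tensor(7.0)).shape)  # torch.Size([1, 4, 8, 8])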
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..08eff5b190845dfe867d53a45bd04ccb4e4fda88
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,22 @@
+torch==2.4.1
+torchvision==0.19.1
+diffusers==0.30.0
+transformers==4.43.3
+optimum-quanto==0.2.4
+gradio==4.44.1
+opencv-python
+httpx>=0.23.3
+timm
+einops
+ftfy
+facexlib
+insightface
+onnxruntime
+onnxruntime-gpu
+accelerate
+SentencePiece
+safetensors
+xformers
+apex
+torchsde
+spaces
\ No newline at end of file