repo_name (string, 7-71 chars) | file_path (string, 5-118 chars) | context (list) | import_statement (string, 45-12.5k chars) | token_num (int64, 641-99.4k) | cropped_code (string, 44-17k chars) | all_code (string, 43-754k chars) | next_line (string, 2-330 chars) | gold_snippet_index (int64, 0-68) | created_at (string, 25 chars) | level (string, 9 classes)
---|---|---|---|---|---|---|---|---|---|---
camenduru/MotionDirector-hf | demo/motiondirector.py | [
{
"identifier": "export_to_video",
"path": "MotionDirector_train.py",
"snippet": "def export_to_video(video_frames, output_video_path, fps):\n video_writer = imageio.get_writer(output_video_path, fps=fps)\n for img in video_frames:\n video_writer.append_data(np.array(img))\n video_writer.close()"
},
{
"identifier": "handle_memory_attention",
"path": "MotionDirector_train.py",
"snippet": "def handle_memory_attention(enable_xformers_memory_efficient_attention, enable_torch_2_attn, unet):\n try:\n is_torch_2 = hasattr(F, 'scaled_dot_product_attention')\n enable_torch_2 = is_torch_2 and enable_torch_2_attn\n\n if enable_xformers_memory_efficient_attention and not enable_torch_2:\n if is_xformers_available():\n from xformers.ops import MemoryEfficientAttentionFlashAttentionOp\n unet.enable_xformers_memory_efficient_attention(attention_op=MemoryEfficientAttentionFlashAttentionOp)\n else:\n raise ValueError(\"xformers is not available. Make sure it is installed correctly\")\n\n if enable_torch_2:\n set_torch_2_attn(unet)\n\n except:\n print(\"Could not enable memory efficient attention for xformers or Torch 2.0.\")"
},
{
"identifier": "load_primary_models",
"path": "MotionDirector_train.py",
"snippet": "def load_primary_models(pretrained_model_path):\n noise_scheduler = DDIMScheduler.from_pretrained(pretrained_model_path, subfolder=\"scheduler\")\n tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder=\"tokenizer\")\n text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder=\"text_encoder\")\n vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder=\"vae\")\n unet = UNet3DConditionModel.from_pretrained(pretrained_model_path, subfolder=\"unet\")\n\n return noise_scheduler, tokenizer, text_encoder, vae, unet"
},
{
"identifier": "unet_and_text_g_c",
"path": "MotionDirector_train.py",
"snippet": "def unet_and_text_g_c(unet, text_encoder, unet_enable, text_enable):\n unet._set_gradient_checkpointing(value=unet_enable)\n text_encoder._set_gradient_checkpointing(CLIPEncoder, value=text_enable)"
},
{
"identifier": "freeze_models",
"path": "MotionDirector_train.py",
"snippet": "def freeze_models(models_to_freeze):\n for model in models_to_freeze:\n if model is not None: model.requires_grad_(False)"
},
{
"identifier": "LoraHandler",
"path": "utils/lora_handler.py",
"snippet": "class LoraHandler(object):\n def __init__(\n self, \n version: LORA_VERSIONS = LoraVersions.cloneofsimo, \n use_unet_lora: bool = False,\n use_text_lora: bool = False,\n save_for_webui: bool = False,\n only_for_webui: bool = False,\n lora_bias: str = 'none',\n unet_replace_modules: list = None,\n text_encoder_replace_modules: list = None\n ):\n self.version = version\n self.lora_loader = self.get_lora_func(func_type=LoraFuncTypes.loader)\n self.lora_injector = self.get_lora_func(func_type=LoraFuncTypes.injector)\n self.lora_bias = lora_bias\n self.use_unet_lora = use_unet_lora\n self.use_text_lora = use_text_lora\n self.save_for_webui = save_for_webui\n self.only_for_webui = only_for_webui\n self.unet_replace_modules = unet_replace_modules\n self.text_encoder_replace_modules = text_encoder_replace_modules\n self.use_lora = any([use_text_lora, use_unet_lora])\n\n def is_cloneofsimo_lora(self):\n return self.version == LoraVersions.cloneofsimo\n\n\n def get_lora_func(self, func_type: LORA_FUNC_TYPES = LoraFuncTypes.loader):\n\n if self.is_cloneofsimo_lora():\n\n if func_type == LoraFuncTypes.loader:\n return monkeypatch_or_replace_lora_extended\n\n if func_type == LoraFuncTypes.injector:\n return inject_trainable_lora_extended\n \n assert \"LoRA Version does not exist.\"\n\n def check_lora_ext(self, lora_file: str):\n return lora_file.endswith(tuple(LORA_FILE_TYPES))\n\n def get_lora_file_path(\n self, \n lora_path: str, \n model: Union[UNet3DConditionModel, CLIPTextModel]\n ):\n if os.path.exists(lora_path):\n lora_filenames = [fns for fns in os.listdir(lora_path)]\n is_lora = self.check_lora_ext(lora_path)\n\n is_unet = isinstance(model, UNet3DConditionModel)\n is_text = isinstance(model, CLIPTextModel)\n idx = 0 if is_unet else 1\n\n base_name = FILE_BASENAMES[idx]\n \n for lora_filename in lora_filenames:\n is_lora = self.check_lora_ext(lora_filename)\n if not is_lora:\n continue\n \n if base_name in lora_filename:\n return os.path.join(lora_path, lora_filename)\n\n return None\n\n def handle_lora_load(self, file_name:str, lora_loader_args: dict = None):\n self.lora_loader(**lora_loader_args)\n print(f\"Successfully loaded LoRA from: {file_name}\")\n \n def load_lora(self, model, lora_path: str = '', lora_loader_args: dict = None,):\n try:\n lora_file = self.get_lora_file_path(lora_path, model)\n\n if lora_file is not None:\n lora_loader_args.update({\"lora_path\": lora_file})\n self.handle_lora_load(lora_file, lora_loader_args)\n\n else:\n print(f\"Could not load LoRAs for {model.__class__.__name__}. 
Injecting new ones instead...\")\n\n except Exception as e:\n print(f\"An error occured while loading a LoRA file: {e}\")\n \n def get_lora_func_args(self, lora_path, use_lora, model, replace_modules, r, dropout, lora_bias, scale):\n return_dict = lora_args.copy()\n \n if self.is_cloneofsimo_lora():\n return_dict = filter_dict(return_dict, keys=CLONE_OF_SIMO_KEYS)\n return_dict.update({\n \"model\": model,\n \"loras\": self.get_lora_file_path(lora_path, model),\n \"target_replace_module\": replace_modules,\n \"r\": r,\n \"scale\": scale,\n \"dropout_p\": dropout,\n })\n\n return return_dict\n\n def do_lora_injection(\n self, \n model, \n replace_modules, \n bias='none',\n dropout=0,\n r=4,\n lora_loader_args=None,\n ): \n REPLACE_MODULES = replace_modules\n\n params = None\n negation = None\n is_injection_hybrid = False\n \n if self.is_cloneofsimo_lora():\n is_injection_hybrid = True\n injector_args = lora_loader_args\n\n params, negation = self.lora_injector(**injector_args) # inject_trainable_lora_extended\n for _up, _down in extract_lora_ups_down(\n model, \n target_replace_module=REPLACE_MODULES):\n\n if all(x is not None for x in [_up, _down]):\n print(f\"Lora successfully injected into {model.__class__.__name__}.\")\n\n break\n\n return params, negation, is_injection_hybrid\n\n return params, negation, is_injection_hybrid\n\n def add_lora_to_model(self, use_lora, model, replace_modules, dropout=0.0, lora_path='', r=16, scale=1.0):\n\n params = None\n negation = None\n\n lora_loader_args = self.get_lora_func_args(\n lora_path,\n use_lora,\n model,\n replace_modules,\n r,\n dropout,\n self.lora_bias,\n scale\n )\n\n if use_lora:\n params, negation, is_injection_hybrid = self.do_lora_injection(\n model, \n replace_modules, \n bias=self.lora_bias,\n lora_loader_args=lora_loader_args,\n dropout=dropout,\n r=r\n )\n\n if not is_injection_hybrid:\n self.load_lora(model, lora_path=lora_path, lora_loader_args=lora_loader_args)\n \n params = model if params is None else params\n return params, negation\n\n def save_cloneofsimo_lora(self, model, save_path, step, flag):\n \n def save_lora(model, name, condition, replace_modules, step, save_path, flag=None):\n if condition and replace_modules is not None:\n save_path = f\"{save_path}/{step}_{name}.pt\"\n save_lora_weight(model, save_path, replace_modules, flag)\n\n save_lora(\n model.unet, \n FILE_BASENAMES[0], \n self.use_unet_lora, \n self.unet_replace_modules, \n step,\n save_path,\n flag\n )\n save_lora(\n model.text_encoder, \n FILE_BASENAMES[1], \n self.use_text_lora, \n self.text_encoder_replace_modules, \n step, \n save_path,\n flag\n )\n\n # train_patch_pipe(model, self.use_unet_lora, self.use_text_lora)\n\n def save_lora_weights(self, model: None, save_path: str ='',step: str = '', flag=None):\n save_path = f\"{save_path}/lora\"\n os.makedirs(save_path, exist_ok=True)\n\n if self.is_cloneofsimo_lora():\n if any([self.save_for_webui, self.only_for_webui]):\n warnings.warn(\n \"\"\"\n You have 'save_for_webui' enabled, but are using cloneofsimo's LoRA implemention.\n Only 'stable_lora' is supported for saving to a compatible webui file.\n \"\"\"\n )\n self.save_cloneofsimo_lora(model, save_path, step, flag)"
},
{
"identifier": "ddim_inversion",
"path": "utils/ddim_utils.py",
"snippet": "@torch.no_grad()\ndef ddim_inversion(pipeline, ddim_scheduler, video_latent, num_inv_steps, prompt=\"\"):\n ddim_latents = ddim_loop(pipeline, ddim_scheduler, video_latent, num_inv_steps, prompt)\n return ddim_latents"
},
{
"identifier": "extract_lora_child_module",
"path": "utils/lora.py",
"snippet": "def extract_lora_child_module(model, target_replace_module=DEFAULT_TARGET_REPLACE):\n\n loras = []\n\n for target_replace_module_i in target_replace_module:\n\n for _m, _n, _child_module in _find_modules(\n model,\n [target_replace_module_i],\n search_class=[LoraInjectedLinear, LoraInjectedConv2d, LoraInjectedConv3d],\n ):\n loras.append(_child_module)\n\n if len(loras) == 0:\n raise ValueError(\"No lora injected.\")\n\n return loras"
}
] | import os
import warnings
import torch
import random
import imageio
from typing import Optional
from diffusers import DDIMScheduler, TextToVideoSDPipeline
from einops import rearrange
from torch import Tensor
from torch.nn.functional import interpolate
from tqdm import trange
from MotionDirector_train import export_to_video, handle_memory_attention, load_primary_models, unet_and_text_g_c, freeze_models
from utils.lora_handler import LoraHandler
from utils.ddim_utils import ddim_inversion
from utils.lora import extract_lora_child_module | 4,311 | unet=unet.to(device=device, dtype=torch.half),
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
return pipe
def inverse_video(pipe, latents, num_steps):
ddim_inv_scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
ddim_inv_scheduler.set_timesteps(num_steps)
ddim_inv_latent = ddim_inversion(
pipe, ddim_inv_scheduler, video_latent=latents.to(pipe.device),
num_inv_steps=num_steps, prompt="")[-1]
return ddim_inv_latent
def prepare_input_latents(
pipe: TextToVideoSDPipeline,
batch_size: int,
num_frames: int,
height: int,
width: int,
latents_path:str,
model_select: str,
random_seed: int,
):
# initialize with random gaussian noise
scale = pipe.vae_scale_factor
shape = (batch_size, pipe.unet.config.in_channels, num_frames, height // scale, width // scale)
if random_seed > 1000:
torch.manual_seed(random_seed)
else:
random_seed = random.randint(100, 10000000)
torch.manual_seed(random_seed)
print(f"random_seed: {random_seed}")
if '1-' in model_select:
noise_prior = 0.3
elif '2-' in model_select:
noise_prior = 0.5
elif '3-' in model_select:
noise_prior = 0.
else:
noise_prior = 0.
if noise_prior > 0.:
cached_latents = torch.load(latents_path)
if 'inversion_noise' not in cached_latents:
latents = inverse_video(pipe, cached_latents['latents'].unsqueeze(0), 50).squeeze(0)
else:
latents = torch.load(latents_path)['inversion_noise'].unsqueeze(0)
if latents.shape[0] != batch_size:
latents = latents.repeat(batch_size, 1, 1, 1, 1)
if latents.shape != shape:
latents = interpolate(rearrange(latents, "b c f h w -> (b f) c h w", b=batch_size), (height // scale, width // scale), mode='bilinear')
latents = rearrange(latents, "(b f) c h w -> b c f h w", b=batch_size)
noise = torch.randn_like(latents, dtype=torch.half)
latents_base = noise
latents = (noise_prior) ** 0.5 * latents + (1 - noise_prior) ** 0.5 * noise
else:
latents = torch.randn(shape, dtype=torch.half)
latents_base = latents
return latents, latents_base, random_seed
class MotionDirector():
def __init__(self):
self.version = "0.0.0"
self.foundation_model_path = "./zeroscope_v2_576w/"
self.lora_path = "./MotionDirector_pretrained/dolly_zoom_(hitchcockian_zoom)/checkpoint-default/temporal/lora"
with torch.autocast("cuda", dtype=torch.half):
self.pipe = initialize_pipeline(model=self.foundation_model_path, lora_path=self.lora_path, lora_scale=1)
def reload_lora(self, lora_path):
if lora_path != self.lora_path:
self.lora_path = lora_path
with torch.autocast("cuda", dtype=torch.half):
self.pipe = initialize_pipeline(model=self.foundation_model_path, lora_path=self.lora_path)
def __call__(self, model_select, text_pormpt, neg_text_pormpt, random_seed, steps, guidance_scale, baseline_select):
model_select = str(model_select)
out_name = f"./outputs/inference"
out_name += f"{text_pormpt}".replace(' ', '_').replace(',', '').replace('.', '')
model_select_type = model_select.split('--')[1].strip()
model_select_type = model_select_type.lower().replace(' ', '_')
lora_path = f"./MotionDirector_pretrained/{model_select_type}/checkpoint-default/temporal/lora"
self.reload_lora(lora_path)
latents_folder = f"./MotionDirector_pretrained/{model_select_type}/cached_latents"
latents_path = f"{latents_folder}/{random.choice(os.listdir(latents_folder))}"
assert os.path.exists(lora_path)
device = "cuda"
with torch.autocast(device, dtype=torch.half):
# prepare input latents
with torch.no_grad():
init_latents, init_latents_base, random_seed = prepare_input_latents(
pipe=self.pipe,
batch_size=1,
num_frames=16,
height=384,
width=384,
latents_path=latents_path,
model_select=model_select,
random_seed=random_seed
)
video_frames = self.pipe(
prompt=text_pormpt,
negative_prompt=neg_text_pormpt,
width=384,
height=384,
num_frames=16,
num_inference_steps=steps,
guidance_scale=guidance_scale,
latents=init_latents
).frames
out_file = f"{out_name}_{random_seed}.mp4"
os.makedirs(os.path.dirname(out_file), exist_ok=True)
|
def initialize_pipeline(
model: str,
device: str = "cuda",
xformers: bool = True,
sdp: bool = True,
lora_path: str = "",
lora_rank: int = 32,
lora_scale: float = 1.0,
):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
scheduler, tokenizer, text_encoder, vae, unet = load_primary_models(model)
# Freeze any necessary models
freeze_models([vae, text_encoder, unet])
# Enable xformers if available
handle_memory_attention(xformers, sdp, unet)
lora_manager_temporal = LoraHandler(
version="cloneofsimo",
use_unet_lora=True,
use_text_lora=False,
save_for_webui=False,
only_for_webui=False,
unet_replace_modules=["TransformerTemporalModel"],
text_encoder_replace_modules=None,
lora_bias=None
)
unet_lora_params, unet_negation = lora_manager_temporal.add_lora_to_model(
True, unet, lora_manager_temporal.unet_replace_modules, 0, lora_path, r=lora_rank, scale=lora_scale)
unet.eval()
text_encoder.eval()
unet_and_text_g_c(unet, text_encoder, False, False)
pipe = TextToVideoSDPipeline.from_pretrained(
pretrained_model_name_or_path=model,
scheduler=scheduler,
tokenizer=tokenizer,
text_encoder=text_encoder.to(device=device, dtype=torch.half),
vae=vae.to(device=device, dtype=torch.half),
unet=unet.to(device=device, dtype=torch.half),
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
return pipe
def inverse_video(pipe, latents, num_steps):
ddim_inv_scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
ddim_inv_scheduler.set_timesteps(num_steps)
ddim_inv_latent = ddim_inversion(
pipe, ddim_inv_scheduler, video_latent=latents.to(pipe.device),
num_inv_steps=num_steps, prompt="")[-1]
return ddim_inv_latent
def prepare_input_latents(
pipe: TextToVideoSDPipeline,
batch_size: int,
num_frames: int,
height: int,
width: int,
latents_path:str,
model_select: str,
random_seed: int,
):
# initialize with random gaussian noise
scale = pipe.vae_scale_factor
shape = (batch_size, pipe.unet.config.in_channels, num_frames, height // scale, width // scale)
if random_seed > 1000:
torch.manual_seed(random_seed)
else:
random_seed = random.randint(100, 10000000)
torch.manual_seed(random_seed)
print(f"random_seed: {random_seed}")
if '1-' in model_select:
noise_prior = 0.3
elif '2-' in model_select:
noise_prior = 0.5
elif '3-' in model_select:
noise_prior = 0.
else:
noise_prior = 0.
if noise_prior > 0.:
cached_latents = torch.load(latents_path)
if 'inversion_noise' not in cached_latents:
latents = inverse_video(pipe, cached_latents['latents'].unsqueeze(0), 50).squeeze(0)
else:
latents = torch.load(latents_path)['inversion_noise'].unsqueeze(0)
if latents.shape[0] != batch_size:
latents = latents.repeat(batch_size, 1, 1, 1, 1)
if latents.shape != shape:
latents = interpolate(rearrange(latents, "b c f h w -> (b f) c h w", b=batch_size), (height // scale, width // scale), mode='bilinear')
latents = rearrange(latents, "(b f) c h w -> b c f h w", b=batch_size)
noise = torch.randn_like(latents, dtype=torch.half)
latents_base = noise
latents = (noise_prior) ** 0.5 * latents + (1 - noise_prior) ** 0.5 * noise
else:
latents = torch.randn(shape, dtype=torch.half)
latents_base = latents
return latents, latents_base, random_seed
class MotionDirector():
def __init__(self):
self.version = "0.0.0"
self.foundation_model_path = "./zeroscope_v2_576w/"
self.lora_path = "./MotionDirector_pretrained/dolly_zoom_(hitchcockian_zoom)/checkpoint-default/temporal/lora"
with torch.autocast("cuda", dtype=torch.half):
self.pipe = initialize_pipeline(model=self.foundation_model_path, lora_path=self.lora_path, lora_scale=1)
def reload_lora(self, lora_path):
if lora_path != self.lora_path:
self.lora_path = lora_path
with torch.autocast("cuda", dtype=torch.half):
self.pipe = initialize_pipeline(model=self.foundation_model_path, lora_path=self.lora_path)
def __call__(self, model_select, text_pormpt, neg_text_pormpt, random_seed, steps, guidance_scale, baseline_select):
model_select = str(model_select)
out_name = f"./outputs/inference"
out_name += f"{text_pormpt}".replace(' ', '_').replace(',', '').replace('.', '')
model_select_type = model_select.split('--')[1].strip()
model_select_type = model_select_type.lower().replace(' ', '_')
lora_path = f"./MotionDirector_pretrained/{model_select_type}/checkpoint-default/temporal/lora"
self.reload_lora(lora_path)
latents_folder = f"./MotionDirector_pretrained/{model_select_type}/cached_latents"
latents_path = f"{latents_folder}/{random.choice(os.listdir(latents_folder))}"
assert os.path.exists(lora_path)
device = "cuda"
with torch.autocast(device, dtype=torch.half):
# prepare input latents
with torch.no_grad():
init_latents, init_latents_base, random_seed = prepare_input_latents(
pipe=self.pipe,
batch_size=1,
num_frames=16,
height=384,
width=384,
latents_path=latents_path,
model_select=model_select,
random_seed=random_seed
)
video_frames = self.pipe(
prompt=text_pormpt,
negative_prompt=neg_text_pormpt,
width=384,
height=384,
num_frames=16,
num_inference_steps=steps,
guidance_scale=guidance_scale,
latents=init_latents
).frames
out_file = f"{out_name}_{random_seed}.mp4"
os.makedirs(os.path.dirname(out_file), exist_ok=True) | export_to_video(video_frames, out_file, 8) | 0 | 2023-12-11 04:51:39+00:00 | 8k |
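The row above is one complete record: `context` holds cross-file snippets, `cropped_code` ends right before the line to be predicted, `next_line` is the gold continuation, and `gold_snippet_index` points at the context entry the gold line depends on (index 0 here, the `export_to_video` helper). Below is a minimal, hedged sketch of how such a row could be assembled into a next-line completion prompt; the prompt layout and the abridged literals are illustrative assumptions, not the dataset's own tooling.

```python
# Sketch (assumption: not the dataset's own tooling) of turning one row into a
# next-line completion example. Literals are abridged from the MotionDirector row above.
row = {
    "context": [
        {"identifier": "export_to_video", "path": "MotionDirector_train.py",
         "snippet": "def export_to_video(video_frames, output_video_path, fps): ..."},  # abridged
    ],
    "import_statement": "import os\nimport torch\nimport imageio",                      # abridged
    "cropped_code": 'out_file = f"{out_name}_{random_seed}.mp4"\n'
                    "os.makedirs(os.path.dirname(out_file), exist_ok=True)",            # last lines before the cut
    "next_line": "export_to_video(video_frames, out_file, 8)",
    "gold_snippet_index": 0,
}

# Cross-file context is prepended as commented snippets, followed by the imports
# and the cropped code that the model must continue.
context_block = "\n".join(
    f"# {c['path']} :: {c['identifier']}\n{c['snippet']}" for c in row["context"]
)
prompt = "\n\n".join([context_block, row["import_statement"], row["cropped_code"]])

# A code LM would be asked to continue `prompt`; here a stand-in prediction is
# compared against the gold line by exact match.
prediction = "export_to_video(video_frames, out_file, 8)"  # stand-in model output
print(prediction.strip() == row["next_line"].strip())      # True

# gold_snippet_index identifies which context entry the gold line relies on.
gold_context = row["context"][row["gold_snippet_index"]]   # the export_to_video snippet
```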
Yingyue-L/Mamba-LLaVA | llava/serve/cli.py | [
{
"identifier": "IMAGE_TOKEN_INDEX",
"path": "llava/constants.py",
"snippet": "IMAGE_TOKEN_INDEX = -200"
},
{
"identifier": "DEFAULT_IMAGE_TOKEN",
"path": "llava/constants.py",
"snippet": "DEFAULT_IMAGE_TOKEN = \"<image>\""
},
{
"identifier": "DEFAULT_IM_START_TOKEN",
"path": "llava/constants.py",
"snippet": "DEFAULT_IM_START_TOKEN = \"<im_start>\""
},
{
"identifier": "DEFAULT_IM_END_TOKEN",
"path": "llava/constants.py",
"snippet": "DEFAULT_IM_END_TOKEN = \"<im_end>\""
},
{
"identifier": "conv_templates",
"path": "llava/conversation.py",
"snippet": "class SeparatorStyle(Enum):\nclass Conversation:\n SINGLE = auto()\n TWO = auto()\n MPT = auto()\n PLAIN = auto()\n LLAMA_2 = auto()\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n def get_prompt(self):\n def append_message(self, role, message):\n def get_images(self, return_pil=False):\n def expand2square(pil_img, background_color=(122, 116, 104)):\n def to_gradio_chatbot(self):\n def copy(self):\n def dict(self):"
},
{
"identifier": "load_pretrained_model",
"path": "llava/model/builder.py",
"snippet": "def load_pretrained_model(model_path, model_base, model_name, load_8bit=False, load_4bit=False, device_map=\"auto\", device=\"cuda\", **kwargs):\n kwargs = {\"device_map\": device_map, **kwargs}\n\n if device != \"cuda\":\n kwargs['device_map'] = {\"\": device}\n\n if load_8bit:\n kwargs['load_in_8bit'] = True\n elif load_4bit:\n kwargs['load_in_4bit'] = True\n kwargs['quantization_config'] = BitsAndBytesConfig(\n load_in_4bit=True,\n bnb_4bit_compute_dtype=torch.float16,\n bnb_4bit_use_double_quant=True,\n bnb_4bit_quant_type='nf4'\n )\n else:\n kwargs['torch_dtype'] = torch.float16\n\n if 'llava' in model_name.lower():\n # Load LLaVA model\n if 'lora' in model_name.lower() and model_base is None:\n warnings.warn('There is `lora` in model name but no `model_base` is provided. If you are loading a LoRA model, please provide the `model_base` argument. Detailed instruction: https://github.com/haotian-liu/LLaVA#launch-a-model-worker-lora-weights-unmerged.')\n if 'lora' in model_name.lower() and model_base is not None:\n lora_cfg_pretrained = AutoConfig.from_pretrained(model_path)\n tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)\n print('Loading LLaVA from base model...')\n model = LlavaLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=lora_cfg_pretrained, **kwargs)\n token_num, tokem_dim = model.lm_head.out_features, model.lm_head.in_features\n if model.lm_head.weight.shape[0] != token_num:\n model.lm_head.weight = torch.nn.Parameter(torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype))\n model.model.embed_tokens.weight = torch.nn.Parameter(torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype))\n\n print('Loading additional LLaVA weights...')\n if os.path.exists(os.path.join(model_path, 'non_lora_trainables.bin')):\n non_lora_trainables = torch.load(os.path.join(model_path, 'non_lora_trainables.bin'), map_location='cpu')\n else:\n # this is probably from HF Hub\n from huggingface_hub import hf_hub_download\n def load_from_hf(repo_id, filename, subfolder=None):\n cache_file = hf_hub_download(\n repo_id=repo_id,\n filename=filename,\n subfolder=subfolder)\n return torch.load(cache_file, map_location='cpu')\n non_lora_trainables = load_from_hf(model_path, 'non_lora_trainables.bin')\n non_lora_trainables = {(k[11:] if k.startswith('base_model.') else k): v for k, v in non_lora_trainables.items()}\n if any(k.startswith('model.model.') for k in non_lora_trainables):\n non_lora_trainables = {(k[6:] if k.startswith('model.') else k): v for k, v in non_lora_trainables.items()}\n model.load_state_dict(non_lora_trainables, strict=False)\n\n from peft import PeftModel\n print('Loading LoRA weights...')\n model = PeftModel.from_pretrained(model, model_path)\n print('Merging LoRA weights...')\n model = model.merge_and_unload()\n print('Model is loaded...')\n elif model_base is not None:\n # this may be mm projector only\n print('Loading LLaVA from base model...')\n if 'mpt' in model_name.lower():\n if not os.path.isfile(os.path.join(model_path, 'configuration_mpt.py')):\n shutil.copyfile(os.path.join(model_base, 'configuration_mpt.py'), os.path.join(model_path, 'configuration_mpt.py'))\n tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=True)\n cfg_pretrained = AutoConfig.from_pretrained(model_path, trust_remote_code=True)\n model = LlavaMPTForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs)\n elif \"mamba\" in model_name.lower():\n tokenizer = 
AutoTokenizer.from_pretrained(\"/data/yingyueli/hub/gpt-neox-20b\")\n cfg_pretrained = AutoConfig.from_pretrained(model_path)\n model = LlavaMambaForCausalLM.from_pretrained(model_base, dtype=torch.float16, config=cfg_pretrained, device=device)\n else:\n tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)\n cfg_pretrained = AutoConfig.from_pretrained(model_path)\n model = LlavaLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs)\n\n mm_projector_weights = torch.load(os.path.join(model_path, 'mm_projector.bin'), map_location='cpu')\n mm_projector_weights = {k: v.to(torch.float16) for k, v in mm_projector_weights.items()}\n model.load_state_dict(mm_projector_weights, strict=False)\n else:\n if 'mpt' in model_name.lower():\n tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)\n model = LlavaMPTForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)\n else:\n tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)\n model = LlavaLlamaForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)\n else:\n # Load language model\n if model_base is not None:\n # PEFT model\n from peft import PeftModel\n tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)\n model = AutoModelForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, **kwargs)\n print(f\"Loading LoRA weights from {model_path}\")\n model = PeftModel.from_pretrained(model, model_path)\n print(f\"Merging weights\")\n model = model.merge_and_unload()\n print('Convert to FP16...')\n model.to(torch.float16)\n else:\n use_fast = False\n if 'mpt' in model_name.lower():\n tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)\n model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, trust_remote_code=True, **kwargs)\n else:\n tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)\n model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)\n\n image_processor = None\n\n if 'llava' in model_name.lower():\n mm_use_im_start_end = getattr(model.config, \"mm_use_im_start_end\", False)\n mm_use_im_patch_token = getattr(model.config, \"mm_use_im_patch_token\", True)\n if mm_use_im_patch_token:\n tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)\n if mm_use_im_start_end:\n tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)\n model.resize_token_embeddings(len(tokenizer))\n\n vision_tower = model.get_vision_tower()\n if not vision_tower.is_loaded:\n vision_tower.load_model()\n vision_tower.to(device=device, dtype=torch.float16)\n image_processor = vision_tower.image_processor\n\n if hasattr(model.config, \"max_sequence_length\"):\n context_len = model.config.max_sequence_length\n else:\n context_len = 2048\n\n return tokenizer, model, image_processor, context_len"
},
{
"identifier": "disable_torch_init",
"path": "llava/utils.py",
"snippet": "def disable_torch_init():\n \"\"\"\n Disable the redundant torch default initialization to accelerate model creation.\n \"\"\"\n import torch\n setattr(torch.nn.Linear, \"reset_parameters\", lambda self: None)\n setattr(torch.nn.LayerNorm, \"reset_parameters\", lambda self: None)"
},
{
"identifier": "process_images",
"path": "llava/mm_utils.py",
"snippet": "def process_images(images, image_processor, model_cfg):\n image_aspect_ratio = getattr(model_cfg, \"image_aspect_ratio\", None)\n new_images = []\n if image_aspect_ratio == 'pad':\n for image in images:\n image = expand2square(image, tuple(int(x*255) for x in image_processor.image_mean))\n image = image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0]\n new_images.append(image)\n else:\n return image_processor(images, return_tensors='pt')['pixel_values']\n if all(x.shape == new_images[0].shape for x in new_images):\n new_images = torch.stack(new_images, dim=0)\n return new_images"
},
{
"identifier": "tokenizer_image_token",
"path": "llava/mm_utils.py",
"snippet": "def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None):\n prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split('<image>')]\n\n def insert_separator(X, sep):\n return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1]\n\n input_ids = []\n offset = 0\n if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:\n offset = 1\n input_ids.append(prompt_chunks[0][0])\n\n for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):\n input_ids.extend(x[offset:])\n\n if return_tensors is not None:\n if return_tensors == 'pt':\n return torch.tensor(input_ids, dtype=torch.long)\n raise ValueError(f'Unsupported tensor type: {return_tensors}')\n return input_ids"
},
{
"identifier": "get_model_name_from_path",
"path": "llava/mm_utils.py",
"snippet": "def get_model_name_from_path(model_path):\n model_path = model_path.strip(\"/\")\n model_paths = model_path.split(\"/\")\n if model_paths[-1].startswith('checkpoint-'):\n return model_paths[-2] + \"_\" + model_paths[-1]\n else:\n return model_paths[-1]"
},
{
"identifier": "KeywordsStoppingCriteria",
"path": "llava/mm_utils.py",
"snippet": "class KeywordsStoppingCriteria(StoppingCriteria):\n def __init__(self, keywords, tokenizer, input_ids):\n self.keywords = keywords\n self.keyword_ids = []\n self.max_keyword_len = 0\n for keyword in keywords:\n cur_keyword_ids = tokenizer(keyword).input_ids\n if len(cur_keyword_ids) > 1 and cur_keyword_ids[0] == tokenizer.bos_token_id:\n cur_keyword_ids = cur_keyword_ids[1:]\n if len(cur_keyword_ids) > self.max_keyword_len:\n self.max_keyword_len = len(cur_keyword_ids)\n self.keyword_ids.append(torch.tensor(cur_keyword_ids))\n self.tokenizer = tokenizer\n self.start_len = input_ids.shape[1]\n \n def call_for_batch(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:\n offset = min(output_ids.shape[1] - self.start_len, self.max_keyword_len)\n self.keyword_ids = [keyword_id.to(output_ids.device) for keyword_id in self.keyword_ids]\n for keyword_id in self.keyword_ids:\n if (output_ids[0, -keyword_id.shape[0]:] == keyword_id).all():\n return True\n outputs = self.tokenizer.batch_decode(output_ids[:, -offset:], skip_special_tokens=True)[0]\n for keyword in self.keywords:\n if keyword in outputs:\n return True\n return False\n \n def __call__(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:\n outputs = []\n for i in range(output_ids.shape[0]):\n outputs.append(self.call_for_batch(output_ids[i].unsqueeze(0), scores))\n return all(outputs)"
}
] | import argparse
import torch
import requests
from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
from llava.conversation import conv_templates, SeparatorStyle
from llava.model.builder import load_pretrained_model
from llava.utils import disable_torch_init
from llava.mm_utils import process_images, tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria
from PIL import Image
from PIL import Image
from io import BytesIO
from transformers import TextStreamer | 3,761 |
def load_image(image_file):
if image_file.startswith('http://') or image_file.startswith('https://'):
response = requests.get(image_file)
image = Image.open(BytesIO(response.content)).convert('RGB')
else:
image = Image.open(image_file).convert('RGB')
return image
def main(args):
# Model
disable_torch_init()
model_name = get_model_name_from_path(args.model_path)
tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, args.load_8bit, args.load_4bit, device=args.device)
if 'llama-2' in model_name.lower():
conv_mode = "llava_llama_2"
elif "v1" in model_name.lower():
conv_mode = "llava_v1"
elif "mpt" in model_name.lower():
conv_mode = "mpt"
else:
conv_mode = "llava_v0"
if args.conv_mode is not None and conv_mode != args.conv_mode:
print('[WARNING] the auto inferred conversation mode is {}, while `--conv-mode` is {}, using {}'.format(conv_mode, args.conv_mode, args.conv_mode))
else:
args.conv_mode = conv_mode
conv = conv_templates[args.conv_mode].copy()
if "mpt" in model_name.lower():
roles = ('user', 'assistant')
else:
roles = conv.roles
image = load_image(args.image_file)
# Similar operation in model_worker.py
image_tensor = process_images([image], image_processor, model.config)
if type(image_tensor) is list:
image_tensor = [image.to(model.device, dtype=torch.float16) for image in image_tensor]
else:
image_tensor = image_tensor.to(model.device, dtype=torch.float16)
while True:
try:
inp = input(f"{roles[0]}: ")
except EOFError:
inp = ""
if not inp:
print("exit...")
break
print(f"{roles[1]}: ", end="")
if image is not None:
# first message
if model.config.mm_use_im_start_end:
inp = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + inp
else:
inp = DEFAULT_IMAGE_TOKEN + '\n' + inp
conv.append_message(conv.roles[0], inp)
image = None
else:
# later messages
conv.append_message(conv.roles[0], inp)
conv.append_message(conv.roles[1], None)
prompt = conv.get_prompt()
input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(model.device)
|
def load_image(image_file):
if image_file.startswith('http://') or image_file.startswith('https://'):
response = requests.get(image_file)
image = Image.open(BytesIO(response.content)).convert('RGB')
else:
image = Image.open(image_file).convert('RGB')
return image
def main(args):
# Model
disable_torch_init()
model_name = get_model_name_from_path(args.model_path)
tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, args.load_8bit, args.load_4bit, device=args.device)
if 'llama-2' in model_name.lower():
conv_mode = "llava_llama_2"
elif "v1" in model_name.lower():
conv_mode = "llava_v1"
elif "mpt" in model_name.lower():
conv_mode = "mpt"
else:
conv_mode = "llava_v0"
if args.conv_mode is not None and conv_mode != args.conv_mode:
print('[WARNING] the auto inferred conversation mode is {}, while `--conv-mode` is {}, using {}'.format(conv_mode, args.conv_mode, args.conv_mode))
else:
args.conv_mode = conv_mode
conv = conv_templates[args.conv_mode].copy()
if "mpt" in model_name.lower():
roles = ('user', 'assistant')
else:
roles = conv.roles
image = load_image(args.image_file)
# Similar operation in model_worker.py
image_tensor = process_images([image], image_processor, model.config)
if type(image_tensor) is list:
image_tensor = [image.to(model.device, dtype=torch.float16) for image in image_tensor]
else:
image_tensor = image_tensor.to(model.device, dtype=torch.float16)
while True:
try:
inp = input(f"{roles[0]}: ")
except EOFError:
inp = ""
if not inp:
print("exit...")
break
print(f"{roles[1]}: ", end="")
if image is not None:
# first message
if model.config.mm_use_im_start_end:
inp = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + inp
else:
inp = DEFAULT_IMAGE_TOKEN + '\n' + inp
conv.append_message(conv.roles[0], inp)
image = None
else:
# later messages
conv.append_message(conv.roles[0], inp)
conv.append_message(conv.roles[1], None)
prompt = conv.get_prompt()
input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).to(model.device) | stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2 | 4 | 2023-12-09 09:39:13+00:00 | 8k |
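This second record (Mamba-LLaVA) follows the same column layout, with `next_line` again serving as the gold continuation. As a hedged illustration, the sketch below scores a batch of predictions against the `next_line` column with exact match and edit similarity; these metrics are a common choice for next-line benchmarks and are an assumption here, not this table's official evaluator.

```python
# Sketch (assumption: generic scoring, not this dataset's official evaluator) for
# comparing model outputs against the gold `next_line` values of rows like those above.
from difflib import SequenceMatcher

def edit_similarity(a: str, b: str) -> float:
    # Ratio of matching characters between prediction and gold line (0.0 - 1.0).
    return SequenceMatcher(None, a, b).ratio()

def score(rows, predictions):
    exact, similarity = 0, 0.0
    for row, pred in zip(rows, predictions):
        gold = row["next_line"].strip()
        pred = pred.strip()
        exact += int(pred == gold)
        similarity += edit_similarity(pred, gold)
    n = max(len(predictions), 1)
    return {"exact_match": exact / n, "edit_similarity": similarity / n}

# Gold lines copied verbatim from the two rows above; predictions are made-up examples.
rows = [
    {"next_line": "export_to_video(video_frames, out_file, 8)"},
    {"next_line": "stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2"},
]
predictions = [
    "export_to_video(video_frames, out_file, 8)",  # exact hit
    "stop_str = conv.sep2",                        # partial hit
]
print(score(rows, predictions))  # exact_match = 0.5; edit_similarity below 1.0
```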
Theia-4869/MoSA | src/models/vit_adapter/swin_adapter.py | [
{
"identifier": "Mlp",
"path": "src/models/vit_backbones/swin_transformer.py",
"snippet": "class Mlp(nn.Module):\n def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):\n super().__init__()\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n self.fc1 = nn.Linear(in_features, hidden_features)\n self.act = act_layer()\n self.fc2 = nn.Linear(hidden_features, out_features)\n self.drop = nn.Dropout(drop)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.act(x)\n x = self.drop(x)\n x = self.fc2(x)\n x = self.drop(x)\n return x"
},
{
"identifier": "SwinTransformerBlock",
"path": "src/models/vit_backbones/swin_transformer.py",
"snippet": "class SwinTransformerBlock(nn.Module):\n r\"\"\" Swin Transformer Block.\n Args:\n dim (int): Number of input channels.\n input_resolution (tuple[int]): Input resulotion.\n num_heads (int): Number of attention heads.\n window_size (int): Window size.\n shift_size (int): Shift size for SW-MSA.\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.\n qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True\n qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.\n drop (float, optional): Dropout rate. Default: 0.0\n attn_drop (float, optional): Attention dropout rate. Default: 0.0\n drop_path (float, optional): Stochastic depth rate. Default: 0.0\n act_layer (nn.Module, optional): Activation layer. Default: nn.GELU\n norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm\n \"\"\"\n\n def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,\n mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,\n act_layer=nn.GELU, norm_layer=nn.LayerNorm):\n super().__init__()\n self.dim = dim\n self.input_resolution = input_resolution\n self.num_heads = num_heads\n self.window_size = window_size\n self.shift_size = shift_size\n self.mlp_ratio = mlp_ratio\n if min(self.input_resolution) <= self.window_size:\n # if window size is larger than input resolution, we don't partition windows\n self.shift_size = 0\n self.window_size = min(self.input_resolution)\n assert 0 <= self.shift_size < self.window_size, \"shift_size must in 0-window_size\"\n\n self.norm1 = norm_layer(dim)\n self.attn = WindowAttention(\n dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,\n qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)\n\n self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity()\n self.norm2 = norm_layer(dim)\n mlp_hidden_dim = int(dim * mlp_ratio)\n self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\n\n if self.shift_size > 0:\n # calculate attention mask for SW-MSA\n H, W = self.input_resolution\n img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1\n h_slices = (slice(0, -self.window_size),\n slice(-self.window_size, -self.shift_size),\n slice(-self.shift_size, None))\n w_slices = (slice(0, -self.window_size),\n slice(-self.window_size, -self.shift_size),\n slice(-self.shift_size, None))\n cnt = 0\n for h in h_slices:\n for w in w_slices:\n img_mask[:, h, w, :] = cnt\n cnt += 1\n\n mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1\n mask_windows = mask_windows.view(-1, self.window_size * self.window_size)\n attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)\n attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))\n else:\n attn_mask = None\n\n self.register_buffer(\"attn_mask\", attn_mask)\n\n def forward(self, x):\n H, W = self.input_resolution\n B, L, C = x.shape\n assert L == H * W, \"input feature has wrong size\"\n\n shortcut = x\n x = self.norm1(x)\n x = x.view(B, H, W, C)\n\n # cyclic shift\n if self.shift_size > 0:\n shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))\n else:\n shifted_x = x\n\n # partition windows\n x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C\n x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C\n\n # W-MSA/SW-MSA\n attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C\n\n # merge windows\n attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)\n shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C\n\n # reverse cyclic shift\n if self.shift_size > 0:\n x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))\n else:\n x = shifted_x\n x = x.view(B, H * W, C)\n\n # FFN\n x = shortcut + self.drop_path(x)\n x = x + self.drop_path(self.mlp(self.norm2(x)))\n\n return x\n\n def extra_repr(self) -> str:\n return f\"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, \" \\\n f\"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}\"\n\n def flops(self):\n flops = 0\n H, W = self.input_resolution\n # norm1\n flops += self.dim * H * W\n # W-MSA/SW-MSA\n nW = H * W / self.window_size / self.window_size\n flops += nW * self.attn.flops(self.window_size * self.window_size)\n # mlp\n flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio\n # norm2\n flops += self.dim * H * W\n return flops"
},
{
"identifier": "BasicLayer",
"path": "src/models/vit_backbones/swin_transformer.py",
"snippet": "class BasicLayer(nn.Module):\n \"\"\" A basic Swin Transformer layer for one stage.\n Args:\n dim (int): Number of input channels.\n input_resolution (tuple[int]): Input resolution.\n depth (int): Number of blocks.\n num_heads (int): Number of attention heads.\n window_size (int): Local window size.\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.\n qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True\n qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.\n drop (float, optional): Dropout rate. Default: 0.0\n attn_drop (float, optional): Attention dropout rate. Default: 0.0\n drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0\n norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm\n downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None\n use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.\n \"\"\"\n\n def __init__(self, dim, input_resolution, depth, num_heads, window_size,\n mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0.,\n attn_drop=0., drop_path=0., norm_layer=nn.LayerNorm,\n downsample=None, use_checkpoint=False,\n block_module=SwinTransformerBlock,\n # add two more parameters for prompt\n num_prompts=None, prompt_location=None, deep_prompt=False,\n # add one more parameters for adapter\n adapter_config=None,\n ):\n\n super().__init__()\n self.dim = dim\n self.input_resolution = input_resolution\n self.depth = depth\n self.use_checkpoint = use_checkpoint\n self.deep_prompt = deep_prompt\n\n # build blocks\n if num_prompts is not None:\n self.blocks = nn.ModuleList([\n block_module(\n num_prompts, prompt_location,\n dim=dim, input_resolution=input_resolution,\n num_heads=num_heads, window_size=window_size,\n shift_size=0 if (i % 2 == 0) else window_size // 2,\n mlp_ratio=mlp_ratio,\n qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop, attn_drop=attn_drop,\n drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, # noqa\n norm_layer=norm_layer)\n for i in range(depth)])\n self.deep_prompt = deep_prompt\n self.num_prompts = num_prompts\n self.prompt_location = prompt_location\n if self.deep_prompt and self.prompt_location != \"prepend\":\n raise ValueError(\"deep prompt mode for swin is only applicable to prepend\")\n elif adapter_config is not None:\n self.blocks = nn.ModuleList([\n block_module(\n dim=dim, input_resolution=input_resolution,\n num_heads=num_heads, window_size=window_size,\n shift_size=0 if (i % 2 == 0) else window_size // 2,\n mlp_ratio=mlp_ratio,\n qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop, attn_drop=attn_drop,\n drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, # noqa\n norm_layer=norm_layer,\n adapter_config=adapter_config)\n for i in range(depth)])\n else:\n self.blocks = nn.ModuleList([\n block_module(\n dim=dim, input_resolution=input_resolution,\n num_heads=num_heads, window_size=window_size,\n shift_size=0 if (i % 2 == 0) else window_size // 2,\n mlp_ratio=mlp_ratio,\n qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop, attn_drop=attn_drop,\n drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, # noqa\n norm_layer=norm_layer)\n for i in range(depth)])\n\n # patch merging layer\n if downsample is not None:\n if num_prompts is None:\n self.downsample = downsample(\n input_resolution, dim=dim, norm_layer=norm_layer\n )\n else:\n self.downsample = downsample(\n num_prompts, 
prompt_location, deep_prompt,\n input_resolution, dim=dim, norm_layer=norm_layer\n )\n else:\n self.downsample = None\n\n def forward(self, x, deep_prompt_embd=None):\n if self.deep_prompt and deep_prompt_embd is None:\n raise ValueError(\"need deep_prompt embddings\")\n\n if not self.deep_prompt:\n for blk in self.blocks:\n if self.use_checkpoint:\n x = checkpoint.checkpoint(blk, x)\n else:\n x = blk(x)\n else:\n # add the prompt embed before each blk call\n B = x.shape[0] # batchsize\n num_blocks = len(self.blocks)\n if deep_prompt_embd.shape[0] != num_blocks:\n # first layer\n for i in range(num_blocks):\n if i == 0:\n x = self.blocks[i](x)\n\n else:\n prompt_emb = deep_prompt_embd[i-1].expand(B, -1, -1)\n x = torch.cat(\n (prompt_emb, x[:, self.num_prompts:, :]),\n dim=1\n )\n x = self.blocks[i](x)\n else:\n # other layers\n for i in range(num_blocks):\n prompt_emb = deep_prompt_embd[i].expand(B, -1, -1)\n x = torch.cat(\n (prompt_emb, x[:, self.num_prompts:, :]),\n dim=1\n )\n x = self.blocks[i](x)\n\n if self.downsample is not None:\n x = self.downsample(x)\n return x\n\n def extra_repr(self) -> str:\n return f\"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}\"\n\n def flops(self):\n flops = 0\n for blk in self.blocks:\n flops += blk.flops()\n if self.downsample is not None:\n flops += self.downsample.flops()\n return flops"
},
{
"identifier": "PatchMerging",
"path": "src/models/vit_backbones/swin_transformer.py",
"snippet": "class PatchMerging(nn.Module):\n r\"\"\" Patch Merging Layer.\n Args:\n input_resolution (tuple[int]): Resolution of input feature.\n dim (int): Number of input channels.\n norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm\n \"\"\"\n\n def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):\n super().__init__()\n self.input_resolution = input_resolution\n self.dim = dim\n self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)\n self.norm = norm_layer(4 * dim)\n\n def forward(self, x):\n \"\"\"\n x: B, H*W, C\n \"\"\"\n H, W = self.input_resolution\n B, L, C = x.shape\n assert L == H * W, \"input feature has wrong size\"\n assert H % 2 == 0 and W % 2 == 0, f\"x size ({H}*{W}) are not even.\"\n\n x = x.view(B, H, W, C)\n\n x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C\n x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C\n x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C\n x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C\n x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C\n x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C\n\n x = self.norm(x)\n x = self.reduction(x)\n\n return x\n\n def extra_repr(self) -> str:\n return f\"input_resolution={self.input_resolution}, dim={self.dim}\"\n\n def flops(self):\n H, W = self.input_resolution\n flops = H * W * self.dim\n flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim\n return flops"
},
{
"identifier": "SwinTransformer",
"path": "src/models/vit_backbones/swin_transformer.py",
"snippet": "class SwinTransformer(nn.Module):\n r\"\"\" Swin Transformer\n A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -\n https://arxiv.org/pdf/2103.14030\n Args:\n img_size (int | tuple(int)): Input image size. Default 224\n patch_size (int | tuple(int)): Patch size. Default: 4\n in_chans (int): Number of input image channels. Default: 3\n num_classes (int): Number of classes for classification head. Default: 1000\n embed_dim (int): Patch embedding dimension. Default: 96\n depths (tuple(int)): Depth of each Swin Transformer layer.\n num_heads (tuple(int)): Number of attention heads in different layers.\n window_size (int): Window size. Default: 7\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4\n qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True\n qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None\n drop_rate (float): Dropout rate. Default: 0\n attn_drop_rate (float): Attention dropout rate. Default: 0\n drop_path_rate (float): Stochastic depth rate. Default: 0.1\n norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.\n ape (bool): If True, add absolute position embedding to the patch embedding. Default: False\n patch_norm (bool): If True, add normalization after patch embedding. Default: True\n use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False\n \"\"\"\n\n def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,\n embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],\n window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,\n drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,\n norm_layer=nn.LayerNorm, ape=False, patch_norm=True,\n use_checkpoint=False, **kwargs):\n super().__init__()\n\n self.num_classes = num_classes\n self.num_layers = len(depths)\n self.embed_dim = embed_dim\n self.ape = ape\n self.patch_norm = patch_norm\n self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))\n self.mlp_ratio = mlp_ratio\n\n # split image into non-overlapping patches\n self.patch_embed = PatchEmbed(\n img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,\n norm_layer=norm_layer if self.patch_norm else None)\n num_patches = self.patch_embed.num_patches\n patches_resolution = self.patch_embed.patches_resolution\n self.patches_resolution = patches_resolution\n\n # absolute position embedding\n if self.ape:\n self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))\n trunc_normal_(self.absolute_pos_embed, std=.02)\n\n self.pos_drop = nn.Dropout(p=drop_rate)\n\n # stochastic depth\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule\n\n # build layers\n self.layers = nn.ModuleList()\n for i_layer in range(self.num_layers):\n layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer),\n input_resolution=(patches_resolution[0] // (2 ** i_layer),\n patches_resolution[1] // (2 ** i_layer)),\n depth=depths[i_layer],\n num_heads=num_heads[i_layer],\n window_size=window_size,\n mlp_ratio=self.mlp_ratio,\n qkv_bias=qkv_bias, qk_scale=qk_scale,\n drop=drop_rate, attn_drop=attn_drop_rate,\n drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],\n norm_layer=norm_layer,\n downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,\n use_checkpoint=use_checkpoint)\n self.layers.append(layer)\n\n self.norm = norm_layer(self.num_features)\n self.avgpool = 
nn.AdaptiveAvgPool1d(1)\n self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()\n\n self.apply(self._init_weights)\n\n def _init_weights(self, m):\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=.02)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return {'absolute_pos_embed'}\n\n @torch.jit.ignore\n def no_weight_decay_keywords(self):\n return {'relative_position_bias_table'}\n\n def forward_features(self, x):\n x = self.patch_embed(x)\n if self.ape:\n x = x + self.absolute_pos_embed\n x = self.pos_drop(x)\n\n for layer in self.layers:\n x = layer(x)\n # logger.info(x.shape)\n\n x = self.norm(x) # B L C\n x = self.avgpool(x.transpose(1, 2)) # B C 1\n x = torch.flatten(x, 1)\n return x\n\n def forward(self, x):\n x = self.forward_features(x)\n x = self.head(x)\n return x\n\n def flops(self):\n flops = 0\n flops += self.patch_embed.flops()\n for i, layer in enumerate(self.layers):\n flops += layer.flops()\n flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)\n flops += self.num_features * self.num_classes\n return flops"
},
{
"identifier": "logging",
"path": "src/utils/logging.py",
"snippet": "_FORMAT = \"[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s\"\ndef _suppress_print():\n def print_pass(*objects, sep=\" \", end=\"\\n\", file=sys.stdout, flush=False):\ndef _cached_log_stream(filename):\ndef setup_logging(\n num_gpu, num_shards, output=\"\", name=\"MOSA\", color=True):\ndef setup_single_logging(name, output=\"\"):\ndef get_logger(name):\ndef log_json_stats(stats, sort_keys=True):\n def __init__(self, *args, **kwargs):\n def formatMessage(self, record: logging.LogRecord) -> str:\nclass _ColorfulFormatter(logging.Formatter):"
}
] | import copy
import math
import numpy as np
import torch
import torch.nn as nn
from ..vit_backbones.swin_transformer import Mlp, SwinTransformerBlock, BasicLayer, PatchMerging, SwinTransformer
from ...utils import logging | 6,072 | #!/usr/bin/env python3
"""
vit with adapter
"""
logger = logging.get_logger("MOSA")
class AdaptedMlp(Mlp):
def __init__(
self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.,
adapter_config=None, adapter_scalar=1.0, dropout=0.0
):
super(AdaptedMlp, self).__init__(in_features, hidden_features, out_features, act_layer, drop)
self.adapter_config = adapter_config
if adapter_scalar is None:
self.adapter_scale = nn.Parameter(torch.ones(1))
else:
self.adapter_scale = adapter_scalar
self.dropout = dropout
out_features = out_features or in_features
self.adapter_down = nn.Linear(
in_features,
adapter_config.BOTTLENECK_SIZE
)
self.adapter_up = nn.Linear(
adapter_config.BOTTLENECK_SIZE,
out_features
)
self.adapter_act_fn = nn.ReLU()
nn.init.kaiming_uniform_(self.adapter_down.weight, a=math.sqrt(5))
nn.init.zeros_(self.adapter_down.bias)
nn.init.zeros_(self.adapter_up.weight)
nn.init.zeros_(self.adapter_up.bias)
def forward(self, x):
# same as reguluar Mlp block
h = x
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
# start to insert adapter layers...
adpt = self.adapter_down(h)
adpt = self.adapter_act_fn(adpt)
adpt = nn.functional.dropout(adpt, p=self.dropout, training=self.training)
adpt = self.adapter_up(adpt)
x = adpt * self.adapter_scale + x
# ...end
return x
| #!/usr/bin/env python3
"""
vit with adapter
"""
logger = logging.get_logger("MOSA")
class AdaptedMlp(Mlp):
def __init__(
self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.,
adapter_config=None, adapter_scalar=1.0, dropout=0.0
):
super(AdaptedMlp, self).__init__(in_features, hidden_features, out_features, act_layer, drop)
self.adapter_config = adapter_config
if adapter_scalar is None:
self.adapter_scale = nn.Parameter(torch.ones(1))
else:
self.adapter_scale = adapter_scalar
self.dropout = dropout
out_features = out_features or in_features
self.adapter_down = nn.Linear(
in_features,
adapter_config.BOTTLENECK_SIZE
)
self.adapter_up = nn.Linear(
adapter_config.BOTTLENECK_SIZE,
out_features
)
self.adapter_act_fn = nn.ReLU()
nn.init.kaiming_uniform_(self.adapter_down.weight, a=math.sqrt(5))
nn.init.zeros_(self.adapter_down.bias)
nn.init.zeros_(self.adapter_up.weight)
nn.init.zeros_(self.adapter_up.bias)
def forward(self, x):
# same as reguluar Mlp block
h = x
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
# start to insert adapter layers...
adpt = self.adapter_down(h)
adpt = self.adapter_act_fn(adpt)
adpt = nn.functional.dropout(adpt, p=self.dropout, training=self.training)
adpt = self.adapter_up(adpt)
x = adpt * self.adapter_scale + x
# ...end
return x
| class AdaptedSwinTransformerBlock(SwinTransformerBlock): | 1 | 2023-12-06 07:50:16+00:00 | 8k |
khwong-c/syn-magia | tests/core/test_const.py | [
{
"identifier": "Constant",
"path": "magia/core.py",
"snippet": "class Constant(Signal):\n \"\"\"\n Representing a constant signal. The value stored in bytes representing the constance driver.\n \"\"\"\n new_const_counter = count(0)\n\n def __init__(\n self,\n value, width: int, signed: bool = False,\n name: Optional[str] = None,\n **kwargs\n ):\n if name is None:\n name = f\"const_{next(self.new_const_counter)}\"\n\n super().__init__(width=width, signed=signed, name=name, **kwargs)\n self._config.signal_type = SignalType.CONSTANT\n self.value: bytes = value\n\n def elaborate(self) -> str:\n signal_decl = self.signal_decl()\n assignment = self._SIGNAL_ASSIGN_TEMPLATE.substitute(\n name=self.net_name,\n driver=self.sv_constant(self.value, len(self), self.signed),\n )\n return \"\\n\".join((signal_decl, assignment))\n\n @staticmethod\n def sv_constant(value: Optional[Union[int, bytes]], width: int, signed: bool = False) -> str:\n \"\"\"\n Convert a Python integer or bytes object to a SystemVerilog constant expression.\n If value is None, return \"X\", the SystemVerilog constant for an unknown value.\n \"\"\"\n byte_cnt = ceil(width / 8)\n if value is not None:\n if isinstance(value, int):\n value = value.to_bytes(byte_cnt, byteorder=\"big\", signed=signed)\n byte_mask = (2 ** width - 1).to_bytes(byte_cnt, byteorder=\"big\")\n value = bytes([x & y for x, y in zip(value, byte_mask)])\n value = value.hex()[-(ceil(width / 4)):].upper()\n else:\n value = \"X\"\n sign = \"s\" if signed else \"\"\n return f\"{width}'{sign}h{value}\""
},
{
"identifier": "Output",
"path": "magia/core.py",
"snippet": "class Output(Signal):\n \"\"\"\n Representing an output signal.\n They are the starting points when we elaborate the module.\n It is used by both the module declaration and the module instance.\n \"\"\"\n\n def __init__(\n self,\n name: str, width: int, signed: bool = False,\n owner_instance: Optional[\"Instance\"] = None,\n **kwargs\n ):\n \"\"\"\n I/O ports must have name and width well-defined by designers.\n \"\"\"\n if name is None:\n raise ValueError(\"Output name is not set\")\n if width == 0:\n raise ValueError(\"Output width is not set\")\n super().__init__(name=name, width=width, signed=signed, **kwargs)\n self._config.signal_type = SignalType.OUTPUT\n self._config.owner_instance = owner_instance\n\n @property\n def net_name(self) -> str:\n \"\"\"\n Net Name of I/O must be the same with the name, even they are within an IOBundle\n \"\"\"\n return self.name\n\n def elaborate(self) -> str:\n \"\"\"\n Elaborate the output signal in the module declaration.\n :return: output logic (signed) [...]PORT_NAME\n \"\"\"\n port_decl = self.signal_decl().rstrip(\";\")\n return f\"output {port_decl}\"\n\n def copy(self, owner_instance: Optional[\"Instance\"] = None, **kwargs) -> \"Output\":\n \"\"\"\n Copy the output signal. Driver is discarded.\n I/O port can only be assigned to an instance, not a SignalBundle / IOBundle.\n :return: A new output signal with the same configuration.\n \"\"\"\n return Output(\n name=self.name,\n width=len(self),\n signed=self.signed,\n description=self.description,\n owner_instance=owner_instance,\n )"
},
{
"identifier": "Module",
"path": "magia/module.py",
"snippet": "class Module(Synthesizable):\n \"\"\"\n A module is a collection of signals and operations. It can also include other modules.\n The module is the base class of specialized modules.\n Developers can define the generic behavior of the module in a dynamic way,\n while each `Module` objects is a specialized module initialized with specific parameters.\n\n The SystemVerilog Keyword `parameters` is not used here.\n It is because we can generate the code for the specialized module with parametrized values hard-coded.\n\n The module can be instantiated with the `instance` method.\n\n Designers shall implement the circuit logic in the `__init__` method.\n However, we highly recommend designers to extract the logic implementation into a seperated method.\n e.g.\n def __init__(self, **kwargs):\n self.io += Input(\"a\", 8)\n self.io += Output(\"q\", 8)\n self.implement()\n\n def implement(self):\n self.io.q <<= self.io.a + 1\n \"\"\"\n _MOD_DECL_TEMPLATE = Template(\"module $name (\\n$io\\n);\")\n _new_module_counter = count(0)\n output_file: Optional[PathLike] = None\n\n def __init__(self, name: Optional[str] = None, **kwargs):\n super().__init__(**kwargs)\n\n # Get the arguments passed to the __init__ method of the inherited class\n # === DON'T REFACTOR BELOW. We are inspecting the stack and refactoring will affect the result ===\n children_local = inspect.stack(0)[1].frame.f_locals\n children_class = children_local.get(\"__class__\")\n func_signature = inspect.signature(children_class.__init__) if children_class else {}\n self._mod_params = OrderedDict(**{\n arg: children_local[arg]\n for arg, param in func_signature.parameters.items()\n if param.kind not in (param.VAR_KEYWORD, param.VAR_POSITIONAL) and arg != \"self\"\n })\n # === DON'T REFACTOR ABOVE ===\n\n if name is None:\n name = f\"{self.__class__.__name__}_{next(self._new_module_counter)}\"\n\n self._config = ModuleConfig(\n module_class=type(self),\n name=name,\n )\n self.io = IOBundle()\n\n def validate(self) -> list[Exception]:\n undriven_outputs = [\n output.net_name\n for output in self.io.outputs\n if output.driver() is None\n ]\n if undriven_outputs:\n return [\n ValueError(\"Output not driven\", output)\n for output in undriven_outputs\n ]\n return []\n\n def mod_declaration(self) -> str:\n mod_decl = self._MOD_DECL_TEMPLATE.substitute(\n name=self.name,\n io=\",\\n\".join(\n port.elaborate()\n for port in self.io.inputs + self.io.outputs\n ),\n )\n return \"\\n\".join((mod_decl, self._module_elab_doc))\n\n def elaborate(self) -> tuple[str, set[\"Module\"]]:\n \"\"\"\n Trace nets and operations from output ports\n This method generates the SystemVerilog code for the module.\n\n :return: The SystemVerilog code for the module, and the list of submodules of the instance in the module.\n \"\"\"\n violations = self.validate()\n if violations:\n raise ValueError(f\"Module {self.name} is not valid.\", violations)\n\n mod_decl = self.mod_declaration()\n\n signals, insts = self.trace()\n\n mod_impl = [\n inst.elaborate()\n for inst in insts\n ]\n mod_impl += [\n signal.elaborate()\n for signal in signals\n ]\n\n mod_impl = \"\\n\".join(mod_impl)\n\n mod_output_assignment = \"\\n\".join(\n Signal._SIGNAL_ASSIGN_TEMPLATE.substitute(\n name=output.net_name,\n driver=output.driver().net_name,\n )\n for output in self.io.outputs\n )\n\n extra_code = self.post_elaborate()\n\n mod_end = \"endmodule\"\n\n sv_code = \"\\n\".join((mod_decl, mod_impl, mod_output_assignment, extra_code, mod_end))\n submodules = {inst.module for inst in 
insts}\n\n return sv_code, submodules\n\n def post_elaborate(self) -> str:\n \"\"\"\n Override this method to add extra code to the module.\n The code will be added after the elaboration of the module.\n\n Adding assertions to the module is a typical use case.\n\n :return: The extra code to be added to the module.\n \"\"\"\n _ = self # Stub to avoid IDE/Lint warning\n return \"\"\n\n def trace(self) -> tuple[list[Union[Signal, Memory]], list[\"Instance\"]]:\n \"\"\"\n Trace nets and instances from output ports\n \"\"\"\n traced_sig_id: set[int] = set()\n traced_inst_id: set[int] = set()\n traced_signal: list[Union[Signal, Memory]] = []\n traced_inst: list[Instance] = []\n sig_to_be_traced: dict[int, Signal] = {}\n\n for output in self.io.outputs:\n sig_to_be_traced |= {\n id(sig): sig\n for sig in output.drivers\n }\n while sig_to_be_traced:\n next_trace = {}\n for signal_id, signal in sig_to_be_traced.items():\n\n # Tracing Instances with Output connected\n if signal.type == SignalType.OUTPUT:\n inst: Optional[Instance] = signal.owner_instance\n if inst is not None and id(inst) not in traced_inst_id:\n traced_inst_id.add(id(inst))\n traced_inst.append(inst)\n\n # The Input port of the instance is skipped\n # We will go directly to the driver as it must be driven by another signal.\n input_drivers = [i.driver() for i in inst.inputs.values()]\n next_trace |= {\n id_sig: sig\n for sig in input_drivers\n if (id_sig := id(sig)) not in traced_sig_id\n }\n elif signal.type != SignalType.INPUT and signal_id not in traced_sig_id:\n traced_sig_id.add(signal_id)\n traced_signal.append(signal)\n\n next_trace |= {\n id_sig: sig\n for sig in signal.drivers\n if sig.type not in (SignalType.INPUT,)\n and (id_sig := id(sig)) not in traced_sig_id\n }\n\n if signal.type == SignalType.MEMORY:\n signal: MemorySignal\n if id(signal.memory) not in traced_sig_id:\n traced_sig_id.add(id(signal.memory))\n traced_signal.append(signal.memory)\n\n next_trace |= {\n id_sig: sig\n for sig in signal.memory.drivers\n if (id_sig := id(sig)) not in traced_sig_id\n }\n\n sig_to_be_traced = next_trace\n\n traced_signal.reverse()\n traced_inst.reverse()\n\n # Check if we have name conflict on the signals and instances\n sig_name_counter = Counter(sig.net_name for sig in traced_signal)\n inst_name_counter = Counter(inst.name for inst in traced_inst)\n sig_conflicts = [name for name, cnt in sig_name_counter.items() if cnt > 1]\n inst_conflicts = [name for name, cnt in inst_name_counter.items() if cnt > 1]\n if sig_conflicts:\n raise ValueError(f\"Signal name conflict: {sig_conflicts}\")\n if inst_conflicts:\n raise ValueError(f\"Instance name conflict: {inst_conflicts}\")\n\n return traced_signal, traced_inst\n\n def instance(\n self, name: Optional[str] = None,\n io: Optional[dict[str, Signal]] = None\n ) -> \"Instance\":\n \"\"\"\n Create an instance of the module\n :return: The created instance\n \"\"\"\n return Instance(\n module=self,\n name=name,\n io=io,\n )\n\n @property\n def name(self) -> str:\n return self._config.name\n\n @property\n def params(self) -> dict[str, object]:\n \"\"\"\n Return the parameters used to specialize this module.\n \"\"\"\n return self._mod_params\n\n @property\n def _module_elab_doc(self) -> str:\n \"\"\"\n Generate the summary of a module and register it to the module.\n It will be written into the SystemVerilog code during elaboration.\n \"\"\"\n doc = self._module_doc_str\n\n if self.params:\n doc += \"\\nModule Parameters:\\n\"\n doc += \"-----------------\\n\"\n doc += \"\\n\".join(\n 
f\"{k}: {v}\"\n for k, v in self.params.items()\n ) + \"\\n\"\n\n if doc:\n doc = f\"/*\\n{doc}*/\\n\"\n return doc\n\n @property\n def _module_doc_str(self) -> str:\n doc = inspect.getdoc(self.__class__)\n if doc is None or doc == inspect.getdoc(Module):\n return \"\"\n if not doc.endswith(\"\\n\"):\n return doc + \"\\n\"\n return doc\n\n @cached_property\n def _module_init_param_doc(self) -> dict[str, str]:\n params = [(k, f\"{k}:\") for k in self._mod_params]\n doc = inspect.getdoc(self.__init__)\n if doc is None:\n return []\n\n result_doc = {}\n possible_param = [line.strip() for line in doc.split(\"\\n\") if \":\" in line]\n for line in possible_param:\n for param, sep in params:\n if sep in line:\n result_doc[param] = line.split(sep, 1)[-1].strip()\n return result_doc\n\n @property\n def spec(self) -> dict[str, object]:\n \"\"\"\n Return the \"Specification\" of a specialized Module.\n It is a dictionary which can be further processed.\n \"\"\"\n return {\n \"name\": self.name,\n \"description\": self._module_doc_str.strip(),\n \"parameters\": [\n {\n \"name\": k,\n \"value\": v,\n \"description\": self._module_init_param_doc.get(k, \"\"),\n }\n for k, v in self.params.items()\n ],\n \"ports\": [\n {\n \"name\": alias,\n \"direction\": signal.type.name,\n \"width\": len(signal),\n \"signed\": signal.signed,\n \"description\": signal.description,\n }\n for alias, signal in self.io.signals.items()\n ],\n }"
}
] | from pathlib import Path
from cocotb_test.simulator import run as sim_run
from magia import Constant, Module, Output
import cocotb.clock
import pytest
import tests.helper as helper | 3,840 |
test_constants = [
# Format: (value, width, signed) # noqa: ERA001
(0, 8, False),
(0, 8, True),
(0xFF, 8, False),
(-1, 8, True),
(0x0F, 8, False),
(0x0F, 8, True),
(0x0F, 4, False),
(0x0F, 4, True),
(0x0F, 2, False),
(0x0F, 2, True),
(0x0F, 1, False),
(0x0F, 1, True),
(0x0F, 16, False),
(0x0F, 16, True),
(0x0F, 32, False),
(0x0F, 32, True),
(0x0F, 64, False),
(0x0F, 64, True),
(-10, 3, True),
]
test_constants += [(i, 5, True) for i in range(-16, 16)]
test_constants += [(i, 5, False) for i in range(0, 32)]
class AssignmentModule(Module):
def __init__(self, constant_list, **kwargs):
super().__init__(**kwargs)
for i, (value, width, signed) in enumerate(constant_list):
port_name = f"q{i}"
|
test_constants = [
# Format: (value, width, signed) # noqa: ERA001
(0, 8, False),
(0, 8, True),
(0xFF, 8, False),
(-1, 8, True),
(0x0F, 8, False),
(0x0F, 8, True),
(0x0F, 4, False),
(0x0F, 4, True),
(0x0F, 2, False),
(0x0F, 2, True),
(0x0F, 1, False),
(0x0F, 1, True),
(0x0F, 16, False),
(0x0F, 16, True),
(0x0F, 32, False),
(0x0F, 32, True),
(0x0F, 64, False),
(0x0F, 64, True),
(-10, 3, True),
]
test_constants += [(i, 5, True) for i in range(-16, 16)]
test_constants += [(i, 5, False) for i in range(0, 32)]
class AssignmentModule(Module):
def __init__(self, constant_list, **kwargs):
super().__init__(**kwargs)
for i, (value, width, signed) in enumerate(constant_list):
port_name = f"q{i}" | self.io += Output(port_name, width, signed=signed) | 1 | 2023-12-12 22:50:43+00:00 | 8k |
batmanlab/DrasCLR | train.py | [
{
"identifier": "Encoder",
"path": "models/cnn3d.py",
"snippet": "class Encoder(nn.Module):\n\n def __init__(self, rep_dim, moco_dim, num_experts, num_coordinates):\n super(Encoder, self).__init__()\n self.rep_dim = rep_dim\n self.moco_dim = moco_dim\n self.num_experts = num_experts\n self.num_coordinates = num_coordinates\n self.conv1 = Conv3d(1, 8, kernel_size=3, stride=1, padding=1, num_experts=self.num_experts, num_coordinates=self.num_coordinates)\n self.bn1 = nn.BatchNorm3d(8)\n self.act = nn.ELU()\n self.conv2 = Conv3d(8, 8, kernel_size=3, stride=2, padding=1, num_experts=self.num_experts, num_coordinates=self.num_coordinates)\n self.bn2 = nn.BatchNorm3d(8)\n self.downsample1 = Block(8, 16, self.num_experts, self.num_coordinates)\n self.downsample2 = Block(16, 32, self.num_experts, self.num_coordinates)\n self.downsample3 = Block(32, 64, self.num_experts, self.num_coordinates)\n self.conv3 = Conv3d(64, 128, kernel_size=3, stride=1, padding=1, num_experts=self.num_experts, num_coordinates=self.num_coordinates)\n self.bn3 = nn.BatchNorm3d(128)\n self.conv4 = Conv3d(128, rep_dim, kernel_size=3, stride=2, padding=1, num_experts=self.num_experts, num_coordinates=self.num_coordinates)\n self.bn4 = nn.BatchNorm3d(rep_dim)\n self.fc = nn.Linear(rep_dim, moco_dim)\n\n def forward(self, x, loc):\n x = self.conv1(x, loc)\n x = self.bn1(x)\n x = self.act(x)\n x = self.conv2(x, loc)\n x = self.bn2(x)\n x = self.act(x)\n x = self.downsample1(x, loc)\n x = self.downsample2(x, loc)\n x = self.downsample3(x, loc)\n x = self.conv3(x, loc)\n x = self.bn3(x)\n x = self.act(x)\n x = self.conv4(x, loc)\n x = self.bn4(x)\n x = self.act(x)\n h = torch.flatten(x, 1)\n z = self.fc(h)\n return z, h"
},
{
"identifier": "DrasCLR",
"path": "models/builder.py",
"snippet": "class DrasCLR(nn.Module):\n\n def __init__(self, base_encoder, num_patch, rep_dim, moco_dim, num_experts, num_coordinates, K, m, T, mlp):\n \"\"\"\n dim: feature dimension (default: 128)\n K: queue size; number of negative keys (default: 65536)\n m: moco momentum of updating key encoder (default: 0.999)\n T: softmax temperature (default: 0.07)\n \"\"\"\n super(DrasCLR, self).__init__()\n\n self.K = K\n self.m = m\n self.T = T\n self.num_locs = num_patch # add the new dimension of number of locations\n\n # create the encoders\n # num_classes is the output fc dimension\n self.encoder_q = base_encoder(rep_dim=rep_dim, moco_dim=moco_dim, num_experts=num_experts, num_coordinates=num_coordinates)\n self.encoder_k = base_encoder(rep_dim=rep_dim, moco_dim=moco_dim, num_experts=num_experts, num_coordinates=num_coordinates)\n\n if mlp: # hack: brute-force replacement\n dim_mlp = self.encoder_q.fc.weight.shape[1]\n self.encoder_q.fc = nn.Sequential(nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder_q.fc)\n self.encoder_k.fc = nn.Sequential(nn.Linear(dim_mlp, dim_mlp), nn.ReLU(), self.encoder_k.fc)\n\n for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):\n param_k.data.copy_(param_q.data) # initialize\n param_k.requires_grad = False # not update by gradient\n\n # create the queue\n self.register_buffer(\"queue\", torch.randn(moco_dim, K, self.num_locs)) # the queue should be the size of (dim of reps) * (number of negative pairs) * (number of total locations)\n self.queue = nn.functional.normalize(self.queue, dim=0) # normalize patch representation\n self.register_buffer(\"queue_ptr\", torch.zeros(self.num_locs, dtype=torch.long)) # set pointer in buffer to 1 for each path location\n\n @torch.no_grad()\n def _momentum_update_key_encoder(self):\n \"\"\"\n Momentum update of the key encoder\n \"\"\"\n for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):\n param_k.data = param_k.data * self.m + param_q.data * (1. - self.m)\n\n @torch.no_grad()\n def _dequeue_and_enqueue(self, keys, patch_idx):\n # gather keys before updating queue\n keys = concat_all_gather(keys)\n\n batch_size = keys.shape[0]\n\n ptr = self.queue_ptr\n assert self.K % batch_size == 0 # for simplicity\n\n # replace the keys at ptr (dequeue and enqueue)\n self.queue[:, ptr[patch_idx]:ptr[patch_idx] + batch_size, patch_idx] = keys.T\n ptr[patch_idx] = (ptr[patch_idx] + batch_size) % self.K # move pointer\n\n self.queue_ptr = ptr\n\n @torch.no_grad()\n def _batch_shuffle_ddp(self, x):\n \"\"\"\n Batch shuffle, for making use of BatchNorm.\n *** Only support DistributedDataParallel (DDP) model. ***\n \"\"\"\n # gather from all gpus\n batch_size_this = x.shape[0]\n x_gather = concat_all_gather(x)\n batch_size_all = x_gather.shape[0]\n\n num_gpus = batch_size_all // batch_size_this\n\n # random shuffle index\n idx_shuffle = torch.randperm(batch_size_all).cuda()\n\n # broadcast to all gpus\n torch.distributed.broadcast(idx_shuffle, src=0)\n\n # index for restoring\n idx_unshuffle = torch.argsort(idx_shuffle)\n\n # shuffled index for this gpu\n gpu_idx = torch.distributed.get_rank()\n idx_this = idx_shuffle.view(num_gpus, -1)[gpu_idx]\n\n return x_gather[idx_this], idx_unshuffle\n\n @torch.no_grad()\n def _batch_unshuffle_ddp(self, x, idx_unshuffle):\n \"\"\"\n Undo batch shuffle.\n *** Only support DistributedDataParallel (DDP) model. 
***\n \"\"\"\n # gather from all gpus\n batch_size_this = x.shape[0]\n x_gather = concat_all_gather(x)\n batch_size_all = x_gather.shape[0]\n\n num_gpus = batch_size_all // batch_size_this\n\n # restored index for this gpu\n gpu_idx = torch.distributed.get_rank()\n idx_this = idx_unshuffle.view(num_gpus, -1)[gpu_idx]\n\n return x_gather[idx_this]\n\n def forward(self, patch_idx, pch_q, pch_k, ngb_q):\n \"\"\"\n Input:\n im_q: a batch of query images\n im_k: a batch of key images\n Output:\n logits, targets\n \"\"\"\n # compute query patch features\n q, h_q = self.encoder_q(pch_q[0], pch_q[1]) # queries: NxC, encoder needs to take both pathces and their locations as inputs\n q = nn.functional.normalize(q, dim=1)\n\n # compute query neighbor features\n ngb_flatten = ngb_q[0].reshape(-1, 32, 32, 32)\n loc_flatten = ngb_q[1].reshape(-1, 3)\n r, h_r = self.encoder_q(ngb_flatten[:, None, :, :, :], loc_flatten)\n r = nn.functional.normalize(r, dim=1)\n r = r.reshape(ngb_q[0].shape[0], ngb_q[0].shape[1], -1) # queries: N * R * C, samples * k-neighbors * channels\n\n # compute key features\n with torch.no_grad(): # no gradient to keys\n self._momentum_update_key_encoder() # update the key encoder\n\n # shuffle for making use of BN\n pch_k[0], idx_unshuffle = self._batch_shuffle_ddp(pch_k[0])\n\n k, h_k = self.encoder_k(pch_k[0], pch_k[1]) # keys: N * C\n k = nn.functional.normalize(k, dim=1)\n\n # undo shuffle\n k = self._batch_unshuffle_ddp(k, idx_unshuffle)\n\n # patch InfoNCE logits\n # Einstein sum is more intuitive\n # positive logits: N * 1\n l_pos_pch = torch.einsum('nc,nc->n', [q, k]).unsqueeze(-1)\n # negative logits: N * K\n negs = self.queue[:,:,patch_idx].clone().detach() # compute negative logits for each path in the batch conditioned on their locations\n l_neg_pch = torch.einsum('nc,ck->nk', [q, negs])\n # logits: N * (1+K)\n logits_pch = torch.cat([l_pos_pch, l_neg_pch], dim=1)\n # apply temperature\n logits_pch /= self.T\n\n # neighbor InfoNCE logits\n # positive logits: N * 1\n l_pos_ngb = torch.einsum('nrc, nc->n', [r, k]).unsqueeze(-1)\n # negative logits: N * K\n l_neg_ngb = torch.einsum('nrc, ck->nk', [r, negs])\n # logits: N * (1+K)\n logits_ngb = torch.cat([l_pos_ngb, l_neg_ngb], dim=1)\n # apply temperature\n logits_ngb /= self.T\n\n # labels: positive key indicators\n labels = torch.zeros(logits_pch.shape[0], dtype=torch.long).cuda()\n\n # dequeue and enqueue\n self._dequeue_and_enqueue(k, patch_idx) # consider location for each patch in the batch\n\n return logits_pch, logits_ngb, labels"
},
{
"identifier": "COPD_dataset",
"path": "data/copd_patch.py",
"snippet": "class COPD_dataset(Dataset):\n\n def __init__(self, stage, args, patch_transforms=default_transform, neighbor_transforms=default_transform):\n self.stage = stage\n self.args = args\n self.root_dir = args.root_dir\n self.metric_dict = dict() # initialize metric dictionary\n self.patch_transforms = patch_transforms\n self.neighbor_transforms = neighbor_transforms\n\n # atlas patch locations, our refernce file can be found at ./preprocess/misc/atlas_patch_loc.npy\n self.patch_loc = np.load(self.args.root_dir + \"19676E_INSP_STD_JHU_COPD_BSpline_Iso1_patch_loc.npy\")\n # pairwise distance\n self.dists = pairwise_distances(self.patch_loc, metric='euclidean')\n # normalize patch locations\n self.patch_loc = (self.patch_loc / self.patch_loc.max(0)) * 2 - 1 # normalize position to [-1, 1]\n\n self.patch_idx = 0\n self.patch_data = np.load(self.args.root_dir+\"grouped_patch/patch_loc_\"+str(self.patch_idx)+\".npy\")\n # top k nearest patches\n self.k_neighbor_idx = np.argsort(self.dists[self.patch_idx,:])[1: (self.args.k_neighbors+1)]\n neighbor_lst = []\n for k in range(self.args.k_neighbors):\n neighbor_data = np.load(self.args.root_dir+\"grouped_patch/patch_loc_\"+str(self.k_neighbor_idx[k])+\".npy\")\n neighbor_lst.append(neighbor_data[None, :, :, :, :]) # 1 * 9179 * 32 * 32 * 32\n self.neighbor_data = np.concatenate(neighbor_lst, axis=0)\n del neighbor_lst\n\n if stage == 'training':\n # Specific to COPDGene dataset, you can change depends on your needs\n FILE = open(DATA_DIR + \"phase1_Final_10K/phase 1 Pheno/Final10000_Phase1_Rev_28oct16.txt\", \"r\")\n mylist = FILE.readline().strip(\"\\n\").split(\"\\t\")\n metric_idx = [mylist.index(label) for label in self.args.label_name]\n race_idx = mylist.index(\"race\")\n for line in FILE.readlines():\n mylist = line.strip(\"\\n\").split(\"\\t\")\n tmp = [mylist[idx] for idx in metric_idx]\n if \"\" in tmp:\n continue\n if self.args.nhw_only and mylist[race_idx] != \"1\":\n continue\n metric_list = []\n for i in range(len(metric_idx)):\n metric_list.append(float(tmp[i]))\n self.metric_dict[mylist[0]] = metric_list\n FILE.close()\n\n if stage == 'testing':\n # Specific to COPDGene dataset, you can change depends on your needs\n self.label_name = self.args.label_name + self.args.label_name_set2\n FILE = open(DATA_DIR + \"phase1_Final_10K/phase 1 Pheno/Final10000_Phase1_Rev_28oct16.txt\", \"r\")\n mylist = FILE.readline().strip(\"\\n\").split(\"\\t\")\n metric_idx = [mylist.index(label) for label in self.label_name]\n for line in FILE.readlines():\n mylist = line.strip(\"\\n\").split(\"\\t\")\n tmp = [mylist[idx] for idx in metric_idx]\n if \"\" in tmp[:3]:\n continue\n metric_list = []\n for i in range(len(metric_idx)):\n if tmp[i] == \"\":\n metric_list.append(-1024)\n else:\n metric_list.append(float(tmp[i]))\n self.metric_dict[mylist[0]] = metric_list + [-1024, -1024, -1024]\n FILE = open(DATA_DIR + \"CT_scan_datasets/CT_visual_scoring/COPDGene_CT_Visual_20JUL17.txt\", \"r\")\n mylist = FILE.readline().strip(\"\\n\").split(\"\\t\")\n metric_idx = [mylist.index(label) for label in self.args.visual_score]\n for line in FILE.readlines():\n mylist = line.strip(\"\\n\").split(\"\\t\")\n if mylist[0] not in self.metric_dict:\n continue\n tmp = [mylist[idx] for idx in metric_idx]\n metric_list = []\n for i in range(len(metric_idx)):\n metric_list.append(float(tmp[i]))\n self.metric_dict[mylist[0]][\n -len(self.args.visual_score) - len(self.args.P2_Pheno):-len(self.args.P2_Pheno)] = metric_list\n FILE.close()\n FILE = open(\n DATA_DIR + 'P1-P2 
First 5K Long Data/Subject-flattened- one row per subject/First5000_P1P2_Pheno_Flat24sep16.txt',\n 'r')\n mylist = FILE.readline().strip(\"\\n\").split(\"\\t\")\n metric_idx = [mylist.index(label) for label in self.args.P2_Pheno]\n for line in FILE.readlines():\n mylist = line.strip(\"\\n\").split(\"\\t\")\n if mylist[0] not in self.metric_dict:\n continue\n tmp = [mylist[idx] for idx in metric_idx]\n metric_list = []\n for i in range(len(metric_idx)):\n metric_list.append(float(tmp[i]))\n self.metric_dict[mylist[0]][-len(self.args.P2_Pheno):] = metric_list\n FILE.close()\n\n self.sid_list = []\n for item in glob.glob(self.args.root_dir+\"patch/\"+\"*_patch.npy\"):\n if item.split('/')[-1][:6] not in self.metric_dict:\n continue\n self.sid_list.append(item.split('/')[-1][:-10])\n self.sid_list.sort()\n assert len(self.sid_list) == self.patch_data.shape[0]\n\n print(\"Fold: full\")\n self.sid_list = np.asarray(self.sid_list)\n self.sid_list_len = len(self.sid_list)\n print(stage+\" dataset size:\", self.sid_list_len)\n\n def set_patch_idx(self, patch_idx):\n self.patch_idx = patch_idx\n self.patch_data = np.load(self.args.root_dir+\"grouped_patch/patch_loc_\"+str(self.patch_idx)+\".npy\")\n # top k nearest patches\n self.k_neighbor_idx = np.argsort(self.dists[self.patch_idx,:])[1: (self.args.k_neighbors+1)]\n neighbor_lst = []\n for k in range(self.args.k_neighbors):\n neighbor_data = np.load(self.args.root_dir+\"grouped_patch/patch_loc_\"+str(self.k_neighbor_idx[k])+\".npy\")\n neighbor_lst.append(neighbor_data[None, :, :, :, :]) # 1 * 9179 * 32 * 32 * 32\n self.neighbor_data = np.concatenate(neighbor_lst, axis=0)\n del neighbor_lst\n\n def __len__(self):\n if self.stage == 'training':\n return self.sid_list_len * self.args.num_patch\n if self.stage == 'testing':\n return self.sid_list_len\n\n def __getitem__(self, idx):\n\n if self.stage == 'training':\n idx = idx % self.sid_list_len\n\n # patch data\n pch = self.patch_data[idx, :, :, :]\n pch = np.clip(pch, -1024, 240) # clip input intensity to [-1024, 240]\n pch = pch + 1024.\n pch = self.patch_transforms(pch[None, :, :, :])\n pch[0] = pch[0]/632.-1 # Normalize to [-1,1], 632=(1024+240)/2\n pch[1] = pch[1]/632.-1 # Normalize to [-1,1], 632=(1024+240)/2\n # patch location\n patch_loc_idx = self.patch_loc[self.patch_idx, :]\n\n # neighbor data\n ngb = self.neighbor_data[:, idx, :, :, :]\n ngb = np.clip(ngb, -1024, 240) # clip input intensity to [-1024, 240]\n ngb = ngb + 1024.\n ngb = self.neighbor_transforms(ngb)\n ngb = ngb/632.-1 # Normalize to [-1,1], 632=(1024+240)/2\n # neighbor location\n neighor_loc_idx = self.patch_loc[self.k_neighbor_idx, :]\n\n # labels\n key = self.sid_list[idx][:6]\n label = np.asarray(self.metric_dict[key])\n return key, pch, patch_loc_idx, ngb, neighor_loc_idx, label\n\n if self.stage == 'testing':\n sid = self.sid_list[idx]\n\n # read the entire image including 581 patches\n img = np.load(self.root_dir + \"patch/\" + sid + \"_patch.npy\")\n img = np.clip(img, -1024, 240) # clip input intensity to [-1024, 240]\n img = img + 1024.\n img = img[:, None, :, :, :] / 632. - 1 # Normalize to [-1,1], 632=(1024+240)/2\n\n # patch locations for all 581 patches\n patch_loc_idx = self.patch_loc\n\n # study id\n key = self.sid_list[idx][:6]\n\n # labels\n label = np.asarray(self.metric_dict[key]) # extract sid from the first 6 letters\n\n return sid, img, patch_loc_idx, label"
}
] | import os
import argparse
import builtins
import math
import random
import shutil
import time
import warnings
import json
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import models.loader as DrasCLR_Loader
from tensorboard_logger import configure, log_value
from models.cnn3d import Encoder
from models.builder import DrasCLR
from data.copd_patch import COPD_dataset
from monai.transforms import Compose, RandGaussianNoise, RandAffine, Rand3DElastic, RandAdjustContrast | 6,901 | help='root directory of registered images in COPD dataset')
parser.add_argument('--label-name', default=["FEV1pp_utah", "FEV1_FVC_utah", "finalGold"], nargs='+',
help='phenotype label names')
parser.add_argument('--label-name-set2', default=["Exacerbation_Frequency", "MMRCDyspneaScor"], nargs='+',
help='phenotype label names')
parser.add_argument('--visual-score', default=["Emph_Severity", "Emph_Paraseptal"], nargs='+',
help='phenotype label names')
parser.add_argument('--P2-Pheno', default=["Exacerbation_Frequency_P2"], nargs='+',
help='phenotype label names')
parser.add_argument('--nhw-only', action='store_true',
help='only include white people')
parser.add_argument('--fold', default=0, type=int,
help='fold index of cross validation')
# MoCo specific configs:
parser.add_argument('--rep-dim', default=128, type=int,
help='feature dimension (default: 128)')
parser.add_argument('--moco-dim', default=128, type=int,
help='feature dimension (default: 128)')
parser.add_argument('--moco-k', default=4096, type=int,
help='queue size; number of negative keys (default: 4098)')
parser.add_argument('--moco-m', default=0.999, type=float,
help='moco momentum of updating key encoder (default: 0.999)')
parser.add_argument('--moco-t', default=0.2, type=float,
help='softmax temperature (default: 0.2)')
# options for moco v2
parser.add_argument('--mlp', action='store_false',
help='use mlp head')
parser.add_argument('--cos', action='store_false',
help='use cosine lr schedule')
# experiment configs
parser.add_argument('--adj-thres', default=0.18, type=float,
help='patch adjacent threshold (default: 0.18)')
parser.add_argument('--k-neighbors', default=2, type=int,
help='top k nearest neighbors of the anchor patch in the atlas image.')
parser.add_argument('--beta', default=1.0, type=float,
help='scaling factor of neighbor InfoNCE loss. (default: 1.0)')
parser.add_argument('--warm-up', default=0, type=int,
help='number of warm-up epochs before training neighbor contrastive loss.')
parser.add_argument('--num-experts', default=8, type=int,
help='number of experts in CondConv layer.')
parser.add_argument('--num-coordinates', default=1, type=int,
help='number of input coordinates.')
parser.add_argument('--augmentation', default='agc',
help='initials of augmentation including: (f)lip, (a)ffine, (e)lastic, (g)uassian, (c)ontrast.')
parser.add_argument('--exp-name', default='debug_patch', type=str,
help='experiment name')
def main():
# read configurations
args = parser.parse_args()
# define and create the experiment directory
exp_dir = os.path.join('./ssl_exp', args.exp_name)
if not os.path.isdir(exp_dir):
os.makedirs(exp_dir, exist_ok=True)
# save configurations to a dictionary
with open(os.path.join(exp_dir, 'configs.json'), 'w') as f:
json.dump(vars(args), f, indent=2)
f.close()
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
torch.backends.cudnn.benchmark = True
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
print("Distributed:", args.distributed)
#ngpus_per_node = torch.cuda.device_count()
ngpus_per_node = args.npgus_per_node
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
args.gpu = gpu
# suppress printing if not master
if args.multiprocessing_distributed and args.gpu != 0:
def print_pass(*args):
pass
builtins.print = print_pass
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
if args.rank == 0:
configure(os.path.join('./ssl_exp', args.exp_name))
# create patch-level encoder
|
parser = argparse.ArgumentParser(description='3D CT Images Self-Supervised Training Patch-level')
parser.add_argument('--arch', metavar='ARCH', default='custom')
parser.add_argument('--workers', default=0, type=int, metavar='N',
help='patch-level number of data loading workers (default: 0)')
parser.add_argument('--epochs', default=20, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--batch-size', default=64, type=int,
metavar='N',
help='patch-level mini-batch size (default: 32), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.01, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--schedule', default=[120, 160], nargs='*', type=int,
help='learning rate schedule (when to drop lr by 10x)')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum of SGD solver')
parser.add_argument('--weight-decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest patch-level checkpoint (default: None)')
parser.add_argument('--world-size', default=1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=0, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://localhost:10000', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--seed', default=0, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_false',
help='use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
parser.add_argument('--npgus-per-node', default=2, type=int,
help='number of gpus per node.')
# image data configs:
parser.add_argument('--stage', default='training', type=str,
help='stage: training or testing')
parser.add_argument('--num-patch', default=581, type=int,
help='total number of patches in the atlas image.')
parser.add_argument('--root-dir', default='/ocean/projects/asc170022p/lisun/copd/gnn_shared/data/patch_data_32_6_reg_mask/',
help='root directory of registered images in COPD dataset')
parser.add_argument('--label-name', default=["FEV1pp_utah", "FEV1_FVC_utah", "finalGold"], nargs='+',
help='phenotype label names')
parser.add_argument('--label-name-set2', default=["Exacerbation_Frequency", "MMRCDyspneaScor"], nargs='+',
help='phenotype label names')
parser.add_argument('--visual-score', default=["Emph_Severity", "Emph_Paraseptal"], nargs='+',
help='phenotype label names')
parser.add_argument('--P2-Pheno', default=["Exacerbation_Frequency_P2"], nargs='+',
help='phenotype label names')
parser.add_argument('--nhw-only', action='store_true',
help='only include white people')
parser.add_argument('--fold', default=0, type=int,
help='fold index of cross validation')
# MoCo specific configs:
parser.add_argument('--rep-dim', default=128, type=int,
help='feature dimension (default: 128)')
parser.add_argument('--moco-dim', default=128, type=int,
help='feature dimension (default: 128)')
parser.add_argument('--moco-k', default=4096, type=int,
help='queue size; number of negative keys (default: 4098)')
parser.add_argument('--moco-m', default=0.999, type=float,
help='moco momentum of updating key encoder (default: 0.999)')
parser.add_argument('--moco-t', default=0.2, type=float,
help='softmax temperature (default: 0.2)')
# options for moco v2
parser.add_argument('--mlp', action='store_false',
help='use mlp head')
parser.add_argument('--cos', action='store_false',
help='use cosine lr schedule')
# experiment configs
parser.add_argument('--adj-thres', default=0.18, type=float,
help='patch adjacent threshold (default: 0.18)')
parser.add_argument('--k-neighbors', default=2, type=int,
help='top k nearest neighbors of the anchor patch in the atlas image.')
parser.add_argument('--beta', default=1.0, type=float,
help='scaling factor of neighbor InfoNCE loss. (default: 1.0)')
parser.add_argument('--warm-up', default=0, type=int,
help='number of warm-up epochs before training neighbor contrastive loss.')
parser.add_argument('--num-experts', default=8, type=int,
help='number of experts in CondConv layer.')
parser.add_argument('--num-coordinates', default=1, type=int,
help='number of input coordinates.')
parser.add_argument('--augmentation', default='agc',
help='initials of augmentation including: (f)lip, (a)ffine, (e)lastic, (g)uassian, (c)ontrast.')
parser.add_argument('--exp-name', default='debug_patch', type=str,
help='experiment name')
def main():
# read configurations
args = parser.parse_args()
# define and create the experiment directory
exp_dir = os.path.join('./ssl_exp', args.exp_name)
if not os.path.isdir(exp_dir):
os.makedirs(exp_dir, exist_ok=True)
# save configurations to a dictionary
with open(os.path.join(exp_dir, 'configs.json'), 'w') as f:
json.dump(vars(args), f, indent=2)
f.close()
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
torch.backends.cudnn.benchmark = True
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
print("Distributed:", args.distributed)
#ngpus_per_node = torch.cuda.device_count()
ngpus_per_node = args.npgus_per_node
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
args.gpu = gpu
# suppress printing if not master
if args.multiprocessing_distributed and args.gpu != 0:
def print_pass(*args):
pass
builtins.print = print_pass
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
if args.rank == 0:
configure(os.path.join('./ssl_exp', args.exp_name))
# create patch-level encoder | model = DrasCLR( | 1 | 2023-12-09 02:33:53+00:00 | 8k |
CHDers/Traffic-Flow-Prediction-with-Graph-Neural-Networks | traffic_prediction.py | [
{
"identifier": "LoadData",
"path": "traffic_dataset.py",
"snippet": "class LoadData(Dataset): # 这个就是把读入的数据处理成模型需要的训练数据和测试数据,一个一个样本能读取出来\n def __init__(self, data_path, num_nodes, divide_days, time_interval, history_length, train_mode):\n \"\"\"\n :param data_path: list, [\"graph file name\" , \"flow data file name\"], path to save the data file names.\n :param num_nodes: int, number of nodes.\n :param divide_days: list, [ days of train data, days of test data], list to divide the original data.\n :param time_interval: int, time interval between two traffic data records (mins).---5 mins\n :param history_length: int, length of history data to be used.\n :param train_mode: list, [\"train\", \"test\"].\n \"\"\"\n\n self.data_path = data_path\n self.num_nodes = num_nodes\n self.train_mode = train_mode\n self.train_days = divide_days[0] # 59-14 = 45, train_data\n self.test_days = divide_days[1] # 7*2 = 14 ,test_data\n self.history_length = history_length # 30/5 = 6, 历史长度为6\n self.time_interval = time_interval # 5 min\n\n self.one_day_length = int(24 * 60 / self.time_interval) # 一整天的数据量\n\n self.graph = get_adjacent_matrix(distance_file=data_path[0], num_nodes=num_nodes)\n\n self.flow_norm, self.flow_data = self.pre_process_data(data=get_flow_data(data_path[1]),\n norm_dim=1) # self.flow_norm为归一化的基\n\n def __len__(self): # 表示数据集的长度\n \"\"\"\n :return: length of dataset (number of samples).\n \"\"\"\n if self.train_mode == \"train\":\n return self.train_days * self.one_day_length - self.history_length # 训练的样本数 = 训练集总长度 - 历史数据长度\n elif self.train_mode == \"test\":\n return self.test_days * self.one_day_length # 每个样本都能测试,测试样本数 = 测试总长度\n else:\n raise ValueError(\"train mode: [{}] is not defined\".format(self.train_mode))\n\n def __getitem__(self, index): # 功能是如何取每一个样本 (x, y), index = [0, L1 - 1]这个是根据数据集的长度确定的\n \"\"\"\n :param index: int, range between [0, length - 1].\n :return:\n graph: torch.tensor, [N, N].\n data_x: torch.tensor, [N, H, D].\n data_y: torch.tensor, [N, 1, D].\n \"\"\"\n if self.train_mode == \"train\":\n index = index # 训练集的数据是从时间0开始的,这个是每一个流量数据,要和样本(x,y)区别\n elif self.train_mode == \"test\":\n index += self.train_days * self.one_day_length # 有一个偏移量\n else:\n raise ValueError(\"train mode: [{}] is not defined\".format(self.train_mode))\n\n data_x, data_y = LoadData.slice_data(self.flow_data, self.history_length, index, self.train_mode) # 这个就是样本(x,y)\n\n data_x = LoadData.to_tensor(data_x) # [N, H, D] # 转换成张量\n data_y = LoadData.to_tensor(data_y).unsqueeze(1) # [N, 1, D] # 转换成张量,在时间维度上扩维\n\n return {\"graph\": LoadData.to_tensor(self.graph), \"flow_x\": data_x, \"flow_y\": data_y} # 组成词典返回\n\n @staticmethod\n def slice_data(data, history_length, index, train_mode): # 根据历史长度,下标来划分数据样本\n \"\"\"\n :param data: np.array, normalized traffic data.\n :param history_length: int, length of history data to be used.\n :param index: int, index on temporal axis.\n :param train_mode: str, [\"train\", \"test\"].\n :return:\n data_x: np.array, [N, H, D].\n data_y: np.array [N, D].\n \"\"\"\n if train_mode == \"train\":\n start_index = index # 开始下标就是时间下标本身,这个是闭区间\n end_index = index + history_length # 结束下标,这个是开区间\n elif train_mode == \"test\":\n start_index = index - history_length # 开始下标,这个最后面贴图了,可以帮助理解\n end_index = index # 结束下标\n else:\n raise ValueError(\"train model {} is not defined\".format(train_mode))\n\n data_x = data[:, start_index: end_index] # 在切第二维,不包括end_index\n data_y = data[:, end_index] # 把上面的end_index取上\n\n return data_x, data_y\n\n @staticmethod\n def pre_process_data(data, norm_dim): # 预处理,归一化\n \"\"\"\n :param data: 
np.array,原始的交通流量数据\n :param norm_dim: int,归一化的维度,就是说在哪个维度上归一化,这里是在dim=1时间维度上\n :return:\n norm_base: list, [max_data, min_data], 这个是归一化的基.\n norm_data: np.array, normalized traffic data.\n \"\"\"\n norm_base = LoadData.normalize_base(data, norm_dim) # 计算 normalize base\n norm_data = LoadData.normalize_data(norm_base[0], norm_base[1], data) # 归一化后的流量数据\n\n return norm_base, norm_data # 返回基是为了恢复数据做准备的\n\n @staticmethod\n def normalize_base(data, norm_dim): # 计算归一化的基\n \"\"\"\n :param data: np.array, 原始的交通流量数据\n :param norm_dim: int, normalization dimension.归一化的维度,就是说在哪个维度上归一化,这里是在dim=1时间维度上\n :return:\n max_data: np.array\n min_data: np.array\n \"\"\"\n max_data = np.max(data, norm_dim, keepdims=True) # [N, T, D] , norm_dim=1, [N, 1, D], keepdims=True就保持了纬度一致\n min_data = np.min(data, norm_dim, keepdims=True)\n\n return max_data, min_data # 返回最大值和最小值\n\n @staticmethod\n def normalize_data(max_data, min_data, data): # 计算归一化的流量数据,用的是最大值最小值归一化法\n \"\"\"\n :param max_data: np.array, max data.\n :param min_data: np.array, min data.\n :param data: np.array, original traffic data without normalization.\n :return:\n np.array, normalized traffic data.\n \"\"\"\n mid = min_data\n base = max_data - min_data\n normalized_data = (data - mid) / base\n\n return normalized_data\n\n @staticmethod\n def recover_data(max_data, min_data, data): # 恢复数据时使用的,为可视化比较做准备的\n \"\"\"\n :param max_data: np.array, max data.\n :param min_data: np.array, min data.\n :param data: np.array, normalized data.\n :return:\n recovered_data: np.array, recovered data.\n \"\"\"\n mid = min_data\n base = max_data - min_data\n\n recovered_data = data * base + mid\n\n return recovered_data # 这个就是原始的数据\n\n @staticmethod\n def to_tensor(data):\n return torch.tensor(data, dtype=torch.float)"
},
{
"identifier": "Evaluation",
"path": "utils.py",
"snippet": "class Evaluation(object):\n def __init__(self):\n pass\n\n @staticmethod\n def mae_(target, output):\n return np.mean(np.abs(target - output))\n\n @staticmethod\n def mape_(target, output):\n return np.mean(np.abs(target - output) / (target + 5)) # 加5是因为target有可能为0,当然只要不太大,加几都行\n\n @staticmethod\n def rmse_(target, output):\n return np.sqrt(np.mean(np.power(target - output, 2)))\n\n @staticmethod\n def total(target, output):\n mae = Evaluation.mae_(target, output)\n mape = Evaluation.mape_(target, output)\n rmse = Evaluation.rmse_(target, output)\n\n return mae, mape, rmse"
},
{
"identifier": "visualize_result",
"path": "utils.py",
"snippet": "def visualize_result(h5_file, nodes_id, time_se, visualize_file):\n file_obj = h5py.File(h5_file, \"r\") # 获得文件对象,这个文件对象有两个keys:\"predict\"和\"target\"\n prediction = file_obj[\"predict\"][:][:, :, 0] # [N, T],切片,最后一维取第0列,所以变成二维了,要是[:, :, :1]那么维度不会缩减\n target = file_obj[\"target\"][:][:, :, 0] # [N, T],同上\n file_obj.close()\n\n plot_prediction = prediction[nodes_id][time_se[0]: time_se[1]] # [T1],将指定节点的,指定时间的数据拿出来\n plot_target = target[nodes_id][time_se[0]: time_se[1]] # [T1],同上\n\n plt.figure()\n plt.grid(True, linestyle=\"-.\", linewidth=0.5)\n plt.plot(np.array([t for t in range(time_se[1] - time_se[0])]), plot_prediction, ls=\"-\", marker=\" \", color=\"r\")\n plt.plot(np.array([t for t in range(time_se[1] - time_se[0])]), plot_target, ls=\"-\", marker=\" \", color=\"b\")\n\n plt.legend([\"prediction\", \"target\"], loc=\"upper right\")\n\n plt.axis([0, time_se[1] - time_se[0],\n np.min(np.array([np.min(plot_prediction), np.min(plot_target)])),\n np.max(np.array([np.max(plot_prediction), np.max(plot_target)]))])\n\n plt.savefig(visualize_file + \".png\")"
},
{
"identifier": "GCN",
"path": "gcnnet.py",
"snippet": "class GCN(nn.Module): # GCN模型,向空域的第一个图卷积\n def __init__(self, in_c, hid_c, out_c):\n super(GCN, self).__init__() # 表示继承父类的所有属性和方法\n self.linear_1 = nn.Linear(in_c, hid_c) # 定义一个线性层\n self.linear_2 = nn.Linear(hid_c, out_c) # 定义一个线性层\n self.act = nn.ReLU() # 定义激活函数\n\n def forward(self, data, device):\n graph_data = data[\"graph\"].to(device)[0] # [N, N] 邻接矩阵,并且将数据送入设备\n graph_data = GCN.process_graph(graph_data) # 变换邻接矩阵 \\hat A = D_{-1/2}*A*D_{-1/2}\n\n flow_x = data[\"flow_x\"].to(device) # [B, N, H, D] 流量数据\n\n B, N = flow_x.size(0), flow_x.size(1) # batch_size、节点数\n\n flow_x = flow_x.view(B, N, -1) # [B, N, H*D] H = 6, D = 1把最后两维缩减到一起了,这个就是把历史时间的特征放一起\n\n # 第一个图卷积层\n output_1 = self.linear_1(flow_x) # [B, N, hid_C],这个就是 WX,其中W是可学习的参数,X是输入的流量数据(就是flow_x)\n output_1 = self.act(torch.matmul(graph_data, output_1)) # [B, N, N] ,[B, N, hid_c],就是 \\hat AWX\n\n # 第二个图卷积层\n output_2 = self.linear_2(output_1) # WX\n output_2 = self.act(torch.matmul(graph_data, output_2)) # [B, N, 1, Out_C] , 就是 \\hat AWX\n\n return output_2.unsqueeze(2) # 第2维的维度扩张\n\n @staticmethod\n def process_graph(graph_data): # 这个就是在原始的邻接矩阵之上,再次变换,也就是\\hat A = D_{-1/2}*A*D_{-1/2}\n N = graph_data.size(0) # 获得节点的个数\n matrix_i = torch.eye(N, dtype=torch.float, device=graph_data.device) # 定义[N, N]的单位矩阵\n graph_data += matrix_i # [N, N] ,就是 A+I\n\n degree_matrix = torch.sum(graph_data, dim=1, keepdim=False) # [N],计算度矩阵,塌陷成向量,其实就是将上面的A+I每行相加\n degree_matrix = degree_matrix.pow(-1) # 计算度矩阵的逆,若为0,-1次方可能计算结果为无穷大的数\n degree_matrix[degree_matrix == float(\"inf\")] = 0. # 让无穷大的数为0\n\n degree_matrix = torch.diag(degree_matrix) # 转换成对角矩阵\n\n return torch.mm(degree_matrix, graph_data) # 返回 \\hat A=D^(-1) * A ,这个等价于\\hat A = D_{-1/2}*A*D_{-1/2}"
},
{
"identifier": "ChebNet",
"path": "chebnet.py",
"snippet": "class ChebNet(nn.Module): # 定义图网络的类\n def __init__(self, in_c, hid_c, out_c, K):\n \"\"\"\n :param in_c: int, number of input channels.\n :param hid_c: int, number of hidden channels.class\n :param out_c: int, number of output channels.\n :param K:\n \"\"\"\n super(ChebNet, self).__init__()\n self.conv1 = ChebConv(in_c=in_c, out_c=hid_c, K=K) # 第一个图卷积层\n self.conv2 = ChebConv(in_c=hid_c, out_c=out_c, K=K) # 第二个图卷积层\n self.act = nn.ReLU() # 激活函数\n\n def forward(self, data, device):\n graph_data = data[\"graph\"].to(device)[0] # [N, N]\n flow_x = data[\"flow_x\"].to(device) # [B, N, H, D] # B是batch size,N是节点数,H是历史数据长度,D是特征维度\n\n B, N = flow_x.size(0), flow_x.size(1)\n\n flow_x = flow_x.view(B, N, -1) # [B, N, H*D] H = 6, D = 1把最后两维缩减到一起了,这个就是把历史时间的特征放一起\n\n output_1 = self.act(self.conv1(flow_x, graph_data))\n output_2 = self.act(self.conv2(output_1, graph_data))\n\n return output_2.unsqueeze(2) # 在第2维度,也就是时间维度上做扩张"
},
{
"identifier": "GATNet",
"path": "gat.py",
"snippet": "class GATNet(nn.Module):\n def __init__(self, in_c, hid_c, out_c, n_heads):\n super(GATNet, self).__init__()\n self.subnet = GATSubNet(in_c, hid_c, out_c, n_heads)\n\n def forward(self, data, device):\n graph = data[\"graph\"][0].to(device) # [N, N]\n flow = data[\"flow_x\"] # [B, N, T, C]\n flow = flow.to(device) # 将流量数据送入设备\n\n B, N = flow.size(0), flow.size(1)\n flow = flow.view(B, N, -1) # [B, N, T * C]\n \"\"\"\n 上面是将这一段的时间的特征数据摊平做为特征,这种做法实际上忽略了时序上的连续性\n 这种做法可行,但是比较粗糙,当然也可以这么做:\n flow[:, :, 0] ... flow[:, :, T-1] 则就有T个[B, N, C]这样的张量,也就是 [B, N, C]*T\n 每一个张量都用一个SubNet来表示,则一共有T个SubNet,初始化定义 self.subnet = [GATSubNet(...) for _ in range(T)]\n 然后用nn.ModuleList将SubNet分别拎出来处理,参考多头注意力的处理,同理\n\n \"\"\"\n\n prediction = self.subnet(flow, graph).unsqueeze(2) # [B, N, 1, C],这个1加上就表示预测的是未来一个时刻\n\n return prediction"
}
] | import os
import time
import h5py
import torch
import numpy as np
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import warnings
from torch.utils.data import DataLoader
from traffic_dataset import LoadData
from utils import Evaluation # 三种评价指标以及可视化类
from utils import visualize_result
from gcnnet import GCN
from chebnet import ChebNet
from gat import GATNet
from rich import print
from tqdm import tqdm | 4,888 | # @Time : 2020/8/25
# @Author : LeronQ
# @github : https://github.com/LeronQ
# Pytorch-基于GCN/GAT/Chebnet图神经网络实现的交通流预测(附代码): https://blog.csdn.net/yilulvxing/article/details/110306999
# traffic_prediction.py
# 这个就是上一小节处理数据自己写的的类,封装在traffic_dataset.py文件中
warnings.filterwarnings('ignore')
def main():
os.environ["CUDA_VISIBLE_DEVICES"] = "0" # 配置GPU,因为可能有多个GPU,这里用了第0号GPU
# 第一步:准备数据(上一节已经准备好了,这里只是调用而已,链接在最开头)
| # @Time : 2020/8/25
# @Author : LeronQ
# @github : https://github.com/LeronQ
# Pytorch-基于GCN/GAT/Chebnet图神经网络实现的交通流预测(附代码): https://blog.csdn.net/yilulvxing/article/details/110306999
# traffic_prediction.py
# 这个就是上一小节处理数据自己写的的类,封装在traffic_dataset.py文件中
warnings.filterwarnings('ignore')
def main():
os.environ["CUDA_VISIBLE_DEVICES"] = "0" # 配置GPU,因为可能有多个GPU,这里用了第0号GPU
# 第一步:准备数据(上一节已经准备好了,这里只是调用而已,链接在最开头) | train_data = LoadData(data_path=["PeMS_04/PeMS04.csv", "PeMS_04/PeMS04.npz"], num_nodes=307, divide_days=[45, 14], | 0 | 2023-12-05 07:25:35+00:00 | 8k |
casiatao/PAD | detection/detectron2/modeling/backbone/vit_adapt.py | [
{
"identifier": "Backbone",
"path": "detection/detectron2/modeling/backbone/backbone.py",
"snippet": "class Backbone(nn.Module, metaclass=ABCMeta):\n \"\"\"\n Abstract base class for network backbones.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n The `__init__` method of any subclass can specify its own set of arguments.\n \"\"\"\n super().__init__()\n\n @abstractmethod\n def forward(self):\n \"\"\"\n Subclasses must override this method, but adhere to the same return type.\n\n Returns:\n dict[str->Tensor]: mapping from feature name (e.g., \"res2\") to tensor\n \"\"\"\n pass\n\n @property\n def size_divisibility(self) -> int:\n \"\"\"\n Some backbones require the input height and width to be divisible by a\n specific integer. This is typically true for encoder / decoder type networks\n with lateral connection (e.g., FPN) for which feature maps need to match\n dimension in the \"bottom up\" and \"top down\" paths. Set to 0 if no specific\n input size divisibility is required.\n \"\"\"\n return 0\n\n @property\n def padding_constraints(self) -> Dict[str, int]:\n \"\"\"\n This property is a generalization of size_divisibility. Some backbones and training\n recipes require specific padding constraints, such as enforcing divisibility by a specific\n integer (e.g., FPN) or padding to a square (e.g., ViTDet with large-scale jitter\n in :paper:vitdet). `padding_constraints` contains these optional items like:\n {\n \"size_divisibility\": int,\n \"square_size\": int,\n # Future options are possible\n }\n `size_divisibility` will read from here if presented and `square_size` indicates the\n square padding size if `square_size` > 0.\n\n TODO: use type of Dict[str, int] to avoid torchscipt issues. The type of padding_constraints\n could be generalized as TypedDict (Python 3.8+) to support more types in the future.\n \"\"\"\n return {}\n\n def output_shape(self):\n \"\"\"\n Returns:\n dict[str->ShapeSpec]\n \"\"\"\n # this is a backward-compatible default\n return {\n name: ShapeSpec(\n channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]\n )\n for name in self._out_features\n }"
},
{
"identifier": "PatchEmbed",
"path": "detection/detectron2/modeling/backbone/utils.py",
"snippet": "class PatchEmbed(nn.Module):\n \"\"\"\n Image to Patch Embedding.\n \"\"\"\n\n def __init__(\n self, kernel_size=(16, 16), stride=(16, 16), padding=(0, 0), in_chans=3, embed_dim=768\n ):\n \"\"\"\n Args:\n kernel_size (Tuple): kernel size of the projection layer.\n stride (Tuple): stride of the projection layer.\n padding (Tuple): padding size of the projection layer.\n in_chans (int): Number of input image channels.\n embed_dim (int): embed_dim (int): Patch embedding dimension.\n \"\"\"\n super().__init__()\n\n self.proj = nn.Conv2d(\n in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding\n )\n\n def forward(self, x):\n x = self.proj(x)\n # B C H W -> B H W C\n x = x.permute(0, 2, 3, 1)\n return x"
},
{
"identifier": "add_decomposed_rel_pos",
"path": "detection/detectron2/modeling/backbone/utils.py",
"snippet": "def add_decomposed_rel_pos(attn, q, rel_pos_h, rel_pos_w, q_size, k_size):\n \"\"\"\n Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.\n https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950\n Args:\n attn (Tensor): attention map.\n q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C).\n rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis.\n rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis.\n q_size (Tuple): spatial sequence size of query q with (q_h, q_w).\n k_size (Tuple): spatial sequence size of key k with (k_h, k_w).\n\n Returns:\n attn (Tensor): attention map with added relative positional embeddings.\n \"\"\"\n q_h, q_w = q_size\n k_h, k_w = k_size\n Rh = get_rel_pos(q_h, k_h, rel_pos_h)\n Rw = get_rel_pos(q_w, k_w, rel_pos_w)\n\n B, _, dim = q.shape\n r_q = q.reshape(B, q_h, q_w, dim)\n rel_h = torch.einsum(\"bhwc,hkc->bhwk\", r_q, Rh)\n rel_w = torch.einsum(\"bhwc,wkc->bhwk\", r_q, Rw)\n\n attn = (\n attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :]\n ).view(B, q_h * q_w, k_h * k_w)\n\n return attn"
},
{
"identifier": "get_abs_pos",
"path": "detection/detectron2/modeling/backbone/utils.py",
"snippet": "def get_abs_pos(abs_pos, has_cls_token, hw):\n \"\"\"\n Calculate absolute positional embeddings. If needed, resize embeddings and remove cls_token\n dimension for the original embeddings.\n Args:\n abs_pos (Tensor): absolute positional embeddings with (1, num_position, C).\n has_cls_token (bool): If true, has 1 embedding in abs_pos for cls token.\n hw (Tuple): size of input image tokens.\n\n Returns:\n Absolute positional embeddings after processing with shape (1, H, W, C)\n \"\"\"\n h, w = hw\n if has_cls_token:\n abs_pos = abs_pos[:, 1:]\n xy_num = abs_pos.shape[1]\n size = int(math.sqrt(xy_num))\n assert size * size == xy_num\n\n if size != h or size != w:\n new_abs_pos = F.interpolate(\n abs_pos.reshape(1, size, size, -1).permute(0, 3, 1, 2),\n size=(h, w),\n mode=\"bicubic\",\n align_corners=False,\n )\n\n return new_abs_pos.permute(0, 2, 3, 1)\n else:\n return abs_pos.reshape(1, h, w, -1)"
},
{
"identifier": "window_partition",
"path": "detection/detectron2/modeling/backbone/utils.py",
"snippet": "def window_partition(x, window_size):\n \"\"\"\n Partition into non-overlapping windows with padding if needed.\n Args:\n x (tensor): input tokens with [B, H, W, C].\n window_size (int): window size.\n\n Returns:\n windows: windows after partition with [B * num_windows, window_size, window_size, C].\n (Hp, Wp): padded height and width before partition\n \"\"\"\n B, H, W, C = x.shape\n\n pad_h = (window_size - H % window_size) % window_size\n pad_w = (window_size - W % window_size) % window_size\n if pad_h > 0 or pad_w > 0:\n x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))\n Hp, Wp = H + pad_h, W + pad_w\n\n x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)\n windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)\n return windows, (Hp, Wp)"
},
{
"identifier": "window_unpartition",
"path": "detection/detectron2/modeling/backbone/utils.py",
"snippet": "def window_unpartition(windows, window_size, pad_hw, hw):\n \"\"\"\n Window unpartition into original sequences and removing padding.\n Args:\n x (tensor): input tokens with [B * num_windows, window_size, window_size, C].\n window_size (int): window size.\n pad_hw (Tuple): padded height and width (Hp, Wp).\n hw (Tuple): original height and width (H, W) before padding.\n\n Returns:\n x: unpartitioned sequences with [B, H, W, C].\n \"\"\"\n Hp, Wp = pad_hw\n H, W = hw\n B = windows.shape[0] // (Hp * Wp // window_size // window_size)\n x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1)\n x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)\n\n if Hp > H or Wp > W:\n x = x[:, :H, :W, :].contiguous()\n return x"
}
] | import logging
import math
import fvcore.nn.weight_init as weight_init
import torch
import torch.nn as nn
import torch.nn.functional as F
from detectron2.layers import CNNBlockBase, Conv2d, get_norm
from detectron2.modeling.backbone.fpn import _assert_strides_are_log2_contiguous
from .backbone import Backbone
from .utils import (
PatchEmbed,
add_decomposed_rel_pos,
get_abs_pos,
window_partition,
window_unpartition,
)
from timm.models.layers import DropPath, Mlp
from fairscale.nn.checkpoint import checkpoint_wrapper | 4,957 | window_block_indexes=(),
residual_block_indexes=(),
use_act_checkpoint=False,
pretrain_img_size=224,
pretrain_use_cls_token=True,
out_feature="last_feat",
down_size=64,
adapt_scalar="frozen",
init_value="0.0",
layernorm_option="in",
patch_wise_scalar=False,
fusion_method="concat",
):
"""
Args:
img_size (int): Input image size.
patch_size (int): Patch size.
in_chans (int): Number of input image channels.
embed_dim (int): Patch embedding dimension.
depth (int): Depth of ViT.
num_heads (int): Number of attention heads in each ViT block.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool): If True, add a learnable bias to query, key, value.
drop_path_rate (float): Stochastic depth rate.
norm_layer (nn.Module): Normalization layer.
act_layer (nn.Module): Activation layer.
use_abs_pos (bool): If True, use absolute positional embeddings.
use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
window_size (int): Window size for window attention blocks.
window_block_indexes (list): Indexes for blocks using window attention.
residual_block_indexes (list): Indexes for blocks using conv propagation.
use_act_checkpoint (bool): If True, use activation checkpointing.
pretrain_img_size (int): input image size for pretraining models.
            pretrain_use_cls_token (bool): If True, pretraining models use class token.
out_feature (str): name of the feature from the last block.
"""
super().__init__()
self.pretrain_use_cls_token = pretrain_use_cls_token
self.patch_embed = PatchEmbed(
kernel_size=(patch_size, patch_size),
stride=(patch_size, patch_size),
in_chans=in_chans,
embed_dim=embed_dim,
)
if use_abs_pos:
# Initialize absolute positional embedding with pretrain image size.
num_patches = (pretrain_img_size // patch_size) * (pretrain_img_size // patch_size)
num_positions = (num_patches + 1) if pretrain_use_cls_token else num_patches
self.pos_embed = nn.Parameter(torch.zeros(1, num_positions, embed_dim))
else:
self.pos_embed = None
# stochastic depth decay rule
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
self.blocks = nn.ModuleList()
for i in range(depth):
block = Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
drop_path=dpr[i],
norm_layer=norm_layer,
act_layer=act_layer,
use_rel_pos=use_rel_pos,
rel_pos_zero_init=rel_pos_zero_init,
window_size=window_size if i in window_block_indexes else 0,
use_residual_block=i in residual_block_indexes,
input_size=(img_size // patch_size, img_size // patch_size),
down_size=down_size,
adapt_scalar=adapt_scalar,
init_value=init_value,
layernorm_option=layernorm_option,
patch_wise_scalar=patch_wise_scalar,
fusion_method=fusion_method,
)
if use_act_checkpoint:
# TODO: use torch.utils.checkpoint
block = checkpoint_wrapper(block)
self.blocks.append(block)
self._out_feature_channels = {out_feature: embed_dim}
self._out_feature_strides = {out_feature: patch_size}
self._out_features = [out_feature]
if self.pos_embed is not None:
nn.init.trunc_normal_(self.pos_embed, std=0.02)
self.apply(self._init_weights)
# init adapter
for i, blk in enumerate(self.blocks):
nn.init.kaiming_uniform_(blk.adaptmlp.down_proj.weight, a=math.sqrt(5))
nn.init.zeros_(blk.adaptmlp.up_proj.weight)
nn.init.zeros_(blk.adaptmlp.down_proj.bias)
nn.init.zeros_(blk.adaptmlp.up_proj.bias)
if patch_wise_scalar:
nn.init.zeros_(blk.scalar_pred.weight)
if blk.scalar_pred.bias is not None:
nn.init.zeros_(blk.scalar_pred.bias)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
nn.init.trunc_normal_(m.weight, std=0.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def forward(self, x):
x = self.patch_embed(x)
if self.pos_embed is not None:
|
logger = logging.getLogger(__name__)
__all__ = ["ViT_adapt", "SimpleFeaturePyramid", "get_vit_lr_decay_rate"]
class Adapter(nn.Module):
def __init__(self,
d_model,
down_size = 64,
dropout=0.0,
adapter_scalar="frozen",
init_value="0.0",
adapter_layernorm_option="in",
patch_wise_scalar=False):
super().__init__()
self.n_embd = d_model
self.down_size = down_size
#_before
self.adapter_layernorm_option = adapter_layernorm_option
self.adapter_layer_norm_before = None
if adapter_layernorm_option == "in" or adapter_layernorm_option == "out":
self.adapter_layer_norm_before = nn.LayerNorm(self.n_embd)
self.patch_wise_scalar = patch_wise_scalar
if patch_wise_scalar:
self.scale = None
else:
if adapter_scalar == "learnable_scalar":
self.scale = nn.Parameter(torch.ones(1) * 0.5)
else:
if init_value != "0.0":
self.scale = float(init_value)
else:
self.register_buffer('scale', torch.ones(1) * 0.5)
self.down_proj = nn.Linear(self.n_embd, self.down_size)
self.non_linear_func = nn.ReLU()
self.up_proj = nn.Linear(self.down_size, self.n_embd)
self.dropout = dropout
def forward(self, x, add_residual=False, residual=None):
residual = x if residual is None else residual
if self.adapter_layernorm_option == 'in':
x = self.adapter_layer_norm_before(x)
down = self.down_proj(x)
down = self.non_linear_func(down)
down = nn.functional.dropout(down, p=self.dropout, training=self.training)
up = self.up_proj(down)
if add_residual:
output = up + residual
else:
output = up
return output, self.scale
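# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hypothetical example of the Adapter above applied to a (B, H, W, C)
# token grid; the concrete sizes and the learnable scalar are assumptions made
# purely for illustration.
#
#   adapter = Adapter(d_model=768, down_size=64, adapter_scalar="learnable_scalar")
#   tokens = torch.zeros(2, 14, 14, 768)   # dummy ViT tokens
#   up, scale = adapter(tokens)            # bottleneck output and its scaling factor
#   out = tokens + up * scale              # residual fusion, as done in Block.forward below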
class Attention(nn.Module):
"""Multi-head Attention block with relative position embeddings."""
def __init__(
self,
dim,
num_heads=8,
qkv_bias=True,
use_rel_pos=False,
rel_pos_zero_init=True,
input_size=None,
):
"""
Args:
dim (int): Number of input channels.
num_heads (int): Number of attention heads.
            qkv_bias (bool): If True, add a learnable bias to query, key, value.
            use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
input_size (int or None): Input resolution for calculating the relative positional
parameter size.
"""
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim**-0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.proj = nn.Linear(dim, dim)
self.use_rel_pos = use_rel_pos
if self.use_rel_pos:
# initialize relative positional embeddings
self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim))
self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim))
if not rel_pos_zero_init:
nn.init.trunc_normal_(self.rel_pos_h, std=0.02)
nn.init.trunc_normal_(self.rel_pos_w, std=0.02)
def forward(self, x):
B, H, W, _ = x.shape
# qkv with shape (3, B, nHead, H * W, C)
qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
# q, k, v with shape (B * nHead, H * W, C)
q, k, v = qkv.reshape(3, B * self.num_heads, H * W, -1).unbind(0)
attn = (q * self.scale) @ k.transpose(-2, -1)
if self.use_rel_pos:
attn = add_decomposed_rel_pos(attn, q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W))
attn = attn.softmax(dim=-1)
x = (attn @ v).view(B, self.num_heads, H, W, -1).permute(0, 2, 3, 1, 4).reshape(B, H, W, -1)
x = self.proj(x)
return x
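# --- Illustrative usage sketch (not part of the original module) ---
# A hypothetical smoke test for the Attention block above, showing the expected
# (B, H, W, C) layout; the sizes and the use of relative positions are assumptions.
#
#   attn = Attention(dim=768, num_heads=12, use_rel_pos=True, input_size=(14, 14))
#   x = torch.zeros(2, 14, 14, 768)
#   y = attn(x)                            # output keeps the shape (2, 14, 14, 768)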
class ResBottleneckBlock(CNNBlockBase):
"""
The standard bottleneck residual block without the last activation layer.
It contains 3 conv layers with kernels 1x1, 3x3, 1x1.
"""
def __init__(
self,
in_channels,
out_channels,
bottleneck_channels,
norm="LN",
act_layer=nn.GELU,
):
"""
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
bottleneck_channels (int): number of output channels for the 3x3
"bottleneck" conv layers.
norm (str or callable): normalization for all conv layers.
See :func:`layers.get_norm` for supported format.
act_layer (callable): activation for all conv layers.
"""
super().__init__(in_channels, out_channels, 1)
self.conv1 = Conv2d(in_channels, bottleneck_channels, 1, bias=False)
self.norm1 = get_norm(norm, bottleneck_channels)
self.act1 = act_layer()
self.conv2 = Conv2d(
bottleneck_channels,
bottleneck_channels,
3,
padding=1,
bias=False,
)
self.norm2 = get_norm(norm, bottleneck_channels)
self.act2 = act_layer()
self.conv3 = Conv2d(bottleneck_channels, out_channels, 1, bias=False)
self.norm3 = get_norm(norm, out_channels)
for layer in [self.conv1, self.conv2, self.conv3]:
weight_init.c2_msra_fill(layer)
for layer in [self.norm1, self.norm2]:
layer.weight.data.fill_(1.0)
layer.bias.data.zero_()
# zero init last norm layer.
self.norm3.weight.data.zero_()
self.norm3.bias.data.zero_()
def forward(self, x):
out = x
for layer in self.children():
out = layer(out)
out = x + out
return out
class Block(nn.Module):
"""Transformer blocks with support of window attention and residual propagation blocks"""
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.0,
qkv_bias=True,
drop_path=0.0,
norm_layer=nn.LayerNorm,
act_layer=nn.GELU,
use_rel_pos=False,
rel_pos_zero_init=True,
window_size=0,
use_residual_block=False,
input_size=None,
down_size=64,
adapt_scalar="frozen",
init_value="0.0",
layernorm_option="in",
patch_wise_scalar=False,
fusion_method='concat',
):
"""
Args:
dim (int): Number of input channels.
num_heads (int): Number of attention heads in each ViT block.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool): If True, add a learnable bias to query, key, value.
drop_path (float): Stochastic depth rate.
norm_layer (nn.Module): Normalization layer.
act_layer (nn.Module): Activation layer.
use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
            window_size (int): Window size for window attention blocks. If it equals 0, window
                attention is not used.
use_residual_block (bool): If True, use a residual block after the MLP block.
input_size (int or None): Input resolution for calculating the relative positional
parameter size.
"""
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
use_rel_pos=use_rel_pos,
rel_pos_zero_init=rel_pos_zero_init,
input_size=input_size if window_size == 0 else (window_size, window_size),
)
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer)
self.patch_wise_scalar = patch_wise_scalar
self.fusion_method = fusion_method
self.window_size = window_size
self.adapt_scalar = adapt_scalar
self.adaptmlp = Adapter(dim, down_size=down_size, dropout=0.1,
adapter_scalar=adapt_scalar, init_value=init_value, adapter_layernorm_option=layernorm_option,
patch_wise_scalar=patch_wise_scalar)
if self.patch_wise_scalar:
if self.fusion_method == 'concat':
self.scalar_pred = nn.Linear(dim * 2, 1, bias=False)
elif self.fusion_method == 'sum' or self.fusion_method == 'side' or self.fusion_method == 'gside':
self.scalar_pred = nn.Linear(dim, 1, bias=False)
else:
raise ValueError("Only support fusion methods of concat and sum!")
self.scalar_act_layer = nn.Sigmoid()
self.use_residual_block = use_residual_block
if use_residual_block:
# Use a residual block with bottleneck channel as dim // 2
self.residual = ResBottleneckBlock(
in_channels=dim,
out_channels=dim,
bottleneck_channels=dim // 2,
norm="LN",
act_layer=act_layer,
)
def forward(self, x):
shortcut = x
x = self.norm1(x)
# Window partition
if self.window_size > 0:
H, W = x.shape[1], x.shape[2]
x, pad_hw = window_partition(x, self.window_size)
x = self.attn(x)
# Reverse window partition
if self.window_size > 0:
x = window_unpartition(x, self.window_size, pad_hw, (H, W))
x = shortcut + self.drop_path(x)
adapt_x, scale = self.adaptmlp(x, add_residual=False)
origin_mlp_x = self.mlp(self.norm2(x))
if self.patch_wise_scalar:
if self.fusion_method == 'concat':
scalar = self.scalar_pred(torch.concat([origin_mlp_x, adapt_x], axis=-1))
elif self.fusion_method == "sum":
scalar = self.scalar_pred(origin_mlp_x + adapt_x)
elif self.fusion_method == 'side':
scalar = self.scalar_pred(adapt_x)
elif self.fusion_method == 'gside':
scalar = self.scalar_pred(origin_mlp_x)
scalar = self.scalar_act_layer(scalar)
else:
scalar = scale
adapt_x = adapt_x * scalar
x = x + adapt_x
x = x + self.drop_path(origin_mlp_x)
if self.use_residual_block:
x = self.residual(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1)
return x
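# --- Illustrative usage sketch (not part of the original module) ---
# A hypothetical standalone use of Block, illustrating how the frozen attention/MLP
# path and the parallel adapter path are fused; all sizes are assumptions.
#
#   blk = Block(dim=768, num_heads=12, window_size=14, input_size=(14, 14),
#               down_size=64, adapt_scalar="learnable_scalar")
#   x = torch.zeros(2, 56, 56, 768)        # token grid; 14x14 windows are partitioned internally
#   y = blk(x)                             # output keeps the (B, H, W, C) shape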
class ViT_adapt(Backbone):
"""
This module implements Vision Transformer (ViT) backbone in :paper:`vitdet`.
"Exploring Plain Vision Transformer Backbones for Object Detection",
https://arxiv.org/abs/2203.16527
"""
def __init__(
self,
img_size=1024,
patch_size=16,
in_chans=3,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.0,
qkv_bias=True,
drop_path_rate=0.0,
norm_layer=nn.LayerNorm,
act_layer=nn.GELU,
use_abs_pos=True,
use_rel_pos=False,
rel_pos_zero_init=True,
window_size=0,
window_block_indexes=(),
residual_block_indexes=(),
use_act_checkpoint=False,
pretrain_img_size=224,
pretrain_use_cls_token=True,
out_feature="last_feat",
down_size=64,
adapt_scalar="frozen",
init_value="0.0",
layernorm_option="in",
patch_wise_scalar=False,
fusion_method="concat",
):
"""
Args:
img_size (int): Input image size.
patch_size (int): Patch size.
in_chans (int): Number of input image channels.
embed_dim (int): Patch embedding dimension.
depth (int): Depth of ViT.
num_heads (int): Number of attention heads in each ViT block.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool): If True, add a learnable bias to query, key, value.
drop_path_rate (float): Stochastic depth rate.
norm_layer (nn.Module): Normalization layer.
act_layer (nn.Module): Activation layer.
use_abs_pos (bool): If True, use absolute positional embeddings.
use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
window_size (int): Window size for window attention blocks.
window_block_indexes (list): Indexes for blocks using window attention.
residual_block_indexes (list): Indexes for blocks using conv propagation.
use_act_checkpoint (bool): If True, use activation checkpointing.
pretrain_img_size (int): input image size for pretraining models.
            pretrain_use_cls_token (bool): If True, pretraining models use class token.
out_feature (str): name of the feature from the last block.
"""
super().__init__()
self.pretrain_use_cls_token = pretrain_use_cls_token
self.patch_embed = PatchEmbed(
kernel_size=(patch_size, patch_size),
stride=(patch_size, patch_size),
in_chans=in_chans,
embed_dim=embed_dim,
)
if use_abs_pos:
# Initialize absolute positional embedding with pretrain image size.
num_patches = (pretrain_img_size // patch_size) * (pretrain_img_size // patch_size)
num_positions = (num_patches + 1) if pretrain_use_cls_token else num_patches
self.pos_embed = nn.Parameter(torch.zeros(1, num_positions, embed_dim))
else:
self.pos_embed = None
# stochastic depth decay rule
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
self.blocks = nn.ModuleList()
for i in range(depth):
block = Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
drop_path=dpr[i],
norm_layer=norm_layer,
act_layer=act_layer,
use_rel_pos=use_rel_pos,
rel_pos_zero_init=rel_pos_zero_init,
window_size=window_size if i in window_block_indexes else 0,
use_residual_block=i in residual_block_indexes,
input_size=(img_size // patch_size, img_size // patch_size),
down_size=down_size,
adapt_scalar=adapt_scalar,
init_value=init_value,
layernorm_option=layernorm_option,
patch_wise_scalar=patch_wise_scalar,
fusion_method=fusion_method,
)
if use_act_checkpoint:
# TODO: use torch.utils.checkpoint
block = checkpoint_wrapper(block)
self.blocks.append(block)
self._out_feature_channels = {out_feature: embed_dim}
self._out_feature_strides = {out_feature: patch_size}
self._out_features = [out_feature]
if self.pos_embed is not None:
nn.init.trunc_normal_(self.pos_embed, std=0.02)
self.apply(self._init_weights)
# init adapter
for i, blk in enumerate(self.blocks):
nn.init.kaiming_uniform_(blk.adaptmlp.down_proj.weight, a=math.sqrt(5))
nn.init.zeros_(blk.adaptmlp.up_proj.weight)
nn.init.zeros_(blk.adaptmlp.down_proj.bias)
nn.init.zeros_(blk.adaptmlp.up_proj.bias)
if patch_wise_scalar:
nn.init.zeros_(blk.scalar_pred.weight)
if blk.scalar_pred.bias is not None:
nn.init.zeros_(blk.scalar_pred.bias)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
nn.init.trunc_normal_(m.weight, std=0.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def forward(self, x):
x = self.patch_embed(x)
if self.pos_embed is not None: | x = x + get_abs_pos( | 3 | 2023-12-13 13:14:36+00:00 | 8k |
nickruggeri/hypergraph-message-passing | src/model/hypergraph_block_model.py | [
{
"identifier": "IncidenceHypergraph",
"path": "src/data/representation/incidence_hypergraph.py",
"snippet": "class IncidenceHypergraph(BinaryHypergraph):\n \"\"\"Representation of a binary hypergraph via its incidence matrix.\n The incidence matrix B is of size N x E, with N number of nodes in the hypergraph\n and E number of hyperedges. For each hyperedge e, the column of B with index e\n contains ones for the nodes belonging to the hyperedge e, zeros for all other nodes.\n \"\"\"\n\n def __init__(\n self,\n B: np.ndarray | sparse.spmatrix,\n sort_indices: bool = True,\n ):\n \"\"\"\n Parameters\n ----------\n B: incidence matrix, of shape (N, E).\n sort_indices: sort the indices in the internal sparse matrix representation.\n \"\"\"\n self.B = self._check_and_convert_incidence(B, sort_indices)\n self.N, self.E = self.B.shape\n\n hye_lengths = self.B.sum(axis=0)\n hye_counter = dict(Counter(hye_lengths))\n self.hye_count = hye_counter\n self.max_hye_size = max(hye_counter.keys())\n\n def get_repr(self) -> TYPE_INCIDENCE:\n return self.B\n\n def get_binary_incidence_matrix(self) -> TYPE_INCIDENCE:\n return self.B\n\n def sub_hyg(\n self,\n hyperedge_idx: np.ndarray | None = None,\n ) -> IncidenceHypergraph:\n \"\"\"Produce a sub-hypergraph where only the specified hyperedges are present.\n\n Parameters\n ----------\n hyperedge_idx: the list of the hyperedges to keep, specified by their indices.\n\n Returns\n -------\n The sub-hypergraph instance.\n \"\"\"\n if hyperedge_idx is None:\n return self\n\n B = self.B[:, hyperedge_idx]\n\n return IncidenceHypergraph(B)\n\n def __iter__(self) -> Iterable[np.ndarray]:\n return incidence_matrix_to_hye(self.B)\n\n def __str__(self):\n return f\"{self.__class__.__name__} with N={self.N}, E={self.E}\"\n\n @classmethod\n def load_from_txt(\n cls,\n hye_file: str | Path,\n N: int | None = None,\n ) -> IncidenceHypergraph:\n \"\"\"Load a IncidenceHypergraph instance from a txt file, containing the list of\n hyperedges.\n\n Parameters\n ----------\n hye_file: text file containing the hyperedges.\n N: number of nodes in the hypergraph.\n\n Returns\n -------\n An instance of IncidenceHypergraph.\n \"\"\"\n with open(hye_file, \"r\") as file:\n hye = (map(int, line.split(\" \")) for line in file.readlines())\n\n return cls.load_from_hye_list(hye, N)\n\n @classmethod\n def load_from_hye_list(\n cls, hye_list: list[Iterable[int]], N: int | None\n ) -> IncidenceHypergraph:\n hye = list(set(tuple(sorted(set(hyperedge))) for hyperedge in hye_list))\n shape = (N, len(hye)) if N else None\n B = hye_list_to_binary_incidence(hye, shape=shape)\n\n return IncidenceHypergraph(B)\n\n @staticmethod\n def _check_and_convert_incidence(\n incidence: np.ndarray | sparse.spmatrix, sort_indices: bool\n ) -> TYPE_INCIDENCE:\n incidence = TYPE_INCIDENCE(incidence)\n # When converting to other sparse types, repeated entries are summed. In such\n # case, there could be entries different from 1. Set them to 1.\n # Similarly, if a weighted matrix is provided as configurations, flatten all non-zero\n # entries to 1.\n if not np.all(incidence.data == 1):\n warnings.warn(\n \"The configurations matrix contains elements different from 0 and 1. \"\n \"All non-zero elements will be converted to 1.\"\n )\n incidence = incidence > 0\n\n if not np.all(incidence.data == 1):\n raise ValueError(\"The incidence matrix can only contain 1 and 0 values.\")\n\n if sort_indices:\n incidence.sort_indices()\n\n return incidence"
},
{
"identifier": "compute_C_prime",
"path": "src/model/kappa.py",
"snippet": "def compute_C_prime(max_hye_size: int) -> float:\n r\"\"\"Compute the :math::`C'` constant defined as\n .. math::\n C' := \\sum_{d=2}^D \\binom{N-2}{d-2} / \\kappa_d\n where D is the maximum hyperedge size, N the number of nodes in the hypergraph, and\n :math::`\\kappa_d` the normalizing constant.\n \"\"\"\n if max_hye_size in C_PRIME_VALS:\n return C_PRIME_VALS[max_hye_size]\n\n hye_dims = np.arange(2, max_hye_size + 1)\n c_prime = 2 * (1 / (hye_dims * (hye_dims - 1))).sum()\n C_PRIME_VALS[max_hye_size] = c_prime\n return c_prime"
},
{
"identifier": "compute_C_third",
"path": "src/model/kappa.py",
"snippet": "def compute_C_third(max_hye_size: int) -> float:\n r\"\"\"Compute the :math::`C'` constant defined as\n .. math::\n C''' := \\sum_{d=2}^D \\frac{1-d}{\\kappa_d} \\binom{N-2}{d-2} /\n where D is the maximum hyperedge size, N the number of nodes in the hypergraph, and\n :math::`\\kappa_d` the normalizing constant.\n \"\"\"\n hye_dims = np.arange(2, max_hye_size + 1)\n return -2 * (1 / hye_dims).sum()"
},
{
"identifier": "hyperedge_pi",
"path": "src/model/numerical.py",
"snippet": "def hyperedge_pi(hye_comm_counts: list[int], p: np.ndarray) -> float:\n r\"\"\"Compute the value of :math::`\\pi_e` for a hyperedge :math::`e`.\n The value is defined as:\n .. math::\n \\pi_e := \\sum_{i < j \\in e} \\p_{t_i t_j}\n\n where p is the affinity matrix and :math::`t_i` the community assignment of node i.\n\n Parameters\n ----------\n hye_comm_counts: a list of length K, where K is the number of communities. Every\n entry a of the list contains the number of nodes in the hyperedge belonging to\n community a.\n p: symmetric affinity matrix of probabilities in [0, 1].\n\n Returns\n -------\n The value of :math::`\\pi_e`.\n \"\"\"\n prob = 0\n for a, b in itertools.combinations(range(len(hye_comm_counts)), 2):\n prob += p[a, b] * hye_comm_counts[a] * hye_comm_counts[b]\n\n for a, count in enumerate(hye_comm_counts):\n prob += p[a, a] * count * (count - 1) / 2\n\n return prob"
},
{
"identifier": "sparse_reduce_lse",
"path": "src/model/numerical.py",
"snippet": "def sparse_reduce_lse(\n *args: sparse.csc_array | sparse.csr_array,\n) -> sparse.csc_array | sparse.csr_array:\n \"\"\"Perform the elementwise log-sum-exp operation on a sequence of sparse arrays.\n The arrays are assumed to have all the same pattern of non-zero entries, and to have\n sorted indices.\n \"\"\"\n data = np.stack([mat.data for mat in args], axis=1)\n lse_vals = special.logsumexp(data, axis=1)\n\n lse_mat = args[0].copy()\n lse_mat.data = lse_vals\n return lse_mat"
},
{
"identifier": "compute_psi_dynamic_programming",
"path": "src/model/dynamic_updates.py",
"snippet": "def compute_psi_dynamic_programming(\n hypergraph: IncidenceHypergraph,\n model: \"HypergraphBlockModel\",\n mask: np.ndarray | None = None,\n) -> list[sparse.coo_array]:\n \"\"\"Compute the psi quantities via dynamic programming.\n\n \"Message Passing on Hypergraphs: Detectability, Phase Transitions, and Higher-Order\n Information\", Ruggeri et al.\n\n Parameters\n ----------\n hypergraph: configurations hypergraph.\n model: configurations stochastic block model.\n mask: a boolean mask to compute the psi values only for specific (hyperedge, node)\n pairs.\n The mask needs to be a flattened boolean array with the same length as\n hypergraphs.get_binary_incidence_matrix().data\n\n Returns\n -------\n The psi values, results of the dynamic programming recursions.\n \"\"\"\n # The incidence matrix needs to be in CSC sparse format, the rest of the code\n # doesn't work otherwise.\n incidence: sparse.csc_array = hypergraph.get_binary_incidence_matrix()\n assert isinstance(incidence, sparse.csc_array), \"Incidence matrix is not CSC.\"\n # For coherence with the returned COO array at the end, the incidence matrix needs\n # to be in canonical sorted format. Otherwise, calling all_psi.tocsc() might result\n # in a matrix where non-zero indices do not correspond.\n assert incidence.has_sorted_indices, (\n \"The incidence matrix doesn't have a canonical sorted format. \"\n \"To fix this, call the sort_indices() method of scipy CSC matrices.\",\n )\n if mask is not None:\n assert mask.shape == (len(incidence.data),), (\n f\"The mask has shape {mask.shape}, \"\n f\"different from the incidence matrix data {incidence.data.shape}\"\n )\n\n log_node_to_hye = [x.tocsc() for x in model.log_node_to_hye]\n K = model.K\n\n def hyperedge_psi_(hye: int):\n nodes, psi = hyperedge_psi(\n incidence,\n hye,\n model.p,\n log_node_to_hye,\n eta_tilde=False,\n mask=mask,\n )\n return hye, nodes, psi\n\n res = Parallel(n_jobs=N_JOBS)(\n delayed(hyperedge_psi_)(hye) for hye in range(hypergraph.E)\n )\n\n nonzeros = mask.sum() if mask is not None else incidence.nnz\n hye_idx = np.zeros(nonzeros)\n node_idx = np.zeros(nonzeros)\n psi_vals = np.zeros((nonzeros, K))\n\n idx = itertools.count()\n for hye, nodes, psi in res:\n for i, node in enumerate(nodes):\n idx_ = next(idx)\n hye_idx[idx_] = hye\n node_idx[idx_] = node\n psi_vals[idx_, :] = psi[i, :]\n\n all_psi = [\n sparse.coo_array(\n (psi_vals[:, a], (node_idx, hye_idx)),\n shape=(hypergraph.N, hypergraph.E),\n )\n for a in range(K)\n ]\n\n return all_psi"
},
{
"identifier": "compute_psi_tilde_dynamic_programming",
"path": "src/model/dynamic_updates.py",
"snippet": "def compute_psi_tilde_dynamic_programming(\n hypergraph: IncidenceHypergraph,\n model: \"HypergraphBlockModel\",\n) -> np.ndarray:\n \"\"\"Compute the psi quantities via dynamic programming.\n\n \"Message Passing on Hypergraphs: Detectability, Phase Transitions, and Higher-Order\n Information\", Ruggeri et al.\n\n Parameters\n ----------\n hypergraph: configurations hypergraph.\n model: configurations stochastic block model.\n\n Returns\n -------\n The psi tilde values, results of the dynamic programming recursions.\n \"\"\"\n # Here we are assuming the incidence to be a CSC sparse array, the rest of the code\n # doesn't work otherwise.\n incidence: sparse.csc_array = hypergraph.get_binary_incidence_matrix()\n assert isinstance(\n incidence, sparse.csc_array\n ), \"Incidence matrix is not is CSC sparse format.\"\n log_node_to_hye = [x.tocsc() for x in model.log_node_to_hye]\n\n def hyperedge_psi_(hye):\n psi_tilde = hyperedge_psi(\n incidence, hye, model.p, log_node_to_hye, eta_tilde=True\n )\n return hye, psi_tilde\n\n res = Parallel(n_jobs=N_JOBS)(\n delayed(hyperedge_psi_)(hye) for hye in range(hypergraph.E)\n )\n\n all_psi = np.zeros(hypergraph.E)\n for hye, psi_val in res:\n all_psi[hye] = psi_val\n\n return all_psi"
}
] | import logging
import numpy as np
from collections import Counter
from typing import Iterable
from scipy import sparse, special
from src.data.representation.incidence_hypergraph import IncidenceHypergraph
from src.model.kappa import compute_C_prime, compute_C_third
from src.model.numerical import hyperedge_pi, sparse_reduce_lse
from .dynamic_updates import (
compute_psi_dynamic_programming,
compute_psi_tilde_dynamic_programming,
) | 5,872 | )
assert external_field.shape == (K,)
return C_prime / N * np.exp(external_field)
def single_hye_pi(self, assignments: Iterable[int]) -> float:
r"""Compute the hyperedge unnormalized probability.
For a hyperedge e and community assignments t, the unnormalized probability is
given by
.. math::
\pi_e := \sum_{i < j \in e} p_{t_i t_j}
Parameters
----------
assignments: community assignments.
This array contains the community assignments :math::`t_i` (with values
between 0 and K-1, where K is the number of communities) for all nodes i in
the hyperedge.
Returns
-------
The value of :math::`\pi_e`.
"""
K = self.K
hye_comm_counts = [0] * K
counts = Counter(assignments)
for comm, count in counts.items():
hye_comm_counts[comm] = count
return hyperedge_pi(hye_comm_counts, self.p)
def hye_pi(
self, hypergraph: IncidenceHypergraph, return_interactions: bool = False
) -> np.ndarray | tuple[np.ndarray, np.ndarray]:
r"""Compute the hyperedge unnormalized probabilities for all the hyperedges in
        the hypergraph. For a hyperedge e, the unnormalized probability has the form
        .. math::
            \pi_e := \sum_{i < j \in e} p_{t_i t_j}
        with p the affinity matrix and :math::`t_i` the community assignment of node i.
Parameters
----------
hypergraph: the input hypergraph.
return_interactions: whether to optionally return the tensor of community
interactions within hyperedges, defined as, for any hyperedge e and
communities a, b:
.. math::
                \#_{ab}^{(e)} := \sum_{i < j \in e} \delta_{t_i a} \delta_{t_j b}
where :math::`\delta_{xy}` is the Dirac delta, equal to 1 if :math::`x=y`,
else 0.
The tensor :math::`\#` has shape (E, K, K), with E number of hyperedges and
K number of communities.
Returns
-------
The array of :math::`\pi_e` values. Optionally, the tensor of :math::`\#`
values.
"""
E = hypergraph.E
K = self.K
p = self.p
incidence = hypergraph.get_binary_incidence_matrix()
onehot_assignments = np.zeros((self.N, K))
onehot_assignments[np.arange(self.N), self.community_assignments()] = 1
counts = incidence.transpose() @ onehot_assignments
assert counts.shape == (E, K)
del onehot_assignments
interactions = counts.reshape(E, 1, K) * counts.reshape(E, K, 1)
interactions[:, np.arange(K), np.arange(K)] = counts * (counts - 1) / 2
assert interactions.shape == (E, K, K)
del counts
pi = 0.5 * (
np.sum(interactions * p.reshape(1, K, K), axis=(1, 2))
+ np.inner(interactions[:, np.arange(K), np.arange(K)], np.diagonal(p))
)
if return_interactions:
return pi, interactions
return pi
def free_energy(self, hypergraph: IncidenceHypergraph) -> float:
"""Compute the free energy of a hypergraph utilizing the message passing
cavity approximations. The free energy, often denoted as :math::`F = -log Z`,
corresponds to the negative log-normalizing constant of the Boltzmann
distribution. Z is also called the evidence of the probabilistic model.
Parameters
----------
hypergraph: hypergraph.
Returns
-------
        The free energy value.
"""
self._check_hypergraph_vs_model_params(hypergraph)
K = self.K
N = self.N
external_field = self.compute_external_field()
ones = np.ones(hypergraph.E)
log_marginals = self.log_marginals
hye_dims = hypergraph.get_binary_incidence_matrix().sum(axis=0)
# Node-related addends.
f_i = [
x.tocsc().dot(ones) - external_field[k]
for k, x in enumerate(
compute_psi_dynamic_programming(hypergraph=hypergraph, model=self)
)
]
assert len(f_i) == K
assert all(x.shape == (N,) for x in f_i)
f_i = np.vstack(f_i).T
assert f_i.shape == (N, K)
f_i = special.logsumexp(a=f_i, b=self.n.reshape(1, -1), axis=1)
f_i_sum = f_i.sum()
# Edge-related addends.
# First addend.
| from __future__ import annotations
# Define the type of sparse matrix that is utilized to store the messages during message
# passing. These can be different for messages from hyperedges to nodes and from nodes
# to hyperedges.
TYPE_HYE_TO_NODE: sparse.spmatrix = sparse.csc_array
TYPE_NODE_TO_HYE: sparse.spmatrix = sparse.csc_array
CLIP_MIN: float = -30
CLIP_MAX: float = -1e-15
class HypergraphBlockModel:
"""Hypergraph version of the Stochastic Block Model, introduced in
"Message Passing on Hypergraphs: Detectability, Phase Transitions, and Higher-Order
Information", Ruggeri et al.
This probabilistic model for hypergraphs partitions the nodes into K hard
communities, specified by an array of assignments t. The communities interact
through a symmetric affinity matrix p, with shape (K, K). Together, the community
assignments t and the affinity matrix p define the Bernoulli probability of the
single hyperedges to be observed or not.
"""
def __init__(
self,
n: np.ndarray | None,
p: np.ndarray | None,
N: int,
K: int,
max_hye_size: int | None,
) -> None:
r"""Stochastic Block Model for Hypergraphs.
This version of SBM considers, for every node i, hard community assignments
:math::`t_i`, i.e. categorical assignments to one out of K communities.
Together with a (K, K) affinity matrix, these two parameters define the
likelihood for unweighted hypergraphs (i.e. hyperedges have weights in {0, 1}).
A prior :math::`n=(n_1, \ldots, n_K)` for the community assignments can also be
specified.
Parameters
----------
n: array of prior parameters for the communities.
If specified, this array is used as initialization for EM inference,
otherwise it is initialized at random.
The array has length K equal to the number of communities, and specifies the
categorical prior probabilities.
p: symmetric matrix of community interaction probabilities.
If specified, this matrix is used as initialization for EM inference,
otherwise it is initialized at random.
The matrix has shape (K, K), where K is the number of communities, and
contains the inter and intra-community interaction probabilities,
constrained to the [0, 1] interval.
N: number of nodes.
K: number of communities.
max_hye_size: maximum size of the hyperedges D.
Notice that this quantity is used to infer probabilistic quantities in the
model, but is not checked against input hypergraphs.
"""
# Model related attributes
self._check_params(n, p, K, N, max_hye_size)
self.n = n.copy() if n is not None else None
self.p = p.copy() if p is not None else None
self.N = N
self.K = K
self.max_hye_size: int = max_hye_size if max_hye_size is not None else N
# Quantities inferred after message passing.
# log of the messages from hyperedges to nodes. Stored as lists of sparse
# matrices. For every hyperedge e and node i, the matrix at position a in the
# list contains the messages from e to i, for community assignment a.
self.log_hye_to_node: list[TYPE_HYE_TO_NODE] | None = None
# log of the messages from nodes to hyperedges.
# They are encoded similarly to the messages above.
self.log_node_to_hye: list[TYPE_NODE_TO_HYE] | None = None
# Other quantities, log-marginals and external field
self.log_marginals: np.ndarray | None = None
self.external_field: np.ndarray | None = None
# Training diagnostics.
self.training_iter: int | None = None
self.n_diff: list[float] = []
self.c_diff: list[float] = []
self.log_marginal_diff: list[list[float]] = []
# Random number generator.
self.rng: np.random.Generator = np.random.default_rng()
@property
def c(self):
"""Return the rescaled affinity matrix c, defined as
.. math::
c = N p
where N is the number of nodes and p the affinity matrix.
"""
return self.p * self.N
def em_inference(
self,
hypergraph: IncidenceHypergraph,
em_iter: int = 20,
em_thresh: float = 1e-5,
mp_iter: int = 2000,
mp_thresh: float = 1e-5,
mp_patience: int = 50,
seed: int | None = None,
dirichlet_alpha: float | None = None,
dropout: float = 0.99,
) -> None:
"""Perform Expectation Maximization (EM) inference on a hypergraph.
        The inference routine consists of alternating message passing, where the
        community assignments :math::`t_i` are inferred, and updates to the global
        parameters, i.e. the affinity matrix p and the community priors n.
        If the affinity p or the priors n are provided at initialization of the model, they
        are not inferred, but kept fixed.
Parameters
----------
hypergraph: hypergraph to perform inference on.
em_iter: maximum number of EM iterations.
One iteration consists of the message passing routine plus the global
parameter updates.
em_thresh: threshold for EM convergence.
The threshold is computed over the absolute difference of the community
priors and the affinity matrix between two consecutive EM iterations.
mp_iter: maximum number of message passing iterations.
mp_thresh: threshold for message passing convergence.
The threshold is computed over the absolute difference of the log-marginals
between two consecutive iterations.
mp_patience: number of steps below the mp_thresh.
After a number of consecutive iterations, specified by patience, with an
absolute change in log-marginals below the mp_thresh, the message passing
procedure is stopped.
seed: random seed.
dirichlet_alpha: parameter for the Dirichlet distribution.
Utilized for the initialization of the messages, which are drawn from a
uniform Dirichlet distribution with parameter alpha.
If None, alpha is chosen automatically.
dropout: dropout rate.
            The dropout rate is the fraction of message and marginal updates that are
            randomly discarded. At every iteration of message passing, the discarded
            values are kept at their previous-iteration value.
"""
if seed is not None:
self.rng = np.random.default_rng(seed)
self._check_hypergraph_vs_model_params(hypergraph)
if self.n is None:
fixed_n = False
self._random_init_n()
logging.info(f"Initialized n prior:\n{self.n}")
else:
fixed_n = True
if self.p is None:
fixed_p = False
self._random_init_p()
logging.info(f"Initialized rescaled affinity c=N*p:\n{self.c}")
else:
fixed_p = True
for it in range(em_iter):
logging.info(f"EM iteration {it}")
# Local parameters: message passing.
self.parallel_message_passing(
hypergraph,
mp_iter=mp_iter,
mp_thresh=mp_thresh,
patience=mp_patience,
warm_start=True,
seed=None, # keep the current random number generator unaltered.
dirichlet_alpha=dirichlet_alpha,
dropout=dropout,
)
# Global parameters: EM updates.
if not fixed_n or not fixed_p:
logging.info("\tUpdates of priors n and affinity p...")
if not fixed_n:
old_n = self.n.copy()
self.n = self.updated_community_prior()
self.n_diff.append(np.abs(old_n - self.n).sum())
logging.info(
f"\tCommunity prior:\n{self.n}"
"\n\tDifference from previous iteration: "
f"{self.n_diff[-1]}"
)
if not fixed_p:
old_c = self.c.copy()
self.p = self.updated_affinity_matrix(hypergraph)
self.c_diff.append(np.abs(old_c - self.c).sum())
logging.info(
f"\tRescaled affinity matrix c=N*p:\n{self.c}"
"\n\tDifference from previous iteration:"
f"{self.c_diff[-1]}"
)
self.training_iter = it + 1
if not fixed_n or not fixed_p:
param_diff = 0.0
if not fixed_n:
param_diff += self.n_diff[-1]
if not fixed_p:
param_diff += self.c_diff[-1]
if param_diff <= em_thresh:
logging.info(
"Expectation-maximization threshold passed. "
"inference terminated."
)
break
def parallel_message_passing(
self,
hypergraph: IncidenceHypergraph,
mp_iter: int = 2000,
mp_thresh: float = 1.0e-5,
dirichlet_alpha: float | None = None,
dropout: float = 0.99,
patience: int = 50,
seed: int | None = None,
warm_start: bool = True,
) -> None:
"""Perform message passing inference of the node assignments.
Parameters
----------
hypergraph: a hypergraph.
mp_iter: maximum number of message passing iterations.
mp_thresh: threshold for message passing convergence.
The threshold is computed over the absolute difference of the log-marginals
between two consecutive iterations.
dirichlet_alpha: parameter for the Dirichlet distribution.
Utilized for the initialization of the messages, which are drawn from a
uniform Dirichlet distribution with parameter alpha.
If None, alpha is chosen automatically.
dropout: dropout rate.
            The dropout rate is the fraction of message and marginal updates that are
            randomly discarded. At every iteration of message passing, the discarded
            values are kept at their previous-iteration value.
patience: number of steps below the mp_thresh.
After a number of consecutive iterations, specified by patience, with an
absolute change in log-marginals below the mp_thresh, the message passing
procedure is stopped.
seed: random seed.
        warm_start: if True, reuse the previously initialized messages and marginal
            beliefs instead of re-initializing them.
"""
logging.info("\tMessage passing...")
if seed is not None:
self.rng = np.random.default_rng(seed)
self._check_hypergraph_vs_model_params(hypergraph)
all_messages_init = (
self.log_hye_to_node is not None
and self.log_node_to_hye is not None
and self.log_marginals is not None
and self.external_field is not None
)
if not warm_start or not all_messages_init:
alpha = 10.0 * self.K if dirichlet_alpha is None else dirichlet_alpha
self._init_message_passing(hypergraph, dirichlet_alpha=alpha)
logging.debug(
f"\t\tInitialized hye to node:\n{self.log_hye_to_node[0].data[:5]}"
)
logging.debug(
f"\t\tInitialized node to hye:\n{self.log_node_to_hye[0].data[:5]}"
)
logging.debug(f"\t\tInitialized marginals:\n{self.log_marginals[:5]}")
logging.debug(f"\t\tInitialized external field:\n{self.external_field}")
self.log_marginal_diff.append(list())
patience_count = 0
for i in range(mp_iter):
old_log_marginals = self.log_marginals.copy()
self._parallel_message_passing_step(hypergraph, dropout)
self.log_marginal_diff[-1].append(
np.abs(old_log_marginals - self.log_marginals).sum()
)
logging.info(
f"\t\tMP step {i} - difference in log-marginals from previous iter: "
f"{self.log_marginal_diff[-1][-1]}"
)
if self.log_marginal_diff[-1][-1] <= mp_thresh:
patience_count += 1
else:
patience_count = 0
if patience_count == patience:
logging.info(
"\tMessage passing threshold passed. Message passing terminated."
)
break
def _parallel_message_passing_step(
self,
hypergraph: IncidenceHypergraph,
dropout: float = 0.99,
) -> None:
"""Perform one step of message passing, updating the messages from nodes to
factors, the messages from factors to nodes, the marginal probabilities and
external field."""
inc = hypergraph.get_binary_incidence_matrix()
# Update node to hye.
new_node_to_hye = [None] * self.K
for assignment in range(self.K):
col_sum = self.log_hye_to_node[assignment].sum(axis=1)
assert col_sum.shape == (self.N,)
col_sum += np.log(self.n[assignment]) - self.external_field[assignment]
col_sum = col_sum.reshape((self.N, 1))
new_node_to_hye[assignment] = (
TYPE_HYE_TO_NODE(inc * col_sum) - self.log_hye_to_node[assignment]
)
norm = sparse_reduce_lse(*new_node_to_hye)
for assignment in range(self.K):
new_node_to_hye[assignment].data -= norm.data
new_node_to_hye[assignment].data = np.clip(
new_node_to_hye[assignment].data, a_min=CLIP_MIN, a_max=CLIP_MAX
)
# TODO dropout could be made more efficient here. Do it or not?
if dropout > 0:
non_dropout_mask = (
self.rng.random(len(self.log_node_to_hye[0].data)) >= dropout
)
for assignment in range(self.K):
self.log_node_to_hye[assignment].data[
non_dropout_mask
] = new_node_to_hye[assignment].data[non_dropout_mask]
else:
for assignment in range(self.K):
self.log_node_to_hye[assignment].data = new_node_to_hye[assignment].data
logging.debug(f"\t\tUpdated node to hye:\n{self.log_node_to_hye[0].data[:5]}")
# Update hye to node.
if dropout > 0:
non_dropout_mask = (
self.rng.random(len(self.log_hye_to_node[0].data)) >= dropout
)
else:
non_dropout_mask = None
new_hye_to_node = [
TYPE_HYE_TO_NODE(x)
for x in compute_psi_dynamic_programming(
hypergraph=hypergraph,
model=self,
mask=non_dropout_mask,
)
]
norm = sparse_reduce_lse(*new_hye_to_node)
for assignment in range(self.K):
new_hye_to_node[assignment].data -= norm.data
new_hye_to_node[assignment].data = np.clip(
new_hye_to_node[assignment].data, a_min=CLIP_MIN, a_max=CLIP_MAX
)
for assignment in range(self.K):
self.log_hye_to_node[assignment].data[non_dropout_mask] = new_hye_to_node[
assignment
].data
logging.debug(f"\t\tUpdated hye to node:\n{self.log_hye_to_node[0].data[:5]}")
# Update marginals.
new_marginals = []
for assignment in range(self.K):
col_sum = self.log_hye_to_node[assignment].sum(axis=1)
assert col_sum.shape == (self.N,)
col_sum += np.log(self.n[assignment]) - self.external_field[assignment]
new_marginals.append(col_sum)
new_marginals = np.stack(new_marginals, axis=1)
assert new_marginals.shape == (self.N, self.K)
new_marginals = new_marginals - special.logsumexp(
new_marginals, axis=1, keepdims=True
)
new_marginals = np.clip(new_marginals, a_min=CLIP_MIN, a_max=CLIP_MAX)
if dropout > 0:
non_dropout_mask = self.rng.random(self.N) >= dropout
self.log_marginals[non_dropout_mask] = new_marginals[non_dropout_mask]
else:
self.log_marginals = new_marginals
logging.debug(f"\t\tUpdated marginals:\n{self.log_marginals[:5]}")
# Update external field.
lse_term = special.logsumexp(
a=self.log_marginals.reshape((self.N, self.K, 1)),
b=self.c.reshape(1, self.K, self.K),
axis=(0, 1),
)
assert lse_term.shape == (self.K,)
C_prime = compute_C_prime(self.max_hye_size)
self.external_field = C_prime / self.N * np.exp(lse_term)
logging.debug(f"\t\tUpdated external field:\n{self.external_field}")
def updated_community_prior(self) -> np.ndarray:
"""Parameter updates for the community priors n during EM inference.
Returns
-------
The updated array of community priors.
"""
assignments = self.community_assignments()
comm, counts = np.unique(assignments, return_counts=True)
n = np.zeros(self.K)
n[comm] = counts / self.N
return np.clip(n, a_min=1.0e-20, a_max=1.0)
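    # --- Illustrative sketch (not part of the original class) ---
    # A hypothetical check of the M-step for the community prior: given an assumed
    # array `labels` of hard assignments (length N, values in {0, ..., K-1}), the
    # update reduces to the empirical community frequencies n_a = |{i : t_i = a}| / N.
    #
    #   model.log_marginals = np.log(np.eye(model.K)[labels] + 1e-30)  # fake hard marginals
    #   n_hat = model.updated_community_prior()  # ~ np.bincount(labels, minlength=model.K) / model.N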
def updated_affinity_matrix(self, hypergraph: IncidenceHypergraph) -> np.ndarray:
"""Parameter updates for the affinity matrix p during EM inference.
Parameters
----------
hypergraph: a hypergraph.
Returns
-------
The updated affinity matrix.
"""
# Numerator.
pi, interactions = self.hye_pi(hypergraph, return_interactions=True)
numerator = np.tensordot(
interactions, 1 / np.clip(pi, a_min=1.0e-20, a_max=None), axes=(0, 0)
)
assert numerator.shape == (self.K, self.K)
# Denominator.
C_prime = compute_C_prime(self.max_hye_size)
denominator = (
self.N * C_prime * (self.N * np.outer(self.n, self.n) - np.diag(self.n))
)
p = self.p * 2 * numerator / denominator
return np.clip(p, a_min=1e-20, a_max=0.99)
def community_assignments(self):
marginals = self.log_marginals
return np.argmax(marginals, axis=1)
def compute_external_field(self) -> np.array:
r"""Compute the approximate external field, defined as
.. math::
h(t_i) :=
\frac{C'}{N}
\sum_{j \in V} \sum_{t_j} c_{t_i t_j} q_j(t_j)
where
.. math::
C' = \sum_{d=2}^D \binom{N-2}{d-2} \frac{1}{\kappa_d}
Returns
-------
The external field h.
"""
log_marginals = self.log_marginals
c = self.c
K = self.K
N = self.N
C_prime = compute_C_prime(self.max_hye_size)
external_field = special.logsumexp(
a=log_marginals.reshape(N, 1, K), b=c.reshape(1, K, K), axis=(0, 2)
)
assert external_field.shape == (K,)
return C_prime / N * np.exp(external_field)
def single_hye_pi(self, assignments: Iterable[int]) -> float:
r"""Compute the hyperedge unnormalized probability.
For a hyperedge e and community assignments t, the unnormalized probability is
given by
.. math::
\pi_e := \sum_{i < j \in e} p_{t_i t_j}
Parameters
----------
assignments: community assignments.
This array contains the community assignments :math::`t_i` (with values
between 0 and K-1, where K is the number of communities) for all nodes i in
the hyperedge.
Returns
-------
The value of :math::`\pi_e`.
"""
K = self.K
hye_comm_counts = [0] * K
counts = Counter(assignments)
for comm, count in counts.items():
hye_comm_counts[comm] = count
return hyperedge_pi(hye_comm_counts, self.p)
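    # --- Illustrative sketch (not part of the original class) ---
    # A hypothetical worked example of the unnormalized hyperedge probability: for a
    # hyperedge whose nodes are assigned (0, 0, 1) with K=2 communities, the pairs
    # contribute pi_e = 1 * p[0, 0] + 2 * p[0, 1] (one intra-community pair, two cross pairs).
    #
    #   pi_e = model.single_hye_pi([0, 0, 1])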
def hye_pi(
self, hypergraph: IncidenceHypergraph, return_interactions: bool = False
) -> np.ndarray | tuple[np.ndarray, np.ndarray]:
r"""Compute the hyperedge unnormalized probabilities for all the hyperedges in
        the hypergraph. For a hyperedge e, the unnormalized probability has the form
        .. math::
            \pi_e := \sum_{i < j \in e} p_{t_i t_j}
        with p the affinity matrix and :math::`t_i` the community assignment of node i.
Parameters
----------
hypergraph: the input hypergraph.
return_interactions: whether to optionally return the tensor of community
interactions within hyperedges, defined as, for any hyperedge e and
communities a, b:
.. math::
                \#_{ab}^{(e)} := \sum_{i < j \in e} \delta_{t_i a} \delta_{t_j b}
where :math::`\delta_{xy}` is the Dirac delta, equal to 1 if :math::`x=y`,
else 0.
The tensor :math::`\#` has shape (E, K, K), with E number of hyperedges and
K number of communities.
Returns
-------
The array of :math::`\pi_e` values. Optionally, the tensor of :math::`\#`
values.
"""
E = hypergraph.E
K = self.K
p = self.p
incidence = hypergraph.get_binary_incidence_matrix()
onehot_assignments = np.zeros((self.N, K))
onehot_assignments[np.arange(self.N), self.community_assignments()] = 1
counts = incidence.transpose() @ onehot_assignments
assert counts.shape == (E, K)
del onehot_assignments
interactions = counts.reshape(E, 1, K) * counts.reshape(E, K, 1)
interactions[:, np.arange(K), np.arange(K)] = counts * (counts - 1) / 2
assert interactions.shape == (E, K, K)
del counts
pi = 0.5 * (
np.sum(interactions * p.reshape(1, K, K), axis=(1, 2))
+ np.inner(interactions[:, np.arange(K), np.arange(K)], np.diagonal(p))
)
if return_interactions:
return pi, interactions
return pi
def free_energy(self, hypergraph: IncidenceHypergraph) -> float:
"""Compute the free energy of a hypergraph utilizing the message passing
cavity approximations. The free energy, often denoted as :math::`F = -log Z`,
corresponds to the negative log-normalizing constant of the Boltzmann
distribution. Z is also called the evidence of the probabilistic model.
Parameters
----------
hypergraph: hypergraph.
Returns
-------
        The free energy value.
"""
self._check_hypergraph_vs_model_params(hypergraph)
K = self.K
N = self.N
external_field = self.compute_external_field()
ones = np.ones(hypergraph.E)
log_marginals = self.log_marginals
hye_dims = hypergraph.get_binary_incidence_matrix().sum(axis=0)
# Node-related addends.
f_i = [
x.tocsc().dot(ones) - external_field[k]
for k, x in enumerate(
compute_psi_dynamic_programming(hypergraph=hypergraph, model=self)
)
]
assert len(f_i) == K
assert all(x.shape == (N,) for x in f_i)
f_i = np.vstack(f_i).T
assert f_i.shape == (N, K)
f_i = special.logsumexp(a=f_i, b=self.n.reshape(1, -1), axis=1)
f_i_sum = f_i.sum()
# Edge-related addends.
# First addend. | first_addend = compute_psi_tilde_dynamic_programming( | 6 | 2023-12-06 22:01:38+00:00 | 8k |
sailfishos-chum/sailfishos-chum.github.io | chumweb/static_site_gen.py | [
{
"identifier": "CONFIG",
"path": "chumweb/config.py",
"snippet": "CONFIG = init_config()"
},
{
"identifier": "create_package_atom_feed",
"path": "chumweb/atom_feed.py",
"snippet": "def create_package_atom_feed(pkgs: List[Package], public_url: str, title: str) -> Document:\n \"\"\"\n Creates a Atom feed with packages\n :return: An XML Document representing the feed\n \"\"\"\n doc = create_atom_feed(public_url, title, pkgs[0].updated)\n feed = doc.getElementsByTagName(\"feed\")[0]\n\n for pkg in pkgs:\n feed.appendChild(_create_pkg_entry(doc, pkg))\n\n return doc"
},
{
"identifier": "Package",
"path": "chumweb/package.py",
"snippet": "class Package:\n \"\"\"\n Metadata of a RPM package with associated Chum metadata\n \"\"\"\n name: str\n summary: str | None = None\n description: str | Markup | None = None\n title: str | None = None\n icon: RemoteImage | None = None\n version: PackageVersion | None = None\n developer_name: str | None = None\n packager_name: str | None = None\n type: PackageApplicationType = PackageApplicationType.generic\n categories: Set[PackageApplicationCategory] = field(default_factory=lambda: {PackageApplicationCategory.other})\n screenshots: List[RemoteImage] = field(default_factory=list)\n links: Dict[str, str] = field(default_factory=dict)\n debuginfo_package: Self | None = None\n debugsource_package: Self | None = None\n url: str | None = None\n licence: str | None = None\n markdown_url: str | None = None\n repo_url: str | None = None\n packaging_repo_url: str | None = None\n debug_yaml: str | None = None\n debug_yaml_errors: List[Exception] = field(default_factory=list)\n updated: datetime | None = field(default_factory=lambda: datetime.fromtimestamp(0))\n\n repos: Set[str] = field(default_factory=set)\n archs: Set[str] = field(default_factory=set)\n download_size: Dict[str, int] = field(default_factory=dict)\n install_size: Dict[str, int] = field(default_factory=dict)\n download_url: Dict[str, str] = field(default_factory=dict)\n checksum_type: Dict[str, str] = field(default_factory=dict)\n checksum_value: Dict[str, str] = field(default_factory=dict)\n\n @staticmethod\n def from_node(dom_element, repo_arch: str):\n \"\"\"\n Creates a Package class instance from a `<package>` XML node `dom_element` as found in the primary.xml\n metadata in RPM repositories.\n \"\"\"\n\n def try_get_str(name) -> str | None:\n \"\"\"Return content of XML tag with `name` or None\"\"\"\n try:\n return dom_element.getElementsByTagName(name)[0].firstChild.nodeValue\n except (IndexError, AttributeError):\n return None\n\n def try_get_attribute_tags(name, *args: str):\n result = (())\n try:\n el = dom_element.getElementsByTagName(name)[0]\n\n for attr in args:\n result += (el.getAttribute(attr),)\n\n return result\n except IndexError:\n return tuple([None for _ in args])\n\n def try_get_version():\n \"\"\"Parse version\"\"\"\n epoch, ver, rel = try_get_attribute_tags(\"version\", \"epoch\", \"ver\", \"rel\")\n return PackageVersion(epoch, ver, rel)\n\n def name_to_title(name: str):\n name_parts: List[str] = name.split(\"-\")\n if name_parts[0] == \"harbour\" or name_parts[0] == \"openrepos\":\n name_parts.pop(0)\n if name_parts[0].startswith(\"lib\"):\n name_parts[0] = name_parts[0].removeprefix(\"lib\")\n name_parts.append(\"(library)\")\n if name_parts[-1] == \"devel\":\n name_parts[-1] = \"(development files)\"\n\n return \" \".join(map(str.capitalize, name_parts))\n\n def parse_description(description: str, name: str):\n from yaml import safe_load as yaml_load\n from yaml.parser import ParserError\n from yaml.scanner import ScannerError\n\n import re\n # Based on\n # https://github.com/sailfishos-chum/sailfishos-chum-gui/blob/0b2882fad79673b762ca184cd242d02334f1d8d1/src/chumpackage.cpp#L152C1-L152C108\n # Metadata, in YAML format, is put as the last paragraph of the application description. 
Paragraphs are\n # split by two newlines.\n paragraphs = [line for line in re.split(r\"(?m)^\\s*$\", description) if line.strip()]\n if not paragraphs:\n return\n\n yaml_part = paragraphs.pop()\n p.debug_yaml = yaml_part\n try:\n yaml = yaml_load(yaml_part)\n except (ParserError, ScannerError):\n yaml = None\n # If it happens that the description is not YAML, it'll be parsed as a str or generate a ParseError. In that\n # case, add the source back to the description\n if type(yaml) in [str, NoneType]:\n paragraphs.append(yaml_part)\n else:\n # Note: use Dict.get() to avoid IndexError's. We rather have None values\n p.title = yaml.get(\"Title\") or yaml.get(\"PackageName\") or name_to_title(name)\n p.type = yaml.get(\"Type\")\n\n icon_url = yaml.get(\"PackageIcon\") or yaml.get(\"Icon\")\n p.icon = RemoteImage(icon_url) if icon_url else None\n p.screenshots = list(map(lambda s: RemoteImage(s), yaml.get(\"Screenshots\", [])))\n p.developer_name = yaml.get(\"DeveloperName\")\n p.packager_name = yaml.get(\"PackagedBy\")\n\n if \"Custom\" in yaml:\n custom = yaml[\"Custom\"]\n if type(custom) is list:\n custom_list = custom\n custom = {}\n # Handle cases where the Custom value is a list of key-value pairs instead of an object :(\n for list_item in custom_list:\n custom |= {k: v for (k, v) in list_item.items()}\n\n p.repo_url = custom.get(\"Repo\")\n p.packaging_repo_url = custom.get(\"PackagingRepo\")\n p.markdown_url = custom.get(\"DescriptionMD\")\n\n try:\n p.links = {key.lower(): val for key, val in (yaml.get(\"Links\") or yaml.get(\"Url\", {})).items()}\n except AttributeError as e:\n p.debug_yaml_errors.append(e)\n\n try:\n p.categories = set(map(PackageApplicationCategory, yaml[\"Categories\"]))\n except (KeyError, ValueError) as e:\n p.debug_yaml_errors.append(e)\n\n p.description = \"\\n\\n\".join(map(lambda s: s.replace('\\n', ' '), paragraphs))\n\n arch = try_get_str(\"arch\")\n\n p = Package(try_get_str(\"name\"))\n p.repos.add(repo_arch)\n p.archs.add(arch)\n p.summary = try_get_str(\"summary\")\n p.version = try_get_version()\n p.url = try_get_str(\"url\")\n p.title = name_to_title(p.name)\n p.licence = try_get_str(\"rpm:license\")\n p.updated = datetime.fromtimestamp(float(try_get_attribute_tags(\"time\", \"file\")[0]), UTC)\n\n p.download_size[arch], p.install_size[arch] = try_get_attribute_tags(\"size\", \"package\", \"installed\")\n p.download_url[arch] = try_get_attribute_tags(\"location\", \"href\")[0]\n p.checksum_type[arch] = try_get_attribute_tags(\"checksum\", \"type\")[0]\n p.checksum_value[arch] = try_get_str(\"checksum\")\n\n try:\n parse_description(try_get_str(\"description\"), p.name)\n except Exception as e:\n p.description = try_get_str(\"description\")\n p.debug_yaml_errors.append(e)\n\n if p.name.startswith(\"lib\") and PackageApplicationCategory.library not in p.categories:\n p.categories.add(PackageApplicationCategory.library)\n\n return p\n\n def merge_arch(self, other_pkg: Self):\n \"\"\"\n Adds the architecture-specific information from another package to this package\n \"\"\"\n for arch in other_pkg.archs:\n self.repos = self.repos.union(other_pkg.repos)\n self.download_size[arch] = other_pkg.download_size[arch]\n self.install_size[arch] = other_pkg.install_size[arch]\n self.download_url[arch] = other_pkg.download_url[arch]\n self.checksum_type[arch] = other_pkg.checksum_type[arch]\n self.checksum_value[arch] = other_pkg.checksum_value[arch]\n self.archs.add(arch)\n\n def is_app(self) -> bool:\n \"\"\"\n Heuristic to detect whether this is a graphical app 
that users would like to install\n \"\"\"\n return self.type == PackageApplicationType.desktop_application \\\n or self.name.startswith(\"harbour-\") \\\n and not self.is_debug()\n\n def is_debug(self) -> bool:\n return self.name.endswith(\"-debuginfo\") or self.name.endswith(\"-debugsource\")\n\n def web_url(self):\n \"\"\"\n Returns the url for use in the web interface\n \"\"\"\n if self.is_app():\n return f\"apps/{self.name}/\"\n else:\n return f\"pkgs/{self.name}/\"\n\n def get_download_url(self, arch: str) -> Optional[str]:\n # noarch does not have a dedicated repository, use the first available arch I suppose\n # This may be an idea in the category \"not smart\"\n if arch == \"noarch\":\n repo = next(self.repos.__iter__())\n else:\n for repo in self.repos:\n repo_arch = repo.split(\"_\")[1]\n if repo_arch == arch:\n break\n else:\n logger.warning(f\"No repo found for architecture {arch} (package: {self.name})\")\n #assert False, f\"No repo found for architecture {arch} (package: {self.name})\"\n return None\n\n return f\"{CONFIG.repo_url_prefix}{repo}/\" + self.download_url[arch]\n\n\n def caused_requests(self):\n return type(self.markdown_url) == str\n\n def requested_urls(self):\n return [self.markdown_url]\n\n def to_search_dict(self):\n return {\n \"name\": self.name,\n \"title\": self.title,\n \"url\": self.web_url(),\n \"icon\": self.icon.remote_url if self.icon else None,\n \"summary\": self.summary,\n \"description\": self.description,\n \"version\": self.version.to_full_str(),\n \"version_short\": self.version.to_short_str(),\n \"is_app\": self.is_app(),\n \"is_debug\": self.is_debug()\n }"
},
{
"identifier": "PackageApplicationCategory",
"path": "chumweb/package.py",
"snippet": "class PackageApplicationCategory(StrEnum):\n \"\"\"\n Desktop application categories, from https://specifications.freedesktop.org/menu-spec/latest/apa.html\n \"\"\"\n accessibility = \"Accessibility\" # Added by Chum?\n audio_video = \"AudioVideo\"\n audio = \"Audio\"\n video = \"Video\"\n development = \"Development\"\n education = \"Education\"\n game = \"Game\"\n graphics = \"Graphics\"\n library = \"Library\" # Added by Chum?\n maps = \"Maps\" # Added by Chum?\n network = \"Network\"\n office = \"Office\"\n science = \"Science\"\n settings = \"Settings\"\n system = \"System\"\n utility = \"Utility\"\n other = \"Other\""
},
{
"identifier": "begin_step",
"path": "chumweb/progress.py",
"snippet": "def begin_step(step_name: str) -> StepHandle:\n global _current_step_name, _current_step\n step = _current_step\n if _current_step_name != step_name:\n _current_step_name = step_name\n _print_step(step_name)\n step += 1\n _current_step = step\n\n return step"
},
{
"identifier": "step_progress",
"path": "chumweb/progress.py",
"snippet": "def step_progress(step: StepHandle, subtask_name: str, progress: int, total: int):\n if _current_step == step:\n _print_step_progress(subtask_name, progress, total)"
},
{
"identifier": "RepoInfo",
"path": "chumweb/repo_loader.py",
"snippet": "class RepoInfo:\n packages: List[Package]\n repos: List[str]\n version: str # SFOS version\n\n def repo_archs(self):\n \"\"\"\n :return: The architecture for which there is a repository\n \"\"\"\n return [repo.split(\"_\")[1] for repo in self.repos]"
}
] | import dataclasses
import json
import os
import random
import importlib.resources as resources
import lunr
from dataclasses import dataclass
from urllib.parse import urljoin
from jinja2 import Environment, PackageLoader, Template, select_autoescape, pass_eval_context
from jinja2.nodes import EvalContext
from markupsafe import Markup
from os import makedirs, mkdir
from pathlib import Path
from shutil import rmtree
from typing import List, Dict, Tuple, Set
from . import CONFIG
from .atom_feed import create_package_atom_feed
from .package import Package, PackageApplicationCategory
from datetime import datetime
from .progress import begin_step, step_progress
from .repo_loader import RepoInfo
from math import log2 | 3,631 | """
This module generates the static website
"""
ALPHABET = "abcdefghijklmnopqrstuvwxyz"
@dataclass
class PackageIndex:
id: str
display: str
page_title: str
file: str
pkgs: List[Package]
def as_dict(self):
# dataclasses.asdict() converts the pkgs to dicts as well, which is not what I want, hence the hand-typed version
return {
"id": self.id,
"display": self.display,
"page_title": self.page_title,
"file": self.file,
"pkgs": self.pkgs
}
@dataclass
class CategoryPage:
name: str
categories: Set[str]
def __hash__(self):
return self.name.__hash__()
@dataclass
class Feed:
title: str
path: str
pkgs: List[Package]
def __getattr__(self, item):
if item == "url":
return CONFIG.public_url + self.path
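# Maps each website category page to the freedesktop.org menu categories it aggregates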
CATEGORY_PAGES = [
CategoryPage("Accessibility", {PackageApplicationCategory.accessibility}),
CategoryPage("Development", {PackageApplicationCategory.development}),
CategoryPage("Education", {PackageApplicationCategory.education}),
CategoryPage("Games", {PackageApplicationCategory.game}),
CategoryPage("Graphics", {PackageApplicationCategory.graphics}),
CategoryPage("Libraries", {PackageApplicationCategory.library}),
CategoryPage("Location and Navigation", {PackageApplicationCategory.maps}),
CategoryPage("Multimedia", {PackageApplicationCategory.audio, PackageApplicationCategory.video,
PackageApplicationCategory.audio_video}),
CategoryPage("Office", {PackageApplicationCategory.office}),
CategoryPage("Science", {PackageApplicationCategory.science}),
CategoryPage("Utilities", {PackageApplicationCategory.system, PackageApplicationCategory.utility}),
CategoryPage("Other", {PackageApplicationCategory.other}),
]
def gen_site(repo_info: RepoInfo, out_dir: Path):
"""
Generates the static website given a list of packages
:param repo_info: The repository info and packages to generate the website for
:param out_dir: The directory to output the generated website in
"""
| """
This module generates the static website
"""
ALPHABET = "abcdefghijklmnopqrstuvwxyz"
@dataclass
class PackageIndex:
id: str
display: str
page_title: str
file: str
pkgs: List[Package]
def as_dict(self):
# dataclasses.asdict() converts the pkgs to dicts as well, which is not what I want, hence the hand-typed version
return {
"id": self.id,
"display": self.display,
"page_title": self.page_title,
"file": self.file,
"pkgs": self.pkgs
}
@dataclass
class CategoryPage:
name: str
categories: Set[str]
def __hash__(self):
return self.name.__hash__()
@dataclass
class Feed:
title: str
path: str
pkgs: List[Package]
def __getattr__(self, item):
if item == "url":
return CONFIG.public_url + self.path
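# Maps each website category page to the freedesktop.org menu categories it aggregates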
CATEGORY_PAGES = [
CategoryPage("Accessibility", {PackageApplicationCategory.accessibility}),
CategoryPage("Development", {PackageApplicationCategory.development}),
CategoryPage("Education", {PackageApplicationCategory.education}),
CategoryPage("Games", {PackageApplicationCategory.game}),
CategoryPage("Graphics", {PackageApplicationCategory.graphics}),
CategoryPage("Libraries", {PackageApplicationCategory.library}),
CategoryPage("Location and Navigation", {PackageApplicationCategory.maps}),
CategoryPage("Multimedia", {PackageApplicationCategory.audio, PackageApplicationCategory.video,
PackageApplicationCategory.audio_video}),
CategoryPage("Office", {PackageApplicationCategory.office}),
CategoryPage("Science", {PackageApplicationCategory.science}),
CategoryPage("Utilities", {PackageApplicationCategory.system, PackageApplicationCategory.utility}),
CategoryPage("Other", {PackageApplicationCategory.other}),
]
def gen_site(repo_info: RepoInfo, out_dir: Path):
"""
Generates the static website given a list of packages
:param repo_info: The repository info and packages to generate the website for
:param out_dir: The directory to output the generated website in
""" | sitegen_step = begin_step("Generating site") | 4 | 2023-12-14 19:25:31+00:00 | 8k |
oVo-HxBots/URLUploadBot | Uploader/callbacks.py | [
{
"identifier": "progress_for_pyrogram",
"path": "Uploader/functions/display_progress.py",
"snippet": "async def progress_for_pyrogram(\n current,\n total,\n ud_type,\n message,\n start\n):\n now = time.time()\n diff = now - start\n if round(diff % 10.00) == 0 or current == total:\n # if round(current / total * 100, 0) % 5 == 0:\n percentage = current * 100 / total\n speed = current / diff\n elapsed_time = round(diff) * 1000\n time_to_completion = round((total - current) / speed) * 1000\n estimated_total_time = elapsed_time + time_to_completion\n\n elapsed_time = TimeFormatter(milliseconds=elapsed_time)\n estimated_total_time = TimeFormatter(milliseconds=estimated_total_time)\n\n progress = \"[{0}{1}] \\nP: {2}%\\n\".format(\n ''.join([\"◾\" for _ in range(math.floor(percentage / 5))]),\n ''.join([\"◽\" for _ in range(20 - math.floor(percentage / 5))]),\n round(percentage, 2),\n )\n\n tmp = progress + \"{0} of {1}\\n\\nSpeed: {2}/s\\n\\nETA: {3}\\n\\n\".format(\n humanbytes(current),\n humanbytes(total),\n humanbytes(speed),\n # elapsed_time if elapsed_time != '' else \"0 s\",\n estimated_total_time if estimated_total_time != '' else \"0 s\"\n )\n try:\n await message.edit(text=f\"{ud_type}\\n {tmp}\")\n except Exception as e:\n logger.info(f\"Error {e}\")\n return"
},
{
"identifier": "humanbytes",
"path": "Uploader/functions/display_progress.py",
"snippet": "def humanbytes(size):\n # https://stackoverflow.com/a/49361727/4723940\n # 2**10 = 1024\n if not size:\n return \"\"\n power = 2**10\n n = 0\n Dic_powerN = {0: ' ', 1: 'K', 2: 'M', 3: 'G', 4: 'T'}\n while size > power:\n size /= power\n n += 1\n return f\"{str(round(size, 2))} {Dic_powerN[n]}B\""
},
{
"identifier": "ddl_call_back",
"path": "Uploader/dl_button.py",
"snippet": "async def ddl_call_back(bot, update): # sourcery skip: low-code-quality\n cb_data = update.data\n tg_send_type, youtube_dl_format, youtube_dl_ext = cb_data.split(\"=\")\n youtube_dl_url = update.message.reply_to_message.text\n custom_file_name = os.path.basename(youtube_dl_url)\n if \" \" in youtube_dl_url:\n url_parts = youtube_dl_url.split(\" * \")\n if len(url_parts) == 2:\n youtube_dl_url = url_parts[0]\n custom_file_name = url_parts[1]\n else:\n for entity in update.message.reply_to_message.entities:\n if entity.type == \"text_link\":\n youtube_dl_url = entity.url\n elif entity.type == \"url\":\n o = entity.offset\n l = entity.length\n youtube_dl_url = youtube_dl_url[o:o + l]\n if youtube_dl_url is not None:\n youtube_dl_url = youtube_dl_url.strip()\n if custom_file_name is not None:\n custom_file_name = custom_file_name.strip()\n else:\n for entity in update.message.reply_to_message.entities:\n if entity.type == \"text_link\":\n youtube_dl_url = entity.url\n elif entity.type == \"url\":\n o = entity.offset\n l = entity.length\n youtube_dl_url = youtube_dl_url[o:o + l]\n description = custom_file_name\n if f\".{youtube_dl_ext}\" not in custom_file_name:\n custom_file_name += f'.{youtube_dl_ext}'\n logger.info(youtube_dl_url)\n logger.info(custom_file_name)\n start = datetime.now()\n await bot.edit_message_text(text=Translation.DOWNLOAD_START.format(custom_file_name), chat_id=update.message.chat.id, message_id=update.message.id)\n\n tmp_directory_for_each_user = f\"{Config.DOWNLOAD_LOCATION}/{str(update.from_user.id)}\"\n\n if not os.path.isdir(tmp_directory_for_each_user):\n os.makedirs(tmp_directory_for_each_user)\n download_directory = f\"{tmp_directory_for_each_user}/{custom_file_name}\"\n command_to_exec = []\n async with aiohttp.ClientSession() as session:\n c_time = time.time()\n try:\n await download_coroutine(bot, session, youtube_dl_url, download_directory, update.message.chat.id, update.message.id, c_time)\n\n except asyncio.TimeoutError:\n await bot.edit_message_text(text=Translation.SLOW_URL_DECED, chat_id=update.message.chat.id, message_id=update.message.id)\n\n return False\n if os.path.exists(download_directory):\n save_ytdl_json_path = f\"{Config.DOWNLOAD_LOCATION}/{str(update.message.chat.id)}.json\"\n download_location = f\"{Config.DOWNLOAD_LOCATION}/{update.from_user.id}.jpg\"\n thumb = download_location if os.path.isfile(\n download_location) else None\n\n if os.path.exists(save_ytdl_json_path):\n os.remove(save_ytdl_json_path)\n end_one = datetime.now()\n await bot.edit_message_text(text=Translation.UPLOAD_START, chat_id=update.message.chat.id, message_id=update.message.id)\n\n file_size = Config.TG_MAX_FILE_SIZE + 1\n try:\n file_size = os.stat(download_directory).st_size\n except FileNotFoundError as exc:\n download_directory = f\"{os.path.splitext(download_directory)[0]}.mkv\"\n file_size = os.stat(download_directory).st_size\n if file_size > Config.TG_MAX_FILE_SIZE:\n await bot.edit_message_text(chat_id=update.message.chat.id, text=Translation.RCHD_TG_API_LIMIT, message_id=update.message.id)\n\n else:\n start_time = time.time()\n if tg_send_type == \"video\":\n width, height, duration = await Mdata01(download_directory)\n await bot.send_video(chat_id=update.message.chat.id, video=download_directory, thumb=thumb, caption=description, duration=duration, width=width, height=height, supports_streaming=True, reply_to_message_id=update.message.reply_to_message.id, progress=progress_for_pyrogram, progress_args=(Translation.UPLOAD_START, update.message, 
start_time))\n\n elif tg_send_type == \"audio\":\n duration = await Mdata03(download_directory)\n await bot.send_audio(chat_id=update.message.chat.id, audio=download_directory, thumb=thumb, caption=description, duration=duration, reply_to_message_id=update.message.reply_to_message.id, progress=progress_for_pyrogram, progress_args=(Translation.UPLOAD_START, update.message, start_time))\n\n elif tg_send_type == \"vm\":\n width, duration = await Mdata02(download_directory)\n await bot.send_video_note(chat_id=update.message.chat.id, video_note=download_directory, thumb=thumb, duration=duration, length=width, reply_to_message_id=update.message.reply_to_message.id, progress=progress_for_pyrogram, progress_args=(Translation.UPLOAD_START, update.message, start_time))\n\n else:\n await bot.send_document(chat_id=update.message.chat.id, document=download_directory, thumb=thumb, caption=description, reply_to_message_id=update.message.reply_to_message.id, progress=progress_for_pyrogram, progress_args=(Translation.UPLOAD_START, update.message, start_time))\n\n end_two = datetime.now()\n try:\n os.remove(download_directory)\n except Exception:\n pass\n time_taken_for_download = (end_one - start).seconds\n time_taken_for_upload = (end_two - end_one).seconds\n await bot.edit_message_text(text=Translation.AFTER_SUCCESSFUL_UPLOAD_MSG_WITH_TS.format(time_taken_for_download, time_taken_for_upload), chat_id=update.message.chat.id, message_id=update.message.id, disable_web_page_preview=True)\n\n logger.info(f\"Downloaded in: {str(time_taken_for_download)}\")\n logger.info(f\"Uploaded in: {str(time_taken_for_upload)}\")\n else:\n await bot.edit_message_text(text=Translation.NO_VOID_FORMAT_FOUND.format(\"Incorrect Link\"), chat_id=update.message.chat.id, message_id=update.message.id, disable_web_page_preview=True)"
},
{
"identifier": "youtube_dl_call_back",
"path": "Uploader/button.py",
"snippet": "async def youtube_dl_call_back(bot, update):\n cb_data = update.data\n # youtube_dl extractors\n tg_send_type, youtube_dl_format, youtube_dl_ext, ranom = cb_data.split(\"|\")\n print(cb_data)\n random1 = random_char(5)\n save_ytdl_json_path = Config.DOWNLOAD_LOCATION + \\\n \"/\" + str(update.from_user.id) + f'{ranom}' + \".json\"\n try:\n with open(save_ytdl_json_path, \"r\", encoding=\"utf8\") as f:\n response_json = json.load(f)\n except (FileNotFoundError) as e:\n await update.message.delete()\n return False\n youtube_dl_url = update.message.reply_to_message.text\n custom_file_name = str(response_json.get(\"title\")) + \\\n \"_\" + youtube_dl_format + \".\" + youtube_dl_ext\n youtube_dl_username = None\n youtube_dl_password = None\n if \"|\" in youtube_dl_url:\n url_parts = youtube_dl_url.split(\"|\")\n if len(url_parts) == 2:\n youtube_dl_url = url_parts[0]\n custom_file_name = url_parts[1]\n elif len(url_parts) == 4:\n youtube_dl_url = url_parts[0]\n custom_file_name = url_parts[1]\n youtube_dl_username = url_parts[2]\n youtube_dl_password = url_parts[3]\n else:\n for entity in update.message.reply_to_message.entities:\n if entity.type == \"text_link\":\n youtube_dl_url = entity.url\n elif entity.type == \"url\":\n o = entity.offset\n l = entity.length\n youtube_dl_url = youtube_dl_url[o:o + l]\n if youtube_dl_url is not None:\n youtube_dl_url = youtube_dl_url.strip()\n if custom_file_name is not None:\n custom_file_name = custom_file_name.strip()\n # https://stackoverflow.com/a/761825/4723940\n if youtube_dl_username is not None:\n youtube_dl_username = youtube_dl_username.strip()\n if youtube_dl_password is not None:\n youtube_dl_password = youtube_dl_password.strip()\n logger.info(youtube_dl_url)\n logger.info(custom_file_name)\n else:\n for entity in update.message.reply_to_message.entities:\n if entity.type == \"text_link\":\n youtube_dl_url = entity.url\n elif entity.type == \"url\":\n o = entity.offset\n l = entity.length\n youtube_dl_url = youtube_dl_url[o:o + l]\n await update.message.edit_caption(\n caption=Translation.DOWNLOAD_START.format(custom_file_name)\n\n )\n description = Translation.CUSTOM_CAPTION_UL_FILE\n if \"fulltitle\" in response_json:\n description = response_json[\"fulltitle\"][:1021]\n # escape Markdown and special characters\n tmp_directory_for_each_user = Config.DOWNLOAD_LOCATION + \\\n \"/\" + str(update.from_user.id) + f'{random1}'\n if not os.path.isdir(tmp_directory_for_each_user):\n os.makedirs(tmp_directory_for_each_user)\n download_directory = f\"{tmp_directory_for_each_user}/{custom_file_name}\"\n\n command_to_exec = []\n if tg_send_type == \"audio\":\n command_to_exec = [\n \"yt-dlp\",\n \"-c\",\n \"--max-filesize\", str(Config.TG_MAX_FILE_SIZE),\n \"--bidi-workaround\",\n \"--extract-audio\",\n \"--audio-format\", youtube_dl_ext,\n \"--audio-quality\", youtube_dl_format,\n youtube_dl_url,\n \"-o\", download_directory\n ]\n else:\n # command_to_exec = [\"yt-dlp\", \"-f\", youtube_dl_format, \"--hls-prefer-ffmpeg\", \"--recode-video\", \"mp4\", \"-k\", youtube_dl_url, \"-o\", download_directory]\n minus_f_format = youtube_dl_format\n if \"youtu\" in youtube_dl_url:\n minus_f_format = f\"{youtube_dl_format}+bestaudio\"\n command_to_exec = [\n \"yt-dlp\",\n \"-c\",\n \"--max-filesize\", str(Config.TG_MAX_FILE_SIZE),\n \"--embed-subs\",\n \"-f\", minus_f_format,\n \"--bidi-workaround\",\n youtube_dl_url,\n \"-o\", download_directory\n ]\n\n if Config.HTTP_PROXY != \"\":\n command_to_exec.append(\"--proxy\")\n 
command_to_exec.append(Config.HTTP_PROXY)\n if youtube_dl_username is not None:\n command_to_exec.append(\"--username\")\n command_to_exec.append(youtube_dl_username)\n if youtube_dl_password is not None:\n command_to_exec.append(\"--password\")\n command_to_exec.append(youtube_dl_password)\n command_to_exec.append(\"--no-warnings\")\n # command_to_exec.append(\"--quiet\")\n logger.info(command_to_exec)\n start = datetime.now()\n process = await asyncio.create_subprocess_exec(\n *command_to_exec,\n # stdout must a pipe to be accessible as process.stdout\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n )\n # Wait for the subprocess to finish\n stdout, stderr = await process.communicate()\n e_response = stderr.decode().strip()\n t_response = stdout.decode().strip()\n logger.info(e_response)\n logger.info(t_response)\n ad_string_to_replace = \"please report this issue on https://github.com/kalanakt/All-Url-Uploader/issues\"\n if e_response and ad_string_to_replace in e_response:\n error_message = e_response.replace(ad_string_to_replace, \"\")\n await update.message.edit_caption(\n\n text=error_message\n )\n return False\n\n if t_response:\n logger.info(t_response)\n try:\n os.remove(save_ytdl_json_path)\n except FileNotFoundError as exc:\n pass\n\n end_one = datetime.now()\n time_taken_for_download = (end_one - start).seconds\n file_size = Config.TG_MAX_FILE_SIZE + 1\n try:\n file_size = os.stat(download_directory).st_size\n except FileNotFoundError as exc:\n download_directory = os.path.splitext(\n download_directory)[0] + \".\" + \"mkv\"\n # https://stackoverflow.com/a/678242/4723940\n file_size = os.stat(download_directory).st_size\n\n download_location = f\"{Config.DOWNLOAD_LOCATION}/{update.from_user.id}.jpg\"\n thumb = download_location if os.path.isfile(\n download_location) else None\n\n if ((file_size > Config.TG_MAX_FILE_SIZE)):\n await update.message.edit_caption(\n\n caption=Translation.RCHD_TG_API_LIMIT.format(\n time_taken_for_download, humanbytes(file_size))\n\n )\n else:\n await update.message.edit_caption(\n caption=Translation.UPLOAD_START.format(custom_file_name)\n\n )\n start_time = time.time()\n if tg_send_type == \"video\":\n width, height, duration = await Mdata01(download_directory)\n await update.message.reply_video(\n # chat_id=update.message.chat.id,\n video=download_directory,\n caption=description,\n duration=duration,\n width=width,\n height=height,\n supports_streaming=True,\n thumb=thumb,\n # reply_to_message_id=update.id,\n progress=progress_for_pyrogram,\n progress_args=(\n Translation.UPLOAD_START,\n update.message,\n start_time\n )\n )\n elif tg_send_type == \"audio\":\n duration = await Mdata03(download_directory)\n await update.message.reply_audio(\n # chat_id=update.message.chat.id,\n audio=download_directory,\n caption=description,\n duration=duration,\n thumb=thumb,\n # reply_to_message_id=update.id,\n progress=progress_for_pyrogram,\n progress_args=(\n Translation.UPLOAD_START,\n update.message,\n start_time\n )\n )\n elif tg_send_type == \"vm\":\n width, duration = await Mdata02(download_directory)\n await update.message.reply_video_note(\n # chat_id=update.message.chat.id,\n video_note=download_directory,\n duration=duration,\n length=width,\n thumb=thumb,\n # reply_to_message_id=update.id,\n progress=progress_for_pyrogram,\n progress_args=(\n Translation.UPLOAD_START,\n update.message,\n start_time\n )\n )\n else:\n await update.message.reply_document(\n # chat_id=update.message.chat.id,\n document=download_directory,\n 
caption=description,\n # parse_mode=enums.ParseMode.HTML,\n # reply_to_message_id=update.id,\n thumb=thumb,\n progress=progress_for_pyrogram,\n progress_args=(\n Translation.UPLOAD_START,\n update.message,\n start_time\n )\n )\n\n end_two = datetime.now()\n time_taken_for_upload = (end_two - end_one).seconds\n try:\n shutil.rmtree(tmp_directory_for_each_user)\n except Exception:\n pass\n await update.message.edit_caption(\n caption=Translation.AFTER_SUCCESSFUL_UPLOAD_MSG_WITH_TS.format(\n time_taken_for_download, time_taken_for_upload)\n\n )\n\n logger.info(f\"Downloaded in: {str(time_taken_for_download)}\")\n logger.info(f\"Uploaded in: {str(time_taken_for_upload)}\")"
},
{
"identifier": "Translation",
"path": "Uploader/script.py",
"snippet": "class Translation(object):\n\n START_TEXT = \"\"\"\nHi {} \n\nI am Powerful Url Uploader Bot\n \n\"\"\"\n\n HELP_TEXT = \"\"\"\n\n# Send me the Google Drive | ytdl | direct links.\n\n# Select the desired option.\n\n# Then be relaxed your file will be uploaded soon..\n \n\"\"\"\n\n# give credit to developer\n\n ABOUT_TEXT = \"\"\"\n<b>♻️ My Name</b> : Url Uploader Bot\n\n<b>🌀 Channel</b> : <a href=\"https://t.me/TMWAD\">@TMWAD</a>\n\n<b>🌺 Heroku</b> : <a href=\"https://heroku.com/\">Heroku</a>\n\n<b>📑 Language :</b> <a href=\"https://www.python.org/\">Python 3.10.5</a>\n\n<b>🇵🇲 Framework :</b> <a href=\"https://docs.pyrogram.org/\">Pyrogram 2.0.30</a>\n\n<b>👲 Developer :</b> <a href=\"https://t.me/kinu6\">@kinu6</a>\n\n\"\"\"\n\n PROGRESS = \"\"\"\n🔰 Speed : {3}/s\\n\\n\n🌀 Done : {1}\\n\\n\n🎥 Tᴏᴛᴀʟ sɪᴢᴇ : {2}\\n\\n\n⏳ Tɪᴍᴇ ʟᴇғᴛ : {4}\\n\\n\n\"\"\"\n ID_TEXT = \"\"\"\n🆔 Your Telegram ID 𝐢𝐬 :- <code>{}</code>\n\"\"\"\n\n INFO_TEXT = \"\"\"\n\n 🤹 First Name : <b>{}</b>\n\n 🚴♂️ Second Name : <b>{}</b>\n\n 🧑🏻🎓 Username : <b>@{}</b>\n\n 🆔 Telegram Id : <code>{}</code>\n\n 📇 Profile Link : <b>{}</b>\n\n 📡 Dc : <b>{}</b>\n\n 📑 Language : <b>{}</b>\n\n 👲 Status : <b>{}</b>\n\"\"\"\n\n START_BUTTONS = InlineKeyboardMarkup(\n [[\n InlineKeyboardButton('❓ Help', callback_data='help'),\n InlineKeyboardButton('🦊 About', callback_data='about')\n ], [\n InlineKeyboardButton('📛 Close', callback_data='close')\n ]]\n )\n HELP_BUTTONS = InlineKeyboardMarkup(\n [[\n InlineKeyboardButton('🏠 Home', callback_data='home'),\n InlineKeyboardButton('🦊 About', callback_data='about')\n ], [\n InlineKeyboardButton('📛 Close', callback_data='close')\n ]]\n )\n ABOUT_BUTTONS = InlineKeyboardMarkup(\n [[\n InlineKeyboardButton('🏠 Home', callback_data='home'),\n InlineKeyboardButton('❓ Help', callback_data='help')\n ], [\n InlineKeyboardButton('📛 Close', callback_data='close')\n ]]\n )\n BUTTONS = InlineKeyboardMarkup(\n [[\n InlineKeyboardButton('📛 Close', callback_data='close')\n ]]\n )\n FORMAT_SELECTION = \"Now Select the desired formats\"\n SET_CUSTOM_USERNAME_PASSWORD = \"\"\"\"\"\"\n DOWNLOAD_START = \"Trying to Download ⌛\\n\\n <i>{} </i>\"\n UPLOAD_START = \"<i>{} </i>\\n\\n📤 Uploading Please Wait \"\n RCHD_TG_API_LIMIT = \"Downloaded in {} seconds.\\nDetected File Size: {}\\nSorry. But, I cannot upload files greater than 2GB due to Telegram API limitations.\"\n AFTER_SUCCESSFUL_UPLOAD_MSG_WITH_TS = \"Dᴏᴡɴʟᴏᴀᴅᴇᴅ ɪɴ {} sᴇᴄᴏɴᴅs.\\n\\nTʜᴀɴᴋs Fᴏʀ Usɪɴɢ Mᴇ\\n\\nUᴘʟᴏᴀᴅᴇᴅ ɪɴ {} sᴇᴄᴏɴᴅs\"\n FF_MPEG_DEL_ETED_CUSTOM_MEDIA = \"✅ Media cleared succesfully.\"\n CUSTOM_CAPTION_UL_FILE = \" \"\n NO_VOID_FORMAT_FOUND = \"ERROR... <code>{}</code>\"\n SLOW_URL_DECED = \"Gosh that seems to be a very slow URL. Since you were screwing my home, I am in no mood to download this file. Meanwhile, why don't you try this:==> https://shrtz.me/PtsVnf6 and get me a fast URL so that I can upload to Telegram, without me slowing down for other users.\""
}
] | import os
import logging
from Uploader.functions.display_progress import progress_for_pyrogram, humanbytes
from Uploader.config import Config
from sample_config import Config
from Uploader.dl_button import ddl_call_back
from Uploader.button import youtube_dl_call_back
from Uploader.script import Translation
from pyrogram import Client, types
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton | 5,889 | # MIT License
# Copyright (c) 2022 Hash Minner
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
if bool(os.environ.get("WEBHOOK")):
    pass  # webhook-specific setup not included in this snippet
else:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
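# Single handler for all inline-button presses: menu navigation (home/help/about/close) and download callbacks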
@Client.on_callback_query()
async def button(bot, update):
if update.data == "home":
await update.message.edit(
text=Translation.START_TEXT.format(update.from_user.mention),
reply_markup=Translation.START_BUTTONS,
# disable_web_page_preview=True
)
elif update.data == "help":
await update.message.edit(
text=Translation.HELP_TEXT,
reply_markup=Translation.HELP_BUTTONS,
# disable_web_page_preview=True
)
elif update.data == "about":
await update.message.edit(
text=Translation.ABOUT_TEXT,
reply_markup=Translation.ABOUT_BUTTONS,
# disable_web_page_preview=True
)
elif "close" in update.data:
await update.message.delete(True)
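    # "|"-separated payloads come from yt-dlp format buttons; "="-separated payloads from direct-download buttons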
elif "|" in update.data:
await youtube_dl_call_back(bot, update)
elif "=" in update.data:
| # MIT License
# Copyright (c) 2022 Hash Minner
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
if bool(os.environ.get("WEBHOOK")):
    pass  # webhook-specific setup not included in this snippet
else:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
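# Single handler for all inline-button presses: menu navigation (home/help/about/close) and download callbacks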
@Client.on_callback_query()
async def button(bot, update):
if update.data == "home":
await update.message.edit(
text=Translation.START_TEXT.format(update.from_user.mention),
reply_markup=Translation.START_BUTTONS,
# disable_web_page_preview=True
)
elif update.data == "help":
await update.message.edit(
text=Translation.HELP_TEXT,
reply_markup=Translation.HELP_BUTTONS,
# disable_web_page_preview=True
)
elif update.data == "about":
await update.message.edit(
text=Translation.ABOUT_TEXT,
reply_markup=Translation.ABOUT_BUTTONS,
# disable_web_page_preview=True
)
elif "close" in update.data:
await update.message.delete(True)
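    # "|"-separated payloads come from yt-dlp format buttons; "="-separated payloads from direct-download buttons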
elif "|" in update.data:
await youtube_dl_call_back(bot, update)
elif "=" in update.data: | await ddl_call_back(bot, update) | 2 | 2023-12-09 03:24:55+00:00 | 8k |
Jiawei-Yao0812/PixelFormer_DGR | pixelformer/networks/PixelFormer.py | [
{
"identifier": "SwinTransformer",
"path": "pixelformer/networks/swin_transformer.py",
"snippet": "class SwinTransformer(nn.Module):\n \"\"\" Swin Transformer backbone.\n A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -\n https://arxiv.org/pdf/2103.14030\n\n Args:\n pretrain_img_size (int): Input image size for training the pretrained model,\n used in absolute postion embedding. Default 224.\n patch_size (int | tuple(int)): Patch size. Default: 4.\n in_chans (int): Number of input image channels. Default: 3.\n embed_dim (int): Number of linear projection output channels. Default: 96.\n depths (tuple[int]): Depths of each Swin Transformer stage.\n num_heads (tuple[int]): Number of attention head of each stage.\n window_size (int): Window size. Default: 7.\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.\n qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True\n qk_scale (float): Override default qk scale of head_dim ** -0.5 if set.\n drop_rate (float): Dropout rate.\n attn_drop_rate (float): Attention dropout rate. Default: 0.\n drop_path_rate (float): Stochastic depth rate. Default: 0.2.\n norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.\n ape (bool): If True, add absolute position embedding to the patch embedding. Default: False.\n patch_norm (bool): If True, add normalization after patch embedding. Default: True.\n out_indices (Sequence[int]): Output from which stages.\n frozen_stages (int): Stages to be frozen (stop grad and set eval mode).\n -1 means not freezing any parameters.\n use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.\n \"\"\"\n\n def __init__(self,\n pretrain_img_size=224,\n patch_size=4,\n in_chans=3,\n embed_dim=96,\n depths=[2, 2, 6, 2],\n num_heads=[3, 6, 12, 24],\n window_size=7,\n mlp_ratio=4.,\n qkv_bias=True,\n qk_scale=None,\n drop_rate=0.,\n attn_drop_rate=0.,\n drop_path_rate=0.2,\n norm_layer=nn.LayerNorm,\n ape=False,\n patch_norm=True,\n out_indices=(0, 1, 2, 3),\n frozen_stages=-1,\n use_checkpoint=False):\n super().__init__()\n\n self.pretrain_img_size = pretrain_img_size\n self.num_layers = len(depths)\n self.embed_dim = embed_dim\n self.ape = ape\n self.patch_norm = patch_norm\n self.out_indices = out_indices\n self.frozen_stages = frozen_stages\n\n # split image into non-overlapping patches\n self.patch_embed = PatchEmbed(\n patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,\n norm_layer=norm_layer if self.patch_norm else None)\n\n # absolute position embedding\n if self.ape:\n pretrain_img_size = to_2tuple(pretrain_img_size)\n patch_size = to_2tuple(patch_size)\n patches_resolution = [pretrain_img_size[0] // patch_size[0], pretrain_img_size[1] // patch_size[1]]\n\n self.absolute_pos_embed = nn.Parameter(torch.zeros(1, embed_dim, patches_resolution[0], patches_resolution[1]))\n trunc_normal_(self.absolute_pos_embed, std=.02)\n\n self.pos_drop = nn.Dropout(p=drop_rate)\n\n # stochastic depth\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule\n\n # build layers\n self.layers = nn.ModuleList()\n for i_layer in range(self.num_layers):\n layer = BasicLayer(\n dim=int(embed_dim * 2 ** i_layer),\n depth=depths[i_layer],\n num_heads=num_heads[i_layer],\n window_size=window_size,\n mlp_ratio=mlp_ratio,\n qkv_bias=qkv_bias,\n qk_scale=qk_scale,\n drop=drop_rate,\n attn_drop=attn_drop_rate,\n drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],\n norm_layer=norm_layer,\n downsample=PatchMerging if (i_layer < self.num_layers 
- 1) else None,\n use_checkpoint=use_checkpoint)\n self.layers.append(layer)\n\n num_features = [int(embed_dim * 2 ** i) for i in range(self.num_layers)]\n self.num_features = num_features\n\n # add a norm layer for each output\n for i_layer in out_indices:\n layer = norm_layer(num_features[i_layer])\n layer_name = f'norm{i_layer}'\n self.add_module(layer_name, layer)\n\n self._freeze_stages()\n\n def _freeze_stages(self):\n if self.frozen_stages >= 0:\n self.patch_embed.eval()\n for param in self.patch_embed.parameters():\n param.requires_grad = False\n\n if self.frozen_stages >= 1 and self.ape:\n self.absolute_pos_embed.requires_grad = False\n\n if self.frozen_stages >= 2:\n self.pos_drop.eval()\n for i in range(0, self.frozen_stages - 1):\n m = self.layers[i]\n m.eval()\n for param in m.parameters():\n param.requires_grad = False\n\n def init_weights(self, pretrained=None):\n \"\"\"Initialize the weights in backbone.\n\n Args:\n pretrained (str, optional): Path to pre-trained weights.\n Defaults to None.\n \"\"\"\n\n def _init_weights(m):\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=.02)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n\n if isinstance(pretrained, str):\n self.apply(_init_weights)\n # logger = get_root_logger()\n load_checkpoint(self, pretrained, strict=False)\n elif pretrained is None:\n self.apply(_init_weights)\n else:\n raise TypeError('pretrained must be a str or None')\n\n def forward(self, x):\n \"\"\"Forward function.\"\"\"\n x = self.patch_embed(x)\n\n Wh, Ww = x.size(2), x.size(3)\n if self.ape:\n # interpolate the position embedding to the corresponding size\n absolute_pos_embed = F.interpolate(self.absolute_pos_embed, size=(Wh, Ww), mode='bicubic')\n x = (x + absolute_pos_embed).flatten(2).transpose(1, 2) # B Wh*Ww C\n else:\n x = x.flatten(2).transpose(1, 2)\n x = self.pos_drop(x)\n\n outs = []\n for i in range(self.num_layers):\n layer = self.layers[i]\n x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww)\n\n if i in self.out_indices:\n norm_layer = getattr(self, f'norm{i}')\n x_out = norm_layer(x_out)\n\n out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous()\n outs.append(out)\n\n return tuple(outs)\n\n def train(self, mode=True):\n \"\"\"Convert the model into training mode while keep layers freezed.\"\"\"\n super(SwinTransformer, self).train(mode)\n self._freeze_stages()"
},
{
"identifier": "PSP",
"path": "pixelformer/networks/PQI.py",
"snippet": "class PSP(BaseDecodeHead):\n \"\"\"Unified Perceptual Parsing for Scene Understanding.\n\n This head is the implementation of `UPerNet\n <https://arxiv.org/abs/1807.10221>`_.\n\n Args:\n pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid\n Module applied on the last feature. Default: (1, 2, 3, 6).\n \"\"\"\n\n def __init__(self, pool_scales=(1, 2, 3, 6), **kwargs):\n super(PSP, self).__init__(\n input_transform='multiple_select', **kwargs)\n # PSP Module\n self.psp_modules = PPM(\n pool_scales,\n self.in_channels[-1],\n self.channels,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n act_cfg=self.act_cfg,\n align_corners=self.align_corners)\n self.bottleneck = ConvModule(\n self.in_channels[-1] + len(pool_scales) * self.channels,\n self.channels,\n 3,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n act_cfg=self.act_cfg)\n\n def psp_forward(self, inputs):\n \"\"\"Forward function of PSP module.\"\"\"\n x = inputs[-1]\n psp_outs = [x]\n psp_outs.extend(self.psp_modules(x))\n psp_outs = torch.cat(psp_outs, dim=1)\n output = self.bottleneck(psp_outs)\n\n return output\n\n def forward(self, inputs):\n \"\"\"Forward function.\"\"\"\n inputs = self._transform_inputs(inputs)\n \n return self.psp_forward(inputs)"
},
{
"identifier": "SAM",
"path": "pixelformer/networks/SAM.py",
"snippet": "class SAM(nn.Module):\n def __init__(self,\n input_dim=96,\n embed_dim=96,\n v_dim=64,\n window_size=7,\n num_heads=4,\n patch_size=4,\n in_chans=3,\n norm_layer=nn.LayerNorm,\n patch_norm=True):\n super().__init__()\n\n self.embed_dim = embed_dim\n \n if input_dim != embed_dim:\n self.proj_e = nn.Conv2d(input_dim, embed_dim, 3, padding=1)\n else:\n self.proj_e = None\n\n if v_dim != embed_dim:\n self.proj_q = nn.Conv2d(v_dim, embed_dim, 3, padding=1)\n elif embed_dim % v_dim == 0:\n self.proj_q = None\n self.proj = nn.Conv2d(embed_dim, embed_dim, 3, padding=1)\n\n v_dim = embed_dim\n self.sam_block = SAMBLOCK(\n dim=embed_dim,\n num_heads=num_heads,\n v_dim=v_dim,\n window_size=window_size,\n mlp_ratio=4.,\n qkv_bias=True,\n qk_scale=None,\n drop=0.,\n attn_drop=0.,\n drop_path=0.,\n norm_layer=norm_layer)\n\n layer = norm_layer(embed_dim)\n layer_name = 'norm_sam'\n self.add_module(layer_name, layer)\n\n\n def forward(self, e, q):\n if self.proj_q is not None:\n q = self.proj_q(q)\n if self.proj_e is not None:\n e = self.proj_e(e)\n e_proj = e\n q_proj = q\n\n Wh, Ww = q.size(2), q.size(3)\n q = q.flatten(2).transpose(1, 2)\n e = e.flatten(2).transpose(1, 2)\n\n q_out, H, W = self.sam_block(q, e, Wh, Ww)\n norm_layer = getattr(self, f'norm_sam')\n q_out = norm_layer(q_out)\n q_out = q_out.view(-1, H, W, self.embed_dim).permute(0, 3, 1, 2).contiguous()\n\n return q_out+e_proj+q_proj"
}
] | import torch
import torch.nn as nn
import torch.nn.functional as F
from .swin_transformer import SwinTransformer
from .PQI import PSP
from .SAM import SAM | 3,848 |
########################################################################################################################
class BCP(nn.Module):
""" Multilayer perceptron."""
def __init__(self, max_depth, min_depth, in_features=512, hidden_features=512*4, out_features=256, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
self.min_depth = min_depth
self.max_depth = max_depth
def forward(self, x):
x = torch.mean(x.flatten(start_dim=2), dim = 2)
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
bins = torch.softmax(x, dim=1)
bins = bins / bins.sum(dim=1, keepdim=True)
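        # Scale normalized bins to widths over [min_depth, max_depth]; padding with min_depth lets cumsum yield bin edges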
bin_widths = (self.max_depth - self.min_depth) * bins
bin_widths = nn.functional.pad(bin_widths, (1, 0), mode='constant', value=self.min_depth)
bin_edges = torch.cumsum(bin_widths, dim=1)
centers = 0.5 * (bin_edges[:, :-1] + bin_edges[:, 1:])
n, dout = centers.size()
centers = centers.contiguous().view(n, dout, 1, 1)
return centers
class PixelFormer(nn.Module):
def __init__(self, version=None, inv_depth=False, pretrained=None,
frozen_stages=-1, min_depth=0.1, max_depth=100.0, **kwargs):
super().__init__()
self.inv_depth = inv_depth
self.with_auxiliary_head = False
self.with_neck = False
norm_cfg = dict(type='BN', requires_grad=True)
# norm_cfg = dict(type='GN', requires_grad=True, num_groups=8)
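        # version encodes backbone size and window size, e.g. "large07" -> Swin-Large backbone with window size 7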
window_size = int(version[-2:])
if version[:-2] == 'base':
embed_dim = 128
depths = [2, 2, 18, 2]
num_heads = [4, 8, 16, 32]
in_channels = [128, 256, 512, 1024]
elif version[:-2] == 'large':
embed_dim = 192
depths = [2, 2, 18, 2]
num_heads = [6, 12, 24, 48]
in_channels = [192, 384, 768, 1536]
elif version[:-2] == 'tiny':
embed_dim = 96
depths = [2, 2, 6, 2]
num_heads = [3, 6, 12, 24]
in_channels = [96, 192, 384, 768]
backbone_cfg = dict(
embed_dim=embed_dim,
depths=depths,
num_heads=num_heads,
window_size=window_size,
ape=False,
drop_path_rate=0.3,
patch_norm=True,
use_checkpoint=False,
frozen_stages=frozen_stages
)
embed_dim = 512
decoder_cfg = dict(
in_channels=in_channels,
in_index=[0, 1, 2, 3],
pool_scales=(1, 2, 3, 6),
channels=embed_dim,
dropout_ratio=0.0,
num_classes=32,
norm_cfg=norm_cfg,
align_corners=False
)
self.backbone = SwinTransformer(**backbone_cfg)
v_dim = decoder_cfg['num_classes']*4
win = 7
sam_dims = [128, 256, 512, 1024]
v_dims = [64, 128, 256, embed_dim]
|
########################################################################################################################
class BCP(nn.Module):
""" Multilayer perceptron."""
def __init__(self, max_depth, min_depth, in_features=512, hidden_features=512*4, out_features=256, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
self.min_depth = min_depth
self.max_depth = max_depth
def forward(self, x):
x = torch.mean(x.flatten(start_dim=2), dim = 2)
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
bins = torch.softmax(x, dim=1)
bins = bins / bins.sum(dim=1, keepdim=True)
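        # Scale normalized bins to widths over [min_depth, max_depth]; padding with min_depth lets cumsum yield bin edges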
bin_widths = (self.max_depth - self.min_depth) * bins
bin_widths = nn.functional.pad(bin_widths, (1, 0), mode='constant', value=self.min_depth)
bin_edges = torch.cumsum(bin_widths, dim=1)
centers = 0.5 * (bin_edges[:, :-1] + bin_edges[:, 1:])
n, dout = centers.size()
centers = centers.contiguous().view(n, dout, 1, 1)
return centers
class PixelFormer(nn.Module):
def __init__(self, version=None, inv_depth=False, pretrained=None,
frozen_stages=-1, min_depth=0.1, max_depth=100.0, **kwargs):
super().__init__()
self.inv_depth = inv_depth
self.with_auxiliary_head = False
self.with_neck = False
norm_cfg = dict(type='BN', requires_grad=True)
# norm_cfg = dict(type='GN', requires_grad=True, num_groups=8)
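        # version encodes backbone size and window size, e.g. "large07" -> Swin-Large backbone with window size 7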
window_size = int(version[-2:])
if version[:-2] == 'base':
embed_dim = 128
depths = [2, 2, 18, 2]
num_heads = [4, 8, 16, 32]
in_channels = [128, 256, 512, 1024]
elif version[:-2] == 'large':
embed_dim = 192
depths = [2, 2, 18, 2]
num_heads = [6, 12, 24, 48]
in_channels = [192, 384, 768, 1536]
elif version[:-2] == 'tiny':
embed_dim = 96
depths = [2, 2, 6, 2]
num_heads = [3, 6, 12, 24]
in_channels = [96, 192, 384, 768]
backbone_cfg = dict(
embed_dim=embed_dim,
depths=depths,
num_heads=num_heads,
window_size=window_size,
ape=False,
drop_path_rate=0.3,
patch_norm=True,
use_checkpoint=False,
frozen_stages=frozen_stages
)
embed_dim = 512
decoder_cfg = dict(
in_channels=in_channels,
in_index=[0, 1, 2, 3],
pool_scales=(1, 2, 3, 6),
channels=embed_dim,
dropout_ratio=0.0,
num_classes=32,
norm_cfg=norm_cfg,
align_corners=False
)
self.backbone = SwinTransformer(**backbone_cfg)
v_dim = decoder_cfg['num_classes']*4
win = 7
sam_dims = [128, 256, 512, 1024]
v_dims = [64, 128, 256, embed_dim] | self.sam4 = SAM(input_dim=in_channels[3], embed_dim=sam_dims[3], window_size=win, v_dim=v_dims[3], num_heads=32) | 2 | 2023-12-13 20:50:32+00:00 | 8k |
kramerlab/PeerLearning | run_peer.py | [
{
"identifier": "DQNPeer",
"path": "dqn_peer.py",
"snippet": "class DQNPeer(make_peer_class(DQN)):\n \"\"\"\n A DQN version to be used with peer learning. Therefore, it features\n a critic function\n \"\"\"\n def critic(self, observations, actions):\n q_values = self.q_net(observations).reshape(len(actions), -1, 1)\n tmp = q_values[range(len(actions)), actions, :]\n return tmp, tmp # SAC critic outputs multiple values, so this need\n # to do the same\n\n def get_action(self, *args, **kwargs):\n action, _ = super().get_action(*args, **kwargs)\n return action.reshape(-1), _"
},
{
"identifier": "PeerGroup",
"path": "peer.py",
"snippet": "class PeerGroup:\n \"\"\" A group of peers who train together. \"\"\"\n def __init__(self, peers, use_agent_values=False, init_agent_values=200.,\n lr=0.95, switch_ratio=0, use_advantage=False,\n max_peer_epochs=1_000_000_000):\n \"\"\"\n :param peers: An iterable of peer agents\n :param lr: The learning rate for trust and agent values\n :param switch_ratio: switch_ratio == 0 means no switching\n :param use_advantage: use advantage instead of value for AV updates\n \"\"\"\n self.peers = peers\n self.lr = lr\n self.switch_ratio = switch_ratio\n self.active_peer = None # index of currently learning peer\n self.solo_epoch = False\n self.use_advantage = use_advantage\n self.max_peer_epochs = max_peer_epochs\n\n if use_agent_values:\n self.agent_values = np.full(len(peers), init_agent_values,\n dtype=np.float32)\n key = \"agent_values\"\n\n for peer in peers:\n peer.n_peers = len(peers)\n peer.group = self\n\n # setup agent values\n if use_agent_values:\n peer.peer_values[key] = self.agent_values # noqa (Eq. 6)\n peer.peer_value_functions[key] = self._update_agent_values\n\n def _update_agent_values(self, batch_size=10):\n \"\"\" Updates the agent values with samples from the peers' buffers\"\"\"\n targets = np.zeros_like(self.peers, dtype=np.float32)\n counts = np.zeros_like(self.peers, dtype=np.float32)\n\n for peer in self.peers:\n bs = batch_size // len(self.peers)\n # reward, action, peer, new_obs, old_obs\n if peer.buffer is not None:\n batch = peer.buffer.sample(bs)\n if batch is None: # buffer not sufficiently full\n return\n\n obs = np.array([b[3] for b in batch]).reshape(bs, -1)\n v = peer.value(obs)\n\n if self.use_advantage:\n # previous observations\n prev_obs = np.array([b[4] for b in batch]).reshape(bs, -1)\n prev_v = peer.value(prev_obs)\n else:\n prev_v = np.zeros_like(v) # no advantage (see Eq. 5)\n\n for i in range(len(batch)): # Eq. 8\n target = (batch[i][0] + peer.gamma * v[i]) - prev_v[i]\n counts[batch[i][2]] += 1\n targets[batch[i][2]] += target\n\n # ensure counts are >= 1, don't change these values\n targets[counts == 0] = self.agent_values[counts == 0]\n counts[counts == 0] = 1\n\n targets /= counts\n self.agent_values += self.lr * (targets - self.agent_values) # Eq. 7\n\n def learn(self, n_epochs, max_epoch_len, callbacks, **kwargs):\n \"\"\" The outer peer learning routine. \"\"\"\n assert len(callbacks) == len(self.peers)\n # more solo epochs\n boost_single = 0 < self.switch_ratio < 1\n if boost_single:\n self.switch_ratio = 1 / self.switch_ratio\n\n self.solo_epoch = False\n peer_epochs = 0\n for i in range(n_epochs):\n # don't do peer learning forever\n if peer_epochs < self.max_peer_epochs:\n # ratio of 0 never performs a solo episode\n if (i % (1 + self.switch_ratio) == 1) ^ boost_single:\n self.solo_epoch = True\n else:\n peer_epochs += 1\n else: # budget spent\n self.solo_epoch = True\n\n for p, peer, callback in zip(it.count(), self.peers, callbacks):\n self.active_peer = p\n peer.learn(self.solo_epoch, total_timesteps=max_epoch_len,\n callback=callback, tb_log_name=f\"Peer{p}\",\n reset_num_timesteps=False,\n log_interval=None, **kwargs)\n # update epoch for temperature decay\n peer.epoch += 1\n\n self.active_peer = None\n\n def __len__(self):\n return len(self.peers)"
},
{
"identifier": "make_peer_class",
"path": "peer.py",
"snippet": "def make_peer_class(cls: Type[OffPolicyAlgorithm]):\n \"\"\" Creates a mixin with the corresponding algorithm class.\n :param cls: The learning algorithm (needs to have a callable critic).\n :return: The mixed in peer agent class.\n \"\"\"\n\n class Peer(cls, ABC):\n \"\"\" Abstract Peer class\n needs to be mixed with a suitable algorithm. \"\"\"\n def __init__(self, temperature, temp_decay, algo_args, env,\n use_trust=False, use_critic=False, init_trust_values=200,\n buffer_size=1000, follow_steps=10, seed=None,\n use_trust_buffer=True, solo_training=False,\n peers_sample_with_noise=False,\n sample_random_actions=False, sample_from_suggestions=True,\n epsilon=0.0, env_args=None, only_follow_peers=False):\n if env_args is None:\n env_args = {}\n super(Peer, self).__init__(**algo_args,\n env=make_env(env, **env_args),\n seed=seed)\n # create noise matrix on the correct device\n if hasattr(self.actor, \"reset_noise\"):\n self.actor.reset_noise(self.env.num_envs)\n\n self.solo_training = solo_training\n self.init_values = dict()\n # store all peer values, e.g., trust and agent values in a dict\n self.peer_values = dict()\n # store corresponding functions as well\n self.peer_value_functions = dict()\n\n self.buffer = SuggestionBuffer(buffer_size)\n self.followed_peer = None\n self.__n_peers = None\n self.group = None\n self.epoch = 0\n\n if sample_random_actions:\n epsilon = 1.0\n\n if not solo_training:\n # all peers suggest without noise\n self.peers_sample_with_noise = peers_sample_with_noise\n # actions are sampled instead of taken greedily\n self.sample_actions = sample_from_suggestions\n self.epsilon = epsilon\n self.use_critic = use_critic\n\n if use_trust:\n self.trust_values = np.array([])\n self.init_values[\"trust\"] = init_trust_values\n self.peer_value_functions[\"trust\"] = self._update_trust\n\n self.use_buffer_for_trust = use_trust_buffer\n\n # sampling parameters\n self.temperature = temperature\n self.temp_decay = temp_decay\n\n self.follow_steps = follow_steps\n self.steps_followed = 0\n\n self.only_follow_peers = only_follow_peers\n\n @property\n def n_peers(self):\n return self.__n_peers\n\n @n_peers.setter\n def n_peers(self, n_peers):\n self.__n_peers = n_peers\n\n # Also reset the trust values\n if \"trust\" in self.init_values.keys():\n self.trust_values = np.full(self.__n_peers,\n self.init_values[\"trust\"],\n dtype=np.float32)\n self.peer_values[\"trust\"] = self.trust_values\n\n def critique(self, observations, actions) -> np.array:\n \"\"\" Evaluates the actions with the critic. \"\"\"\n with torch.no_grad():\n a = torch.as_tensor(actions, device=self.device)\n o = torch.as_tensor(observations, device=self.device)\n\n # Compute the next Q values: min over all critic targets\n q_values = torch.cat(self.critic(o, a), dim=1) # noqa\n q_values, _ = torch.min(q_values, dim=1, keepdim=True)\n return q_values.cpu().numpy()\n\n def get_action(self, obs, deterministic=False):\n \"\"\" The core function of peer learning acquires the suggested\n actions of the peers and chooses one based on the settings. 
\"\"\"\n # follow peer for defined number of steps\n followed_steps = self.steps_followed\n self.steps_followed += 1\n self.steps_followed %= self.follow_steps\n if 0 < followed_steps:\n peer = self.group.peers[self.followed_peer]\n det = (peer != self and not self.peers_sample_with_noise) or \\\n deterministic\n action, _ = peer.policy.predict(obs, deterministic=det)\n return action, None\n\n # get actions\n actions = []\n for peer in self.group.peers:\n # self always uses exploration, the suggestions of the other\n # peers only do if the critic method isn't used.\n det = (peer != self and not self.peers_sample_with_noise) or \\\n deterministic\n action, _ = peer.policy.predict(obs, deterministic=det)\n actions.append(action)\n actions = np.asarray(actions).squeeze(1)\n\n # critic (Eq. 3)\n if self.use_critic:\n observations = np.tile(obs, (self.n_peers, 1))\n q_values = self.critique(observations, actions).reshape(-1)\n self.peer_values['critic'] = q_values # part of Eq. 9\n\n # calculate peer values, e.g., trust and agent values\n values = np.zeros(self.n_peers)\n for key in self.peer_values.keys():\n # part of Eq. 9 incl. Footnote 7\n values += self.__normalize(self.peer_values[key])\n\n if self.sample_actions:\n # sample action from probability distribution (Eq. 2)\n temp = self.temperature * np.exp(-self.temp_decay * self.epoch)\n p = np.exp(values / temp)\n p /= np.sum(p)\n self.followed_peer = np.random.choice(self.n_peers, p=p)\n elif self.only_follow_peers:\n p = np.full(self.n_peers, 1 / (self.n_peers - 1))\n p[self.group.peers.index(self)] = 0\n self.followed_peer = np.random.choice(self.n_peers, p=p)\n else:\n # act (epsilon) greedily\n if np.random.random(1) >= self.epsilon:\n self.followed_peer = np.argmax(values)\n else:\n self.followed_peer = np.random.choice(self.n_peers)\n\n action = actions[self.followed_peer].reshape(1, -1)\n\n return action, None\n\n @staticmethod\n def __normalize(values):\n \"\"\" Normalize the values based on their absolute maximum. \"\"\"\n return values / np.max(np.abs(values))\n\n def value(self, observations) -> np.ndarray:\n \"\"\" Calculates the value of the observations. \"\"\"\n actions, _ = self.policy.predict(observations, False)\n return self.critique(observations, actions)\n\n def _update_trust(self, batch_size=10):\n \"\"\" Updates the trust values with samples from the buffer.\n (Eq. 5 and 8)\n \"\"\"\n if self.use_buffer_for_trust:\n batch = self.buffer.sample(batch_size)\n else:\n batch = self.buffer.latest()\n batch_size = 1\n if batch is None: # buffer not sufficiently full\n return\n\n # next observations\n obs = np.array([b[3] for b in batch]).reshape(batch_size, -1)\n v = self.value(obs)\n\n if self.group.use_advantage:\n # previous observations\n prev_obs = np.array([b[4] for b in batch]).reshape(batch_size,\n -1)\n prev_v = self.value(prev_obs)\n else:\n prev_v = np.zeros_like(v) # no comparison to own act (Eq. 5)\n\n targets = np.zeros(self.n_peers)\n counts = np.zeros(self.n_peers)\n for i in range(batch_size):\n target = (batch[i][0] + self.gamma * v[i]) - prev_v[i] # Eq. 8\n counts[batch[i][2]] += 1\n targets[batch[i][2]] += target\n\n # ensure counts are >= 1, don't change these values\n targets[counts == 0] = self.trust_values[counts == 0]\n counts[counts == 0] = 1\n\n targets /= counts\n # Eq. 4\n self.trust_values += self.group.lr * (targets - self.trust_values)\n\n def _on_step(self):\n \"\"\" Adds updates of the peer values, e.g., trust or agent\n values. 
\"\"\"\n super(Peer, self)._on_step() # noqa\n\n if not self.group.solo_epoch:\n # update values, e.g., trust and agent values after ever step\n for key in self.peer_value_functions.keys():\n self.peer_value_functions[key]()\n\n def _store_transition(self, replay_buffer, buffer_action, new_obs,\n reward, dones, infos):\n \"\"\" Adds suggestion buffer handling. \"\"\"\n\n # get previous observations\n old_obs = self._last_obs\n\n super(Peer, self)._store_transition(replay_buffer, # noqa\n buffer_action, new_obs,\n reward, dones, infos)\n\n if not self.group.solo_epoch:\n # store transition in suggestion buffer as well\n self.buffer.add(reward, buffer_action, self.followed_peer,\n new_obs, old_obs)\n\n def _predict_train(self, observation, state=None,\n episode_start=None, deterministic=False):\n \"\"\" The action selection during training involves the peers. \"\"\"\n if deterministic:\n return self.policy.predict(observation, state=state,\n episode_start=episode_start,\n deterministic=deterministic)\n else:\n return self.get_action(observation)\n\n def learn(self, solo_episode=False, **kwargs):\n \"\"\" Adds action selection with help of peers. \"\"\"\n predict = self.predict # safe for later\n\n # use peer suggestions only when wanted\n if not (self.solo_training or solo_episode):\n self.predict = self._predict_train\n else:\n self.followed_peer = self.group.peers.index(self)\n\n result = super(Peer, self).learn(**kwargs)\n\n self.predict = predict # noqa\n return result\n\n def _excluded_save_params(self):\n \"\"\" Excludes attributes that are functions. Otherwise, the save\n method fails. \"\"\"\n ex_list = super(Peer, self)._excluded_save_params()\n ex_list.extend([\"peer_value_functions\", \"peer_values\",\n \"group\", \"predict\"])\n return ex_list\n\n return Peer"
},
{
"identifier": "PeerEvalCallback",
"path": "callbacks.py",
"snippet": "class PeerEvalCallback(EvalCallback):\n \"\"\"\n Callback to track collective measurements about peers.\n\n .. warning::\n\n When using multiple environments, each call to ``env.step()``\n will effectively correspond to ``n_envs`` steps.\n To account for that, you can use\n ``eval_freq = max(eval_freq // n_envs, 1)``\n\n :param peer_group: The group of peers\n :param eval_env: The environment used for initialization\n :param n_eval_episodes: The number of episodes to test the agent\n :param eval_freq: Evaluate the agent every ``eval_freq`` call of the\n callback.\n :param log_path: Path to a folder where the evaluations\n (``evaluations.npz``) will be saved. It will be updated at each\n evaluation.\n :param deterministic: Whether the evaluation should\n use a stochastic or deterministic actions.\n :param render: Whether to render or not the environment during evaluation\n :param verbose:\n :param warn: Passed to ``evaluate_policy`` (warns if ``eval_env`` has\n not been wrapped with a Monitor wrapper)\n \"\"\"\n\n def __init__(\n self,\n peer_group: PeerGroup,\n eval_envs: List[Union[gym.Env, VecEnv]],\n n_samples=100,\n **kwargs\n ):\n self.peer_group = peer_group\n self.eval_envs = eval_envs\n self.n_samples = n_samples\n\n self.last_logged_matrix = None\n self.follow_matrix = np.zeros((len(peer_group), len(peer_group)))\n\n self.start_time = time.time()\n\n super().__init__(**kwargs)\n\n def _on_step(self) -> bool:\n self.accumulate_followed_peers() # needs to be done at every step\n\n # log time for debugging etc.\n self.logger.record(\"time/time_elapsed\",\n time.time() - self.start_time,\n exclude=\"tensorboard\")\n\n super()._on_step()\n if self.eval_freq > 0 and self.n_calls % self.eval_freq == 0:\n if 'agent_values' in self.peer_group.__dict__:\n self.track_agent_values()\n if 'trust_values' in self.peer_group.peers[0].__dict__:\n self.track_trust_values()\n self.track_followed_agent(self.peer_group.active_peer)\n\n peer = self.peer_group.active_peer\n eval_values = {\n f\"Peer{peer}_0/eval/mean_reward\": self.last_mean_reward,\n }\n if peer == len(self.peer_group) - 1:\n eval_values[\"global_step\"] = self.n_calls\n wandb.log(eval_values, commit=True)\n else:\n wandb.log(eval_values, commit=False)\n return True\n\n def track_agent_values(self):\n n_agents = len(self.peer_group.peers)\n for i in range(n_agents):\n agent_value = self.peer_group.agent_values[i]\n wandb.log({'Peer{}_0/eval/agent_value'.format(i): agent_value},\n commit=False)\n return True\n\n def track_trust_values(self):\n peer = self.peer_group.active_peer\n trust_i = self.peer_group.peers[peer].trust_values\n for j, el in np.ndenumerate(trust_i):\n wandb.log({'Peer{}_0/eval/trust_{}'.format(peer, j[0]): el},\n commit=False)\n return True\n\n def accumulate_followed_peers(self):\n peer = self.peer_group.active_peer\n followed_peer = self.peer_group.peers[peer].followed_peer\n if followed_peer is not None:\n self.follow_matrix[peer, followed_peer] += 1\n\n def track_followed_agent(self, active_peer):\n if self.last_logged_matrix is None:\n diff = self.follow_matrix\n else:\n diff = self.follow_matrix - self.last_logged_matrix\n\n for (followed_peer,), count in np.ndenumerate(\n self.follow_matrix[active_peer]):\n wandb.log({'Peer{}_0/eval/follow_count{}'.format(\n active_peer, followed_peer): count}, commit=False)\n # also log difference\n wandb.log({'Peer{}_0/eval/follow_count_{}diff'.format(\n active_peer, followed_peer): diff[active_peer, followed_peer]},\n commit=False)\n self.last_logged_matrix = 
np.copy(self.follow_matrix)\n\n def commit_global_step(self, timesteps):\n if self.peer_group.active_peer == len(self.peer_group) - 1:\n eval_values = {\"global_step\": self.n_calls + self.eval_freq}\n wandb.log(eval_values, commit=True)\n\n self.n_calls += timesteps"
},
{
"identifier": "str2bool",
"path": "utils.py",
"snippet": "def str2bool(v):\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')"
},
{
"identifier": "add_default_values_to_parser",
"path": "utils.py",
"snippet": "def add_default_values_to_parser(parser):\n parser.add_argument(\"--job_id\", type=str,\n default=wandb.util.generate_id())\n parser.add_argument(\"--agent-count\", type=int, help=\"Number of agents.\",\n default=4)\n parser.add_argument(\"--device\", type=str, default=\"auto\",\n choices=[\"cpu\", \"cuda\", \"auto\"],\n help=\"Device to use, either 'cpu', 'cuda' for GPU or \"\n \"'auto'.\")\n parser.add_argument(\"--env\", type=str, default=\"HalfCheetahBulletEnv-v0\",\n help=\"OpenAI Gym environment to perform algorithm on.\")\n parser.add_argument(\"--env_args\", action=StoreDictKeyPair,\n nargs='*', metavar=\"KEY=VAL\", default={})\n parser.add_argument(\"--seed\", type=int, default=1,\n help=\"Random seed in [0, 2 ** 32)\")\n parser.add_argument(\"--wandb\", type=str, default='offline',\n choices=[\"online\", \"offline\", \"disabled\"])\n parser.add_argument(\"--discrete-actions\", type=str2bool, nargs=\"?\",\n const=False, default=False)\n parser.add_argument(\"--save-dir\", type=Path,\n default=Path.cwd().joinpath(\"Experiments\"))\n\n # Agents\n agent_parser = parser.add_argument_group(\"Agent\")\n agent_parser.add_argument(\"--mix-agents\", type=str, nargs='*',\n default=[\"SAC\"])\n\n agent_parser.add_argument(\"--net-arch\", type=int, nargs='*',\n action='append')\n agent_parser.add_argument(\"--load_paths\", type=str, nargs='*',\n default=[])\n agent_parser.add_argument(\"--agents_to_store\", type=int, nargs='*',\n default=[])\n\n return parser"
},
{
"identifier": "log_reward_avg_in_wandb",
"path": "utils.py",
"snippet": "def log_reward_avg_in_wandb(callbacks):\n results = []\n for callback in callbacks:\n eval_callback = callback[-1]\n result = eval_callback.evaluations_results\n results.append(np.mean(result))\n wandb.log({'reward_avg': np.mean(results)})"
},
{
"identifier": "add_default_values_to_train_parser",
"path": "utils.py",
"snippet": "def add_default_values_to_train_parser(training_parser):\n training_parser.add_argument(\"--steps\", type=int, default=3_000_000,\n help=\"Total number of time steps to train \"\n \"the agent.\")\n training_parser.add_argument(\"--eval-interval\", type=int,\n default=10_000,\n help=\"Interval in time steps between \"\n \"evaluations.\")\n training_parser.add_argument(\"--n-eval-episodes\", type=int,\n default=10,\n help=\"Number of episodes for each \"\n \"evaluation.\")\n training_parser.add_argument(\"--buffer-size\", type=int,\n default=1_000_000)\n training_parser.add_argument(\"--buffer-start-size\", type=int,\n default=1_000,\n help=\"Minimum replay buffer size before \"\n \"performing gradient updates.\")\n training_parser.add_argument(\"--batch-size\", type=int,\n default=100,\n help=\"Minibatch size\")\n training_parser.add_argument(\"--min-epoch-length\", type=int,\n default=10_000,\n help=\"Minimal length of a training_parser \"\n \"epoch.\")\n training_parser.add_argument(\"--learning_rate\", type=str2func, nargs='*',\n default=[3e-4],\n help='Learning rate for adam optimizer, '\n 'the same learning rate will be used '\n 'for all networks (Q-Values, Actor and '\n 'Value function) it can be a function'\n ' of the current progress remaining '\n '(from 1 to 0)')\n training_parser.add_argument(\"--tau\", type=float, default=0.005)\n training_parser.add_argument(\"--gamma\", type=float, default=0.99)\n training_parser.add_argument(\"--gradient_steps\", type=int,\n default=1)\n training_parser.add_argument(\"--train_freq\", type=int,\n default=1)\n training_parser.add_argument(\"--target_update_interval\", type=int,\n default=1)\n dqn_parser = training_parser.add_argument_group(\"DQN\")\n dqn_parser.add_argument(\"--exploration-fraction\", type=float, default=0.1)\n dqn_parser.add_argument(\"--exploration-final-eps\", type=float,\n default=0.05)\n return training_parser"
},
{
"identifier": "new_random_seed",
"path": "utils.py",
"snippet": "def new_random_seed():\n return np.random.randint(np.iinfo(np.int32).max)"
},
{
"identifier": "make_env",
"path": "utils.py",
"snippet": "def make_env(env_str, n_envs=1, **env_args):\n envs = []\n for _ in range(n_envs):\n def env_func():\n env = Monitor(gym.make(env_str, **env_args))\n env.seed(new_random_seed())\n return env\n\n envs.append(env_func)\n return DummyVecEnv(envs)"
},
{
"identifier": "ControllerArguments",
"path": "utils.py",
"snippet": "class ControllerArguments:\n def __init__(self, number_agents):\n self.number_agents = number_agents\n\n def argument_for_every_agent(self, arguments, i):\n if type(arguments) is list:\n if len(arguments) == 1:\n return arguments[0]\n elif len(arguments) == self.number_agents:\n return arguments[i]\n else:\n raise AssertionError(f'number of arguments ({len(arguments)}) '\n f'has to be 1 or == number of agents '\n f'({self.number_agents}) input is'\n f' {arguments}')\n else:\n raise AssertionError(f'input is not a list input is{arguments} '\n f'{type(arguments)}')"
}
] | import argparse
import datetime
import gym
import wandb
import predefined_agents # noqa: F401
import env as local_envs # noqa: F401
from pathlib import Path
from stable_baselines3 import SAC, TD3
from stable_baselines3.common.utils import set_random_seed, \
update_learning_rate
from wandb.integration.sb3 import WandbCallback
from dqn_peer import DQNPeer
from peer import PeerGroup, make_peer_class
from callbacks import PeerEvalCallback
from utils import str2bool, add_default_values_to_parser, \
log_reward_avg_in_wandb, add_default_values_to_train_parser, \
new_random_seed, make_env, ControllerArguments | 6,437 |
def add_args():
# create arg parser
parser = argparse.ArgumentParser(description="Peer learning.")
# General
parser.add_argument("--save-name", type=str, default="delete_me")
parser = add_default_values_to_parser(parser)
# Training
training = parser.add_argument_group("Training")
|
def add_args():
# create arg parser
parser = argparse.ArgumentParser(description="Peer learning.")
# General
parser.add_argument("--save-name", type=str, default="delete_me")
parser = add_default_values_to_parser(parser)
# Training
training = parser.add_argument_group("Training") | add_default_values_to_train_parser(training) | 7 | 2023-12-13 10:40:55+00:00 | 8k |
ZS-YANG/FemtoDet-v3 | demo/large_image_demo.py | [
{
"identifier": "inference_detector",
"path": "mmdet/apis/inference.py",
"snippet": "def inference_detector(\n model: nn.Module,\n imgs: ImagesType,\n test_pipeline: Optional[Compose] = None,\n text_prompt: Optional[str] = None,\n custom_entities: bool = False,\n) -> Union[DetDataSample, SampleList]:\n \"\"\"Inference image(s) with the detector.\n\n Args:\n model (nn.Module): The loaded detector.\n imgs (str, ndarray, Sequence[str/ndarray]):\n Either image files or loaded images.\n test_pipeline (:obj:`Compose`): Test pipeline.\n\n Returns:\n :obj:`DetDataSample` or list[:obj:`DetDataSample`]:\n If imgs is a list or tuple, the same length list type results\n will be returned, otherwise return the detection results directly.\n \"\"\"\n\n if isinstance(imgs, (list, tuple)):\n is_batch = True\n else:\n imgs = [imgs]\n is_batch = False\n\n cfg = model.cfg\n\n if test_pipeline is None:\n cfg = cfg.copy()\n test_pipeline = get_test_pipeline_cfg(cfg)\n if isinstance(imgs[0], np.ndarray):\n # Calling this method across libraries will result\n # in module unregistered error if not prefixed with mmdet.\n test_pipeline[0].type = 'mmdet.LoadImageFromNDArray'\n\n test_pipeline = Compose(test_pipeline)\n\n if model.data_preprocessor.device.type == 'cpu':\n for m in model.modules():\n assert not isinstance(\n m, RoIPool\n ), 'CPU inference with RoIPool is not supported currently.'\n\n result_list = []\n for i, img in enumerate(imgs):\n # prepare data\n if isinstance(img, np.ndarray):\n # TODO: remove img_id.\n data_ = dict(img=img, img_id=0)\n else:\n # TODO: remove img_id.\n data_ = dict(img_path=img, img_id=0)\n\n if text_prompt:\n data_['text'] = text_prompt\n data_['custom_entities'] = custom_entities\n\n # build the data pipeline\n data_ = test_pipeline(data_)\n\n data_['inputs'] = [data_['inputs']]\n data_['data_samples'] = [data_['data_samples']]\n\n # forward the model\n with torch.no_grad():\n results = model.test_step(data_)[0]\n\n result_list.append(results)\n\n if not is_batch:\n return result_list[0]\n else:\n return result_list"
},
{
"identifier": "init_detector",
"path": "mmdet/apis/inference.py",
"snippet": "def init_detector(\n config: Union[str, Path, Config],\n checkpoint: Optional[str] = None,\n palette: str = 'none',\n device: str = 'cuda:0',\n cfg_options: Optional[dict] = None,\n) -> nn.Module:\n \"\"\"Initialize a detector from config file.\n\n Args:\n config (str, :obj:`Path`, or :obj:`mmengine.Config`): Config file path,\n :obj:`Path`, or the config object.\n checkpoint (str, optional): Checkpoint path. If left as None, the model\n will not load any weights.\n palette (str): Color palette used for visualization. If palette\n is stored in checkpoint, use checkpoint's palette first, otherwise\n use externally passed palette. Currently, supports 'coco', 'voc',\n 'citys' and 'random'. Defaults to none.\n device (str): The device where the anchors will be put on.\n Defaults to cuda:0.\n cfg_options (dict, optional): Options to override some settings in\n the used config.\n\n Returns:\n nn.Module: The constructed detector.\n \"\"\"\n if isinstance(config, (str, Path)):\n config = Config.fromfile(config)\n elif not isinstance(config, Config):\n raise TypeError('config must be a filename or Config object, '\n f'but got {type(config)}')\n if cfg_options is not None:\n config.merge_from_dict(cfg_options)\n elif 'init_cfg' in config.model.backbone:\n config.model.backbone.init_cfg = None\n\n scope = config.get('default_scope', 'mmdet')\n if scope is not None:\n init_default_scope(config.get('default_scope', 'mmdet'))\n\n model = MODELS.build(config.model)\n model = revert_sync_batchnorm(model)\n if checkpoint is None:\n warnings.simplefilter('once')\n warnings.warn('checkpoint is None, use COCO classes by default.')\n model.dataset_meta = {'classes': get_classes('coco')}\n else:\n checkpoint = load_checkpoint(model, checkpoint, map_location='cpu')\n # Weights converted from elsewhere may not have meta fields.\n checkpoint_meta = checkpoint.get('meta', {})\n\n # save the dataset_meta in the model for convenience\n if 'dataset_meta' in checkpoint_meta:\n # mmdet 3.x, all keys should be lowercase\n model.dataset_meta = {\n k.lower(): v\n for k, v in checkpoint_meta['dataset_meta'].items()\n }\n elif 'CLASSES' in checkpoint_meta:\n # < mmdet 3.x\n classes = checkpoint_meta['CLASSES']\n model.dataset_meta = {'classes': classes}\n else:\n warnings.simplefilter('once')\n warnings.warn(\n 'dataset_meta or class names are not saved in the '\n 'checkpoint\\'s meta data, use COCO classes by default.')\n model.dataset_meta = {'classes': get_classes('coco')}\n\n # Priority: args.palette -> config -> checkpoint\n if palette != 'none':\n model.dataset_meta['palette'] = palette\n else:\n test_dataset_cfg = copy.deepcopy(config.test_dataloader.dataset)\n # lazy init. We only need the metainfo.\n test_dataset_cfg['lazy_init'] = True\n metainfo = DATASETS.build(test_dataset_cfg).metainfo\n cfg_palette = metainfo.get('palette', None)\n if cfg_palette is not None:\n model.dataset_meta['palette'] = cfg_palette\n else:\n if 'palette' not in model.dataset_meta:\n warnings.warn(\n 'palette does not exist, random is used by default. '\n 'You can also set the palette to customize.')\n model.dataset_meta['palette'] = 'random'\n\n model.cfg = config # save the config in the model for convenience\n model.to(device)\n model.eval()\n return model"
},
{
"identifier": "VISUALIZERS",
"path": "mmdet/registry.py",
"snippet": "VISUALIZERS = Registry(\n 'visualizer',\n parent=MMENGINE_VISUALIZERS,\n locations=['mmdet.visualization'])"
},
{
"identifier": "merge_results_by_nms",
"path": "mmdet/utils/large_image.py",
"snippet": "def merge_results_by_nms(results: SampleList, offsets: Sequence[Tuple[int,\n int]],\n src_image_shape: Tuple[int, int],\n nms_cfg: dict) -> DetDataSample:\n \"\"\"Merge patch results by nms.\n\n Args:\n results (List[:obj:`DetDataSample`]): A list of patch results.\n offsets (Sequence[Tuple[int, int]]): Positions of the left top points\n of patches.\n src_image_shape (Tuple[int, int]): A (height, width) tuple of the large\n image's width and height.\n nms_cfg (dict): it should specify nms type and other parameters\n like `iou_threshold`.\n Returns:\n :obj:`DetDataSample`: merged results.\n \"\"\"\n shifted_instances = shift_predictions(results, offsets, src_image_shape)\n\n _, keeps = batched_nms(\n boxes=shifted_instances.bboxes,\n scores=shifted_instances.scores,\n idxs=shifted_instances.labels,\n nms_cfg=nms_cfg)\n merged_instances = shifted_instances[keeps]\n\n merged_result = results[0].clone()\n merged_result.pred_instances = merged_instances\n return merged_result"
},
{
"identifier": "shift_predictions",
"path": "mmdet/utils/large_image.py",
"snippet": "def shift_predictions(det_data_samples: SampleList,\n offsets: Sequence[Tuple[int, int]],\n src_image_shape: Tuple[int, int]) -> SampleList:\n \"\"\"Shift predictions to the original image.\n\n Args:\n det_data_samples (List[:obj:`DetDataSample`]): A list of patch results.\n offsets (Sequence[Tuple[int, int]]): Positions of the left top points\n of patches.\n src_image_shape (Tuple[int, int]): A (height, width) tuple of the large\n image's width and height.\n Returns:\n (List[:obj:`DetDataSample`]): shifted results.\n \"\"\"\n try:\n from sahi.slicing import shift_bboxes, shift_masks\n except ImportError:\n raise ImportError('Please run \"pip install -U sahi\" '\n 'to install sahi first for large image inference.')\n\n assert len(det_data_samples) == len(\n offsets), 'The `results` should has the ' 'same length with `offsets`.'\n shifted_predictions = []\n for det_data_sample, offset in zip(det_data_samples, offsets):\n pred_inst = det_data_sample.pred_instances.clone()\n\n # Check bbox type\n if pred_inst.bboxes.size(-1) == 4:\n # Horizontal bboxes\n shifted_bboxes = shift_bboxes(pred_inst.bboxes, offset)\n elif pred_inst.bboxes.size(-1) == 5:\n # Rotated bboxes\n shifted_bboxes = shift_rbboxes(pred_inst.bboxes, offset)\n else:\n raise NotImplementedError\n\n # shift bboxes and masks\n pred_inst.bboxes = shifted_bboxes\n if 'masks' in det_data_sample:\n pred_inst.masks = shift_masks(pred_inst.masks, offset,\n src_image_shape)\n\n shifted_predictions.append(pred_inst.clone())\n\n shifted_predictions = InstanceData.cat(shifted_predictions)\n\n return shifted_predictions"
},
{
"identifier": "get_file_list",
"path": "mmdet/utils/misc.py",
"snippet": "def get_file_list(source_root: str) -> [list, dict]:\n \"\"\"Get file list.\n\n Args:\n source_root (str): image or video source path\n\n Return:\n source_file_path_list (list): A list for all source file.\n source_type (dict): Source type: file or url or dir.\n \"\"\"\n is_dir = os.path.isdir(source_root)\n is_url = source_root.startswith(('http:/', 'https:/'))\n is_file = os.path.splitext(source_root)[-1].lower() in IMG_EXTENSIONS\n\n source_file_path_list = []\n if is_dir:\n # when input source is dir\n for file in scandir(source_root, IMG_EXTENSIONS, recursive=True):\n source_file_path_list.append(os.path.join(source_root, file))\n elif is_url:\n # when input source is url\n filename = os.path.basename(\n urllib.parse.unquote(source_root).split('?')[0])\n file_save_path = os.path.join(os.getcwd(), filename)\n print(f'Downloading source file to {file_save_path}')\n torch.hub.download_url_to_file(source_root, file_save_path)\n source_file_path_list = [file_save_path]\n elif is_file:\n # when input source is single image\n source_file_path_list = [source_root]\n else:\n print('Cannot find image file.')\n\n source_type = dict(is_dir=is_dir, is_url=is_url, is_file=is_file)\n\n return source_file_path_list, source_type"
}
] | import os
import random
import mmcv
import numpy as np
from argparse import ArgumentParser
from pathlib import Path
from mmengine.config import Config, ConfigDict
from mmengine.logging import print_log
from mmengine.utils import ProgressBar
from mmdet.apis import inference_detector, init_detector
from sahi.slicing import slice_image
from mmdet.registry import VISUALIZERS
from mmdet.utils.large_image import merge_results_by_nms, shift_predictions
from mmdet.utils.misc import get_file_list | 4,768 | 'This may take a while.')
progress_bar = ProgressBar(len(files))
for file in files:
# read image
img = mmcv.imread(file)
# arrange slices
height, width = img.shape[:2]
sliced_image_object = slice_image(
img,
slice_height=args.patch_size,
slice_width=args.patch_size,
auto_slice_resolution=False,
overlap_height_ratio=args.patch_overlap_ratio,
overlap_width_ratio=args.patch_overlap_ratio,
)
# perform sliced inference
slice_results = []
start = 0
while True:
# prepare batch slices
end = min(start + args.batch_size, len(sliced_image_object))
images = []
for sliced_image in sliced_image_object.images[start:end]:
images.append(sliced_image)
# forward the model
slice_results.extend(inference_detector(model, images))
if end >= len(sliced_image_object):
break
start += args.batch_size
if source_type['is_dir']:
filename = os.path.relpath(file, args.img).replace('/', '_')
else:
filename = os.path.basename(file)
img = mmcv.imconvert(img, 'bgr', 'rgb')
out_file = None if args.show else os.path.join(args.out_dir, filename)
# export debug images
if args.debug:
# export sliced image results
name, suffix = os.path.splitext(filename)
shifted_instances = shift_predictions(
slice_results,
sliced_image_object.starting_pixels,
src_image_shape=(height, width))
merged_result = slice_results[0].clone()
merged_result.pred_instances = shifted_instances
debug_file_name = name + '_debug' + suffix
debug_out_file = None if args.show else os.path.join(
args.out_dir, debug_file_name)
visualizer.set_image(img.copy())
debug_grids = []
for starting_point in sliced_image_object.starting_pixels:
start_point_x = starting_point[0]
start_point_y = starting_point[1]
end_point_x = start_point_x + args.patch_size
end_point_y = start_point_y + args.patch_size
debug_grids.append(
[start_point_x, start_point_y, end_point_x, end_point_y])
debug_grids = np.array(debug_grids)
debug_grids[:, 0::2] = np.clip(debug_grids[:, 0::2], 1,
img.shape[1] - 1)
debug_grids[:, 1::2] = np.clip(debug_grids[:, 1::2], 1,
img.shape[0] - 1)
palette = np.random.randint(0, 256, size=(len(debug_grids), 3))
palette = [tuple(c) for c in palette]
line_styles = random.choices(['-', '-.', ':'], k=len(debug_grids))
visualizer.draw_bboxes(
debug_grids,
edge_colors=palette,
alpha=1,
line_styles=line_styles)
visualizer.draw_bboxes(
debug_grids, face_colors=palette, alpha=0.15)
visualizer.draw_texts(
list(range(len(debug_grids))),
debug_grids[:, :2] + 5,
colors='w')
visualizer.add_datasample(
debug_file_name,
visualizer.get_image(),
data_sample=merged_result,
draw_gt=False,
show=args.show,
wait_time=0,
out_file=debug_out_file,
pred_score_thr=args.score_thr,
)
if args.save_patch:
debug_patch_out_dir = os.path.join(args.out_dir,
f'{name}_patch')
for i, slice_result in enumerate(slice_results):
patch_out_file = os.path.join(
debug_patch_out_dir,
f'{filename}_slice_{i}_result.jpg')
image = mmcv.imconvert(sliced_image_object.images[i],
'bgr', 'rgb')
visualizer.add_datasample(
'patch_result',
image,
data_sample=slice_result,
draw_gt=False,
show=False,
wait_time=0,
out_file=patch_out_file,
pred_score_thr=args.score_thr,
)
| # Copyright (c) OpenMMLab. All rights reserved.
"""Perform MMDET inference on large images (as satellite imagery) as:
```shell
wget -P checkpoint https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_2x_coco/faster_rcnn_r101_fpn_2x_coco_bbox_mAP-0.398_20200504_210455-1d2dac9c.pth # noqa: E501, E261.
python demo/large_image_demo.py \
demo/large_image.jpg \
configs/faster_rcnn/faster-rcnn_r101_fpn_2x_coco.py \
checkpoint/faster_rcnn_r101_fpn_2x_coco_bbox_mAP-0.398_20200504_210455-1d2dac9c.pth
```
"""
try:
    from sahi.slicing import slice_image
except ImportError:
raise ImportError('Please run "pip install -U sahi" '
'to install sahi first for large image inference.')
def parse_args():
parser = ArgumentParser(
description='Perform MMDET inference on large images.')
parser.add_argument(
'img', help='Image path, include image file, dir and URL.')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--out-dir', default='./output', help='Path to output file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--show', action='store_true', help='Show the detection results')
parser.add_argument(
'--tta',
action='store_true',
help='Whether to use test time augmentation')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='Bbox score threshold')
parser.add_argument(
'--patch-size', type=int, default=640, help='The size of patches')
parser.add_argument(
'--patch-overlap-ratio',
type=float,
default=0.25,
help='Ratio of overlap between two patches')
parser.add_argument(
'--merge-iou-thr',
type=float,
default=0.25,
        help='IoU threshold for merging results')
parser.add_argument(
'--merge-nms-type',
type=str,
default='nms',
help='NMS type for merging results')
parser.add_argument(
'--batch-size',
type=int,
default=1,
        help='Batch size, must be greater than or equal to 1')
parser.add_argument(
'--debug',
action='store_true',
help='Export debug results before merging')
parser.add_argument(
'--save-patch',
action='store_true',
help='Save the results of each patch. '
'The `--debug` must be enabled.')
args = parser.parse_args()
return args
def main():
args = parse_args()
config = args.config
if isinstance(config, (str, Path)):
config = Config.fromfile(config)
elif not isinstance(config, Config):
raise TypeError('config must be a filename or Config object, '
f'but got {type(config)}')
if 'init_cfg' in config.model.backbone:
config.model.backbone.init_cfg = None
if args.tta:
assert 'tta_model' in config, 'Cannot find ``tta_model`` in config.' \
" Can't use tta !"
assert 'tta_pipeline' in config, 'Cannot find ``tta_pipeline`` ' \
"in config. Can't use tta !"
config.model = ConfigDict(**config.tta_model, module=config.model)
test_data_cfg = config.test_dataloader.dataset
while 'dataset' in test_data_cfg:
test_data_cfg = test_data_cfg['dataset']
test_data_cfg.pipeline = config.tta_pipeline
# TODO: TTA mode will error if cfg_options is not set.
# This is an mmdet issue and needs to be fixed later.
# build the model from a config file and a checkpoint file
model = init_detector(
config, args.checkpoint, device=args.device, cfg_options={})
if not os.path.exists(args.out_dir) and not args.show:
os.mkdir(args.out_dir)
# init visualizer
visualizer = VISUALIZERS.build(model.cfg.visualizer)
visualizer.dataset_meta = model.dataset_meta
# get file list
files, source_type = get_file_list(args.img)
# start detector inference
print(f'Performing inference on {len(files)} images.... '
'This may take a while.')
progress_bar = ProgressBar(len(files))
for file in files:
# read image
img = mmcv.imread(file)
# arrange slices
height, width = img.shape[:2]
sliced_image_object = slice_image(
img,
slice_height=args.patch_size,
slice_width=args.patch_size,
auto_slice_resolution=False,
overlap_height_ratio=args.patch_overlap_ratio,
overlap_width_ratio=args.patch_overlap_ratio,
)
# perform sliced inference
slice_results = []
start = 0
while True:
# prepare batch slices
end = min(start + args.batch_size, len(sliced_image_object))
images = []
for sliced_image in sliced_image_object.images[start:end]:
images.append(sliced_image)
# forward the model
slice_results.extend(inference_detector(model, images))
if end >= len(sliced_image_object):
break
start += args.batch_size
if source_type['is_dir']:
filename = os.path.relpath(file, args.img).replace('/', '_')
else:
filename = os.path.basename(file)
img = mmcv.imconvert(img, 'bgr', 'rgb')
out_file = None if args.show else os.path.join(args.out_dir, filename)
# export debug images
if args.debug:
# export sliced image results
name, suffix = os.path.splitext(filename)
shifted_instances = shift_predictions(
slice_results,
sliced_image_object.starting_pixels,
src_image_shape=(height, width))
merged_result = slice_results[0].clone()
merged_result.pred_instances = shifted_instances
debug_file_name = name + '_debug' + suffix
debug_out_file = None if args.show else os.path.join(
args.out_dir, debug_file_name)
visualizer.set_image(img.copy())
debug_grids = []
for starting_point in sliced_image_object.starting_pixels:
start_point_x = starting_point[0]
start_point_y = starting_point[1]
end_point_x = start_point_x + args.patch_size
end_point_y = start_point_y + args.patch_size
debug_grids.append(
[start_point_x, start_point_y, end_point_x, end_point_y])
debug_grids = np.array(debug_grids)
debug_grids[:, 0::2] = np.clip(debug_grids[:, 0::2], 1,
img.shape[1] - 1)
debug_grids[:, 1::2] = np.clip(debug_grids[:, 1::2], 1,
img.shape[0] - 1)
palette = np.random.randint(0, 256, size=(len(debug_grids), 3))
palette = [tuple(c) for c in palette]
line_styles = random.choices(['-', '-.', ':'], k=len(debug_grids))
visualizer.draw_bboxes(
debug_grids,
edge_colors=palette,
alpha=1,
line_styles=line_styles)
visualizer.draw_bboxes(
debug_grids, face_colors=palette, alpha=0.15)
visualizer.draw_texts(
list(range(len(debug_grids))),
debug_grids[:, :2] + 5,
colors='w')
visualizer.add_datasample(
debug_file_name,
visualizer.get_image(),
data_sample=merged_result,
draw_gt=False,
show=args.show,
wait_time=0,
out_file=debug_out_file,
pred_score_thr=args.score_thr,
)
if args.save_patch:
debug_patch_out_dir = os.path.join(args.out_dir,
f'{name}_patch')
for i, slice_result in enumerate(slice_results):
patch_out_file = os.path.join(
debug_patch_out_dir,
f'{filename}_slice_{i}_result.jpg')
image = mmcv.imconvert(sliced_image_object.images[i],
'bgr', 'rgb')
visualizer.add_datasample(
'patch_result',
image,
data_sample=slice_result,
draw_gt=False,
show=False,
wait_time=0,
out_file=patch_out_file,
pred_score_thr=args.score_thr,
)
| image_result = merge_results_by_nms( | 3 | 2023-12-11 15:23:03+00:00 | 8k |
Tps-F/rvc-onnx-test | onnxlib/models_onnx.py | [
{
"identifier": "attentions",
"path": "onnxlib/attentions.py",
"snippet": "class Encoder(nn.Module):\nclass Decoder(nn.Module):\nclass MultiHeadAttention(nn.Module):\nclass FFN(nn.Module):\n def __init__(\n self,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size=1,\n p_dropout=0.0,\n window_size=10,\n **kwargs\n ):\n def forward(self, x, x_mask):\n def __init__(\n self,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size=1,\n p_dropout=0.0,\n proximal_bias=False,\n proximal_init=True,\n **kwargs\n ):\n def forward(self, x, x_mask, h, h_mask):\n def __init__(\n self,\n channels,\n out_channels,\n n_heads,\n p_dropout=0.0,\n window_size=None,\n heads_share=True,\n block_length=None,\n proximal_bias=False,\n proximal_init=False,\n ):\n def forward(\n self, x: torch.Tensor, c: torch.Tensor, attn_mask: Optional[torch.Tensor] = None\n ):\n def attention(\n self,\n query: torch.Tensor,\n key: torch.Tensor,\n value: torch.Tensor,\n mask: Optional[torch.Tensor] = None,\n ):\n def _matmul_with_relative_values(self, x, y):\n def _matmul_with_relative_keys(self, x, y):\n def _get_relative_embeddings(self, relative_embeddings, length: int):\n def _relative_position_to_absolute_position(self, x):\n def _absolute_position_to_relative_position(self, x):\n def _attention_bias_proximal(self, length: int):\n def __init__(\n self,\n in_channels,\n out_channels,\n filter_channels,\n kernel_size,\n p_dropout=0.0,\n activation: str = None,\n causal=False,\n ):\n def padding(self, x: torch.Tensor, x_mask: torch.Tensor) -> torch.Tensor:\n def forward(self, x: torch.Tensor, x_mask: torch.Tensor):\n def _causal_padding(self, x):\n def _same_padding(self, x):"
},
{
"identifier": "commons",
"path": "onnxlib/commons.py",
"snippet": "def init_weights(m, mean=0.0, std=0.01):\ndef get_padding(kernel_size, dilation=1):\ndef kl_divergence(m_p, logs_p, m_q, logs_q):\ndef rand_gumbel(shape):\ndef rand_gumbel_like(x):\ndef slice_segments(x, ids_str, segment_size=4):\ndef slice_segments2(x, ids_str, segment_size=4):\ndef rand_slice_segments(x, x_lengths=None, segment_size=4):\ndef get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):\ndef add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):\ndef cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):\ndef subsequent_mask(length):\ndef fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):\ndef convert_pad_shape(pad_shape: List[List[int]]) -> List[int]:\ndef shift_1d(x):\ndef sequence_mask(length: torch.Tensor, max_length: Optional[int] = None):\ndef generate_path(duration, mask):\ndef clip_grad_value_(parameters, clip_value, norm_type=2):"
},
{
"identifier": "modules",
"path": "onnxlib/modules.py",
"snippet": "LRELU_SLOPE = 0.1\nclass LayerNorm(nn.Module):\nclass ConvReluNorm(nn.Module):\nclass DDSConv(nn.Module):\nclass WN(torch.nn.Module):\nclass ResBlock1(torch.nn.Module):\nclass ResBlock2(torch.nn.Module):\nclass Log(nn.Module):\nclass Flip(nn.Module):\nclass ElementwiseAffine(nn.Module):\nclass ResidualCouplingLayer(nn.Module):\nclass ConvFlow(nn.Module):\n def __init__(self, channels, eps=1e-5):\n def forward(self, x):\n def __init__(\n self,\n in_channels,\n hidden_channels,\n out_channels,\n kernel_size,\n n_layers,\n p_dropout,\n ):\n def forward(self, x, x_mask):\n def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):\n def forward(self, x, x_mask, g: Optional[torch.Tensor] = None):\n def __init__(\n self,\n hidden_channels,\n kernel_size,\n dilation_rate,\n n_layers,\n gin_channels=0,\n p_dropout=0,\n ):\n def forward(\n self, x: torch.Tensor, x_mask: torch.Tensor, g: Optional[torch.Tensor] = None\n ):\n def remove_weight_norm(self):\n def __prepare_scriptable__(self):\n def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):\n def forward(self, x: torch.Tensor, x_mask: Optional[torch.Tensor] = None):\n def remove_weight_norm(self):\n def __prepare_scriptable__(self):\n def __init__(self, channels, kernel_size=3, dilation=(1, 3)):\n def forward(self, x, x_mask: Optional[torch.Tensor] = None):\n def remove_weight_norm(self):\n def __prepare_scriptable__(self):\n def forward(\n self,\n x: torch.Tensor,\n x_mask: torch.Tensor,\n g: Optional[torch.Tensor] = None,\n reverse: bool = False,\n ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:\n def forward(\n self,\n x: torch.Tensor,\n x_mask: torch.Tensor,\n g: Optional[torch.Tensor] = None,\n reverse: bool = False,\n ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:\n def __init__(self, channels):\n def forward(self, x, x_mask, reverse=False, **kwargs):\n def __init__(\n self,\n channels,\n hidden_channels,\n kernel_size,\n dilation_rate,\n n_layers,\n p_dropout=0,\n gin_channels=0,\n mean_only=False,\n ):\n def forward(\n self,\n x: torch.Tensor,\n x_mask: torch.Tensor,\n g: Optional[torch.Tensor] = None,\n reverse: bool = False,\n ):\n def remove_weight_norm(self):\n def __prepare_scriptable__(self):\n def __init__(\n self,\n in_channels,\n filter_channels,\n kernel_size,\n n_layers,\n num_bins=10,\n tail_bound=5.0,\n ):\n def forward(\n self,\n x: torch.Tensor,\n x_mask: torch.Tensor,\n g: Optional[torch.Tensor] = None,\n reverse=False,\n ):"
},
{
"identifier": "get_padding",
"path": "onnxlib/commons.py",
"snippet": "def get_padding(kernel_size, dilation=1):\n return int((kernel_size * dilation - dilation) / 2)"
},
{
"identifier": "init_weights",
"path": "onnxlib/commons.py",
"snippet": "def init_weights(m, mean=0.0, std=0.01):\n classname = m.__class__.__name__\n if classname.find(\"Conv\") != -1:\n m.weight.data.normal_(mean, std)"
}
] | import logging
import math
import numpy as np
import torch
from torch import nn
from torch.nn import AvgPool1d, Conv1d, Conv2d, ConvTranspose1d
from torch.nn import functional as F
from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm
from onnxlib import attentions, commons, modules
from onnxlib.commons import get_padding, init_weights | 3,606 | self.n_flows = n_flows
self.gin_channels = gin_channels
self.flows = nn.ModuleList()
for i in range(n_flows):
self.flows.append(
modules.ResidualCouplingLayer(
channels,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=gin_channels,
mean_only=True,
)
)
self.flows.append(modules.Flip())
def forward(self, x, x_mask, g=None, reverse=False):
if not reverse:
for flow in self.flows:
x, _ = flow(x, x_mask, g=g, reverse=reverse)
else:
for flow in reversed(self.flows):
x, _ = flow(x, x_mask, g=g, reverse=reverse)
return x
def remove_weight_norm(self):
for i in range(self.n_flows):
self.flows[i * 2].remove_weight_norm()
class PosteriorEncoder(nn.Module):
def __init__(
self,
in_channels,
out_channels,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=0,
):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.gin_channels = gin_channels
self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
self.enc = modules.WN(
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=gin_channels,
)
self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
def forward(self, x, x_lengths, g=None):
x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
x.dtype
)
x = self.pre(x) * x_mask
x = self.enc(x, x_mask, g=g)
stats = self.proj(x) * x_mask
m, logs = torch.split(stats, self.out_channels, dim=1)
z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
return z, m, logs, x_mask
def remove_weight_norm(self):
self.enc.remove_weight_norm()
class Generator(torch.nn.Module):
def __init__(
self,
initial_channel,
resblock,
resblock_kernel_sizes,
resblock_dilation_sizes,
upsample_rates,
upsample_initial_channel,
upsample_kernel_sizes,
gin_channels=0,
):
super(Generator, self).__init__()
self.num_kernels = len(resblock_kernel_sizes)
self.num_upsamples = len(upsample_rates)
self.conv_pre = Conv1d(
initial_channel, upsample_initial_channel, 7, 1, padding=3
)
resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
self.ups = nn.ModuleList()
for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
self.ups.append(
weight_norm(
ConvTranspose1d(
upsample_initial_channel // (2**i),
upsample_initial_channel // (2 ** (i + 1)),
k,
u,
padding=(k - u) // 2,
)
)
)
self.resblocks = nn.ModuleList()
for i in range(len(self.ups)):
ch = upsample_initial_channel // (2 ** (i + 1))
for j, (k, d) in enumerate(
zip(resblock_kernel_sizes, resblock_dilation_sizes)
):
self.resblocks.append(resblock(ch, k, d))
self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
|
logger = logging.getLogger(__name__)
class TextEncoder256(nn.Module):
def __init__(
self,
out_channels,
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size,
p_dropout,
f0=True,
):
super().__init__()
self.out_channels = out_channels
self.hidden_channels = hidden_channels
self.filter_channels = filter_channels
self.n_heads = n_heads
self.n_layers = n_layers
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.emb_phone = nn.Linear(256, hidden_channels)
self.lrelu = nn.LeakyReLU(0.1, inplace=True)
        if f0:
self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
self.encoder = attentions.Encoder(
hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
)
self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
def forward(self, phone, pitch, lengths):
        if pitch is None:
x = self.emb_phone(phone)
else:
x = self.emb_phone(phone) + self.emb_pitch(pitch)
x = x * math.sqrt(self.hidden_channels) # [b, t, h]
x = self.lrelu(x)
x = torch.transpose(x, 1, -1) # [b, h, t]
x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
x.dtype
)
x = self.encoder(x * x_mask, x_mask)
stats = self.proj(x) * x_mask
m, logs = torch.split(stats, self.out_channels, dim=1)
return m, logs, x_mask
class TextEncoder768(nn.Module):
def __init__(
self,
out_channels,
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size,
p_dropout,
f0=True,
):
super().__init__()
self.out_channels = out_channels
self.hidden_channels = hidden_channels
self.filter_channels = filter_channels
self.n_heads = n_heads
self.n_layers = n_layers
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.emb_phone = nn.Linear(768, hidden_channels)
self.lrelu = nn.LeakyReLU(0.1, inplace=True)
        if f0:
self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
self.encoder = attentions.Encoder(
hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
)
self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
def forward(self, phone, pitch, lengths):
        if pitch is None:
x = self.emb_phone(phone)
else:
x = self.emb_phone(phone) + self.emb_pitch(pitch)
x = x * math.sqrt(self.hidden_channels) # [b, t, h]
x = self.lrelu(x)
x = torch.transpose(x, 1, -1) # [b, h, t]
x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
x.dtype
)
x = self.encoder(x * x_mask, x_mask)
stats = self.proj(x) * x_mask
m, logs = torch.split(stats, self.out_channels, dim=1)
return m, logs, x_mask
class ResidualCouplingBlock(nn.Module):
def __init__(
self,
channels,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
n_flows=4,
gin_channels=0,
):
super().__init__()
self.channels = channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.n_flows = n_flows
self.gin_channels = gin_channels
self.flows = nn.ModuleList()
for i in range(n_flows):
self.flows.append(
modules.ResidualCouplingLayer(
channels,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=gin_channels,
mean_only=True,
)
)
self.flows.append(modules.Flip())
def forward(self, x, x_mask, g=None, reverse=False):
if not reverse:
for flow in self.flows:
x, _ = flow(x, x_mask, g=g, reverse=reverse)
else:
for flow in reversed(self.flows):
x, _ = flow(x, x_mask, g=g, reverse=reverse)
return x
def remove_weight_norm(self):
for i in range(self.n_flows):
self.flows[i * 2].remove_weight_norm()
class PosteriorEncoder(nn.Module):
def __init__(
self,
in_channels,
out_channels,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=0,
):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.gin_channels = gin_channels
self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
self.enc = modules.WN(
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=gin_channels,
)
self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
def forward(self, x, x_lengths, g=None):
x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
x.dtype
)
x = self.pre(x) * x_mask
x = self.enc(x, x_mask, g=g)
stats = self.proj(x) * x_mask
m, logs = torch.split(stats, self.out_channels, dim=1)
z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
return z, m, logs, x_mask
def remove_weight_norm(self):
self.enc.remove_weight_norm()
class Generator(torch.nn.Module):
def __init__(
self,
initial_channel,
resblock,
resblock_kernel_sizes,
resblock_dilation_sizes,
upsample_rates,
upsample_initial_channel,
upsample_kernel_sizes,
gin_channels=0,
):
super(Generator, self).__init__()
self.num_kernels = len(resblock_kernel_sizes)
self.num_upsamples = len(upsample_rates)
self.conv_pre = Conv1d(
initial_channel, upsample_initial_channel, 7, 1, padding=3
)
resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
self.ups = nn.ModuleList()
for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
self.ups.append(
weight_norm(
ConvTranspose1d(
upsample_initial_channel // (2**i),
upsample_initial_channel // (2 ** (i + 1)),
k,
u,
padding=(k - u) // 2,
)
)
)
self.resblocks = nn.ModuleList()
for i in range(len(self.ups)):
ch = upsample_initial_channel // (2 ** (i + 1))
for j, (k, d) in enumerate(
zip(resblock_kernel_sizes, resblock_dilation_sizes)
):
self.resblocks.append(resblock(ch, k, d))
self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) | self.ups.apply(init_weights) | 4 | 2023-12-09 04:08:04+00:00 | 8k |
zengydd/ProphDR | train.py | [
{
"identifier": "load_config",
"path": "utils/optimizer.py",
"snippet": "def load_config(path):\n with open(path, 'r') as f:\n return EasyDict(yaml.safe_load(f))"
},
{
"identifier": "get_optimizer",
"path": "utils/optimizer.py",
"snippet": "def get_optimizer(cfg, model):\n if cfg.type == 'adam':\n return torch.optim.Adam(\n model.parameters(),\n lr=cfg.lr,\n weight_decay=cfg.weight_decay,\n betas=(cfg.beta1, cfg.beta2, )\n )\n else:\n raise NotImplementedError('Optimizer not supported: %s' % cfg.type)"
},
{
"identifier": "get_scheduler",
"path": "utils/optimizer.py",
"snippet": "def get_scheduler(cfg, optimizer):\n if cfg.type == 'plateau':\n return torch.optim.lr_scheduler.ReduceLROnPlateau(\n optimizer,\n factor=cfg.factor,\n patience=cfg.patience,\n min_lr=cfg.min_lr\n )\n else:\n raise NotImplementedError('Scheduler not supported: %s' % cfg.type)"
},
{
"identifier": "load_pickle",
"path": "utils/load.py",
"snippet": "def load_pickle(path):\n\tf = open(path, \"rb\")\n\tdata = pickle.load(f)\n\tf.close()\n\treturn data"
},
{
"identifier": "save_pickle",
"path": "utils/load.py",
"snippet": "def save_pickle(data, path):\n\tf = open(path, \"wb\")\n\tpickle.dump(data, f)\n\tf.close()"
},
{
"identifier": "set_file",
"path": "utils/load.py",
"snippet": "def set_file(root_path, task, method, down_sample):\n if task=='binary': \n if method =='orio':\n res_df = pd.read_csv(root_path + 'unify_thred_Iorio.csv')\n elif method =='only2':\n res_df = pd.read_csv(root_path + 'unify_thred_only2.csv')\n else:\n res_df = pd.read_csv(root_path + 'unify_thred_only2.csv')\n return res_df"
},
{
"identifier": "mydata",
"path": "utils/mydata.py",
"snippet": "class mydata(data.Dataset):\n def __init__(self, list_ID, label, res_df, drug_smiles_df, omic_encode_dict):\n 'Initialization'\n self.list_ID = list_ID\n self.label = label\n self.res_df = res_df \n self.drug_smiles_df = drug_smiles_df\n self.omic_encode_dict = omic_encode_dict\n\n def __len__(self):\n 'Denotes the total number of samples'\n return len(self.list_ID)\n\n def __getitem__(self, index):\n label = self.label[index]\n ID = self.list_ID[index]\n drug_id = self.res_df.iloc[ID]['DRUG_ID']\n cosmic_id = self.res_df.iloc[ID]['COSMIC_ID']\n drug_f = self.drug_smiles_df.loc[drug_id]['smiles']\n omic_f = self.omic_encode_dict[str(cosmic_id)]\n \n return drug_id, cosmic_id, drug_f, omic_f, label"
},
{
"identifier": "dataset_split",
"path": "utils/mydata.py",
"snippet": "def dataset_split(res_df, random=4, stratify=None):\n if stratify == None:\n train_set, val_test_set = train_test_split(res_df, test_size=0.2, random_state=random)\n val_set, test_set = train_test_split(val_test_set, test_size=0.5, random_state=random)\n else:\n train_set, val_test_set = train_test_split(res_df, test_size=0.2, random_state=random, stratify=res_df[stratify])\n # print('ct', val_test_set['binary'].tolist())\n val_set, test_set = train_test_split(val_test_set, test_size=0.5, random_state=random, stratify=val_test_set[stratify])\n print('Responses:{}'.format(res_df.shape[0]))\n print('Train:{}'.format(train_set.shape[0]))\n print('Val:{}'.format(val_set.shape[0]))\n print('Test:{}'.format(test_set.shape[0]))\n print('train_DRUG:{}, val_DRUG:{}, test_DRUG:{}'.format(len(train_set['DRUG_ID'].value_counts()), len(set(val_set['DRUG_ID'])), len(set(test_set['DRUG_ID']))))\n print('train_cell:{}, val_cell:{}, test_cell:{}'.format(len(set(train_set['COSMIC_ID'])), len(set(val_set['COSMIC_ID'])), len(set(test_set['COSMIC_ID']))))\n return train_set, val_set, test_set"
},
{
"identifier": "set_random_seed",
"path": "utils/mydata.py",
"snippet": "def set_random_seed(seed=4):\n \"\"\"Set random seed.\n Parameters\n ----------\n seed : int\n Random seed to use\n \"\"\"\n random.seed(seed)\n np.random.seed(seed)\n # dgl.random.seed(seed)\n # dgl.seed(seed)\n torch.manual_seed(seed)\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n if torch.cuda.is_available():\n torch.cuda.manual_seed(seed)"
},
{
"identifier": "gbz_main_cross",
"path": "Models/Proph_DR.py",
"snippet": "class gbz_main_cross(object):\n def __init__(self, task, omic_dim, res_df, omic_encode_dict, model_dir):\n # self.model_drug = bert_atom_embedding\n self.task = task\n self.model_dir = model_dir\n self.model = Predictor(h_dim=128, num_heads=4, omic_dim=omic_dim)\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n self.omic_encode_dict = omic_encode_dict\n self.record_file = os.path.join(self.model_dir, \"valid_markdowntable.txt\")\n self.pkl_file = os.path.join(self.model_dir, \"loss_curve_iter.pkl\")\n self.res_df = res_df\n if self.task=='binary':\n self.label = 'binary' \n self.loss_fct = FocalLoss(logits=True)\n elif self.task=='IC50':\n self.label = 'LN_IC50'\n self.loss_fct = torch.nn.MSELoss() \n elif self.task=='AUC':\n self.label='AUC'\n self.loss_fct = torch.nn.MSELoss()\n\n def validate(self, generator, model):\n torch.cuda.empty_cache()\n loss_fct = self.loss_fct\n model.eval()\n y_label = []\n y_pred = []\n with torch.no_grad():\n for i, (drug_id, cosmic_id, drug_fs, omic_f, label) in enumerate(generator):\n torch.cuda.empty_cache()\n label = Variable(torch.from_numpy(np.array(label)).float()).to(self.device)\n # score = model(drug_id, omic_f, cosmic_id)\n encode_D, valid_lens = encoder_D(drug_id)\n score = model(drug_id, encode_D, omic_f, valid_lens, cosmic_id)\n \n score_flatten = score.flatten().to(self.device)\n loss = loss_fct(score_flatten, label).to(self.device)\n y_label.append(label.view(-1,1))\n y_pred.append(score_flatten.view(-1, 1))\n y_label = torch.cat(y_label, dim=0).cpu().numpy().flatten()\n y_pred = torch.cat(y_pred, dim=0).cpu().numpy().flatten()\n\n # Metrics\n if self.task=='binary':\n metric = {}\n y_pred = torch.sigmoid(torch.tensor(y_pred)).tolist()\n # print('y_label:{},\\ny_pred:{}'.format(y_label, y_pred))\n metric['AUC'] = roc_auc_score(y_label, y_pred)\n metric['pr_score'] = average_precision_score(y_label, y_pred)\n false_positive_rate,true_positive_rate,thresholds = roc_curve(y_label, y_pred)\n recall, precision, thresholds = precision_recall_curve(y_label, y_pred)\n print('roc_curve data:', [false_positive_rate,true_positive_rate,thresholds])\n print('PR_curve data:', [recall, precision])\n to_binary = lambda x: 1 if x > 0.5 else 0 \n y_pred_cls = list(map(to_binary, y_pred))\n metric['acc'] = accuracy_score(y_label, y_pred_cls)\n metric['F1'] = f1_score(y_label, y_pred_cls, average='binary')\n print('metric_resut_{}{}:'.format(self.task, metric))\n else:\n metric = {}\n metric['r2'] = r2_score(y_label, y_pred)\n metric['MAE'] = mean_absolute_error(y_label, y_pred)\n metric['mse'] = mean_squared_error(y_label, y_pred)\n metric['rmse'] = torch.sqrt(torch.tensor(metric['mse']))\n metric['spearman'] = spearmanr(y_label, y_pred)[0]\n metric['pearson'] = pearsonr(y_label, y_pred)[0]\n metric['ci'] = concordance_index(y_label, y_pred)\n print('metric_resut_{}{}:'.format(self.task, metric))\n \n model.train()\n return metric, loss\n\n def train(self, train_set, val_set, **param):\n torch.cuda.empty_cache()\n self.model = self.model.to(self.device)\n\n print(self.model)\n label = self.label\n loss_fct = self.loss_fct\n BATCH_SIZE = param['bs']\n train_epoch = param['te']\n patience = param['pt']\n\n opt = getattr(torch.optim, param['optimizer'])(self.model.parameters(), \n lr=param['lr'], \n weight_decay=param['decay'])\n\n params = {'batch_size': BATCH_SIZE,\n 'shuffle': True,\n 'num_workers': 0,\n 'drop_last': False}\n # loader\n train_generator = data.DataLoader(\n mydata(\n train_set.index.values,\n 
train_set[label].values, \n self.res_df, \n drug_smiles_df, \n self.omic_encode_dict\n ), \n **params)\n val_generator = data.DataLoader(\n mydata(\n val_set.index.values,\n val_set[label].values, \n self.res_df, \n drug_smiles_df, \n self.omic_encode_dict\n ), \n **params)\n \n max_MSE = 10000\n model_max = copy.deepcopy(self.model)\n writer = SummaryWriter(self.model_dir)\n table = PrettyTable()\n table.title = 'valid'\n \n t_start = time.time()\n loss_history = []\n early_stopping = EarlyStopping(patience=patience, verbose=False)\n for epo in range(train_epoch):\n torch.cuda.empty_cache()\n for i, (drug_id, cosmic_id, drug_fs, omic_f, label) in enumerate(train_generator):\n torch.backends.cudnn.enabled = False\n # score = self.model(drug_id, omic_f, cosmic_id)\n encode_D, valid_lens = encoder_D(drug_id)\n score = self.model(drug_id, encode_D, omic_f, valid_lens, cosmic_id)\n # print('score:'.format(type(score), score))\n label = Variable(torch.from_numpy(np.array(label))).float().to(self.device)\n n = torch.squeeze(score, 1).float()\n n = n.squeeze(-1)\n\n loss = loss_fct(n, label)\n loss_history.append(loss.item())\n writer.add_scalar(\"Loss/train\", loss.item(), epo)\n opt.zero_grad()\n loss.backward()\n opt.step()\n \n if (i % 1000 == 0):\n t_now = time.time()\n print('Training at Epoch ' + str(epo + 1) +\n ' iteration ' + str(i) + \\\n ' with loss ' + str(loss.cpu().detach().numpy())[:7] + \\\n ' with lr ' + str(opt.param_groups[0]['lr']) + \\\n \". Total time \" + str(int(t_now - t_start) / 3600)[:7] + \" hours\")\n\n metric_result, loss_val = self.validate(val_generator, self.model)\n print('Validation at Epoch:{} \\nMetric_result:{}'.format(str(epo + 1), metric_result))\n # mark\n table.field_names = ['# epoch'] + list(metric_result.keys()) + ['loss']\n valid_content_lst = ['epo'+str(epo)]+list(map(float2str, metric_result.values()))+[str(loss_val)]\n table.add_row(valid_content_lst)\n # tensorboard\n for k, v in metric_result.items():\n writer.add_scalar(\"valid/{}\".format(k), v, epo)\n writer.add_scalar(\"Loss/valid\", loss_val.item(), epo)\n\n # early_stop\n early_stopping(loss, self.model, self.model_dir)\n if early_stopping.early_stop:\n print(\"Early stopping at epoch{}\".format(epo))\n break\n \n lowest_val = 1e9\n if loss_val < lowest_val:\n lowest_val = lowest_val\n self.save_model(self.model, self.model_dir)\n print(f'Val Loss: {loss_val}')\n\n # self.model = model_max\n with open(self.record_file, 'w') as fp:\n fp.write(table.get_string())\n with open(self.pkl_file, 'wb') as pck:\n pickle.dump(loss_history, pck)\n\n print('--- Training Finished ---')\n writer.flush()\n writer.close()\n return metric_result, loss_val\n\n def test(self, test_set):\n self.model = self.model.to(self.device)\n label = self.label\n params = {'batch_size': 200,\n 'shuffle': True,\n 'num_workers': 0,\n 'drop_last': False}\n # loader\n test_generator = data.DataLoader(\n mydata(\n test_set.index.values,\n test_set[label].values, \n self.res_df, \n drug_smiles_df, \n self.omic_encode_dict\n ), \n **params) \n print(\"=====testing...\")\n self.model.load_state_dict(torch.load(self.model_dir + '/checkpoint.pt')['model_state_dict'])\n metric_result, loss = self.validate(test_generator, self.model)\n return metric_result, loss\n\n def pred(self, smiles_list, cosmic_id_list, pt_path=os.path.join(root, 'ckpt/checkpoint.pt'), drug_id=0):\n with torch.no_grad():\n score_list = []\n smi_list = []\n cell_list = []\n for smiles in smiles_list:\n smi_list.append(smiles)\n for cosmic_id in cosmic_id_list:\n 
cell_list.append(str(cosmic_id))\n self.model = self.model.to(self.device)\n omic_f = self.omic_encode_dict[str(cosmic_id)]\n omic_f = omic_f.unsqueeze(0)\n self.model.load_state_dict(torch.load(pt_path, map_location='cpu')['model_state_dict'])\n encode_D_pred, valid_lens = encoder_D_pred(smiles)\n score = self.model(drug_id, encode_D_pred, omic_f, valid_lens, cosmic_id)\n score = score.flatten().to(self.device).cpu().numpy().item()\n score_list.append(score)\n res = pd.DataFrame()\n res['LN(IC50)'] = pd.Series(score_list)\n res['smiles'] = smi_list\n res['cosmic'] = cell_list\n return res\n \n def save_model(self, model, model_dir):\n torch.save({'model_state_dict': model.state_dict()}, model_dir + '/checkpoint.pt')\n print('model_saved:{}'.format(model_dir))"
},
{
"identifier": "cross_EncoderBlock_G",
"path": "Models/cross_attention_dual.py",
"snippet": "class cross_EncoderBlock_G(nn.Module):\n \"\"\"Transformer编码器块\"\"\"\n def __init__(self, query_size, key_size, value_size, num_hiddens,\n num_heads, norm_shape,\n dropout=0.1, bias=False, **kwargs):\n super(cross_EncoderBlock_G, self).__init__(**kwargs)\n\n self.cross_attention = cross_MultiHeadAttention_G(\n query_size, key_size, value_size, num_hiddens, num_heads, dropout, bias)\n self.addnorm_q = AddNorm_Q(norm_shape, query_size, num_hiddens, dropout)\n self.linear = nn.Linear(num_hiddens, num_hiddens)\n def forward(self, q, k, v, valid_lens):\n attn_output, attn_w = self.cross_attention(q, k, v, valid_lens)\n\n out = self.addnorm_q(q, attn_output)\n return out, attn_w"
},
{
"identifier": "cross_EncoderBlock_D",
"path": "Models/cross_attention_dual.py",
"snippet": "class cross_EncoderBlock_D(nn.Module):\n \"\"\"Transformer编码器块\"\"\"\n def __init__(self, query_size, key_size, value_size, num_hiddens,\n num_heads, norm_shape,\n dropout=0.1, bias=False, **kwargs):\n super(cross_EncoderBlock_D, self).__init__(**kwargs)\n # print('query_size', query_size)\n self.cross_attention = cross_MultiHeadAttention_D(\n query_size, key_size, value_size, num_hiddens, num_heads, dropout, bias)\n # self.norm_shape = [self.len_q, self.h_dim]\n self.addnorm_q = AddNorm_Q(norm_shape, query_size, num_hiddens, dropout)\n # self.addnorm = AddNorm(norm_shape, dropout)\n self.linear = nn.Linear(num_hiddens, num_hiddens)\n def forward(self, q, k, v, valid_lens):\n attn_output, attn_w = self.cross_attention(q, k, v, valid_lens)\n # print('attn_output', attn_output.shape)\n # print('attn_w', attn_w.shape)\n out = self.addnorm_q(q, attn_output)\n return out, attn_w"
}
] | import os, sys
import pandas as pd
import numpy as np
import random
import copy
import time
import datetime
import math
import pickle
import optuna
import yaml
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils import data
from torch.nn.parallel import DataParallel
from torch.autograd import Variable
from torch.optim.lr_scheduler import ReduceLROnPlateau, MultiStepLR
from sklearn.model_selection import train_test_split, KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
from sklearn.impute import SimpleImputer
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import SequentialSampler
from prettytable import PrettyTable
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
from sklearn.metrics import roc_auc_score, average_precision_score, accuracy_score, roc_curve, f1_score, precision_recall_curve
from lifelines.utils import concordance_index
from scipy.stats import pearsonr,spearmanr
from utils.optimizer import load_config, get_optimizer, get_scheduler
from easydict import EasyDict
from collections import defaultdict
from utils.load import load_pickle, save_pickle, set_file
from utils.mydata import mydata, dataset_split, set_random_seed
from Models.Proph_DR import gbz_main_cross
from Models.cross_attention_dual import cross_EncoderBlock_G, cross_EncoderBlock_D | 5,049 | os.environ['NUMEXPR_MAX_THREADS'] = '32'
sys.path.append("..")
torch.set_default_dtype(torch.float32)
config = './utils/train_res.yml'
config = load_config(config)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
float2str = lambda x: '%0.4f' % x
root = os.getcwd()
# root ='/home/yundian/gbz/gbz/'
data_dir = os.path.join(root, 'data_collect/')
unify_dir = os.path.join(root, 'data_collect/unify/')
# drug data
drug_std_dir = os.path.join(unify_dir, 'drug_std/')
drug_smiles_file = os.path.join(data_dir, 'drug_smiles_atom_pad.csv')
drug_smiles_df = pd.read_csv(drug_smiles_file, index_col='drug_id')
atom_pad_dict = load_pickle(data_dir + 'unify/drug_std/atom_pad.pkl')
# omics data
omic_encode = os.path.join(unify_dir, 'omics_std/omics_stk_dict.pkl')
mut_encode = os.path.join(unify_dir, 'omics_std/mut_dict.pkl')
cnv_encode = os.path.join(unify_dir, 'omics_std/cnv_dict.pkl')
exp_encode = os.path.join(unify_dir, 'omics_std/exp_dict.pkl')
mut_cnv = os.path.join(unify_dir, 'omics_std/omics_stk_dict_mut_cnv.pkl')
mut_exp = os.path.join(unify_dir, 'omics_std/omics_stk_dict_mut_exp.pkl')
exp_cnv = os.path.join(unify_dir, 'omics_std/omics_stk_dict_exp_cnv.pkl')
if __name__ == '__main__':
# @@@@@@@@@@@@@@@@@@@
# task: IC50 or binary
# strat: TCGA_DESC, binary, None
# omic_f: omic_encode, omic_encode_origin, mut_encode, cnv_encode, exp_encode
# mut_cnv mut_exp exp_cnv
test_dict = {}
task = 'IC50'
method = 'only2'
torch.cuda.empty_cache()
test_result_list = []
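    # repeat the full experiment over 10 random seeds (seed = 0..9)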
for i in range(0, 10):
seed = i
set_random_seed(seed)
now = datetime.datetime.now()
timestamp = now.strftime('%Y%m%d_%H_%M%S')
model_dir = os.path.join(root, 'result/{}_{}'.format(task, timestamp))
if not os.path.exists(model_dir):
os.makedirs(model_dir)
exp_params = {
'task': task,
'method': method,
'down_sample': False,
'strat': None,
'omic_dim': 3,
'omic_f':omic_encode,
}
        response = set_file(root_path=unify_dir, task=exp_params['task'], method=exp_params['method'], down_sample=exp_params['down_sample'])
response = response.head(100)
| os.environ['NUMEXPR_MAX_THREADS'] = '32'
sys.path.append("..")
torch.set_default_dtype(torch.float32)
config = './utils/train_res.yml'
config = load_config(config)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
float2str = lambda x: '%0.4f' % x
root = os.getcwd()
# root ='/home/yundian/gbz/gbz/'
data_dir = os.path.join(root, 'data_collect/')
unify_dir = os.path.join(root, 'data_collect/unify/')
# drug data
drug_std_dir = os.path.join(unify_dir, 'drug_std/')
drug_smiles_file = os.path.join(data_dir, 'drug_smiles_atom_pad.csv')
drug_smiles_df = pd.read_csv(drug_smiles_file, index_col='drug_id')
atom_pad_dict = load_pickle(data_dir + 'unify/drug_std/atom_pad.pkl')
# omics data
omic_encode = os.path.join(unify_dir, 'omics_std/omics_stk_dict.pkl')
mut_encode = os.path.join(unify_dir, 'omics_std/mut_dict.pkl')
cnv_encode = os.path.join(unify_dir, 'omics_std/cnv_dict.pkl')
exp_encode = os.path.join(unify_dir, 'omics_std/exp_dict.pkl')
mut_cnv = os.path.join(unify_dir, 'omics_std/omics_stk_dict_mut_cnv.pkl')
mut_exp = os.path.join(unify_dir, 'omics_std/omics_stk_dict_mut_exp.pkl')
exp_cnv = os.path.join(unify_dir, 'omics_std/omics_stk_dict_exp_cnv.pkl')
if __name__ == '__main__':
# @@@@@@@@@@@@@@@@@@@
# task: IC50 or binary
# strat: TCGA_DESC, binary, None
# omic_f: omic_encode, omic_encode_origin, mut_encode, cnv_encode, exp_encode
# mut_cnv mut_exp exp_cnv
test_dict = {}
task = 'IC50'
method = 'only2'
torch.cuda.empty_cache()
test_result_list = []
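    # repeat the full experiment over 10 random seeds (seed = 0..9)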
for i in range(0, 10):
seed = i
set_random_seed(seed)
now = datetime.datetime.now()
timestamp = now.strftime('%Y%m%d_%H_%M%S')
model_dir = os.path.join(root, 'result/{}_{}'.format(task, timestamp))
if not os.path.exists(model_dir):
os.makedirs(model_dir)
exp_params = {
'task': task,
'method': method,
'down_sample': False,
'strat': None,
'omic_dim': 3,
'omic_f':omic_encode,
}
        response = set_file(root_path=unify_dir, task=exp_params['task'], method=exp_params['method'], down_sample=exp_params['down_sample'])
response = response.head(100)
| train_set, val_set, test_set = dataset_split(response, random=seed, stratify=exp_params['strat']) | 7 | 2023-12-13 11:56:08+00:00 | 8k |
zhenqincn/FedKSeed | main.py | [
{
"identifier": "Server",
"path": "server.py",
"snippet": "class Server(object):\n def __init__(self, args, eval_loader, candidate_seeds, log_dir):\n self.args = args\n self.eval_loader = eval_loader\n self.candidate_seeds = candidate_seeds\n self.tokenizer = AutoTokenizer.from_pretrained(args.model, use_fast=True)\n self.log_dir = log_dir\n self.tokenizer.model_max_length = self.args.max_length\n special_tokens = dict()\n if self.tokenizer.pad_token is None:\n special_tokens[\"pad_token\"] = DefaultToken.PAD_TOKEN.value\n if self.tokenizer.eos_token is None:\n special_tokens[\"eos_token\"] = DefaultToken.EOS_TOKEN.value\n if self.tokenizer.bos_token is None:\n special_tokens[\"bos_token\"] = DefaultToken.BOS_TOKEN.value\n if self.tokenizer.unk_token is None:\n special_tokens[\"unk_token\"] = DefaultToken.UNK_TOKEN.value\n self.tokenizer.add_special_tokens(special_tokens)\n \n self.model = AutoModelForCausalLM.from_pretrained(args.model, device_map='cpu', torch_dtype=torch.float16, trust_remote_code=True)\n\n from copy import deepcopy\n self.model_w0 = deepcopy(self.model)\n self.seed_pool = {seed: 0.0 for seed in self.candidate_seeds}\n \n self.device = torch.device(f'cuda:{self.args.device}')\n\n if self.args.bias_sampling:\n # initialize the probabilities of seeds\n self.gradient_history = {seed: [self.args.grad_initial] for seed in self.candidate_seeds}\n self.probabilities = [1.0 / float(len(self.candidate_seeds)) for _ in range(len(self.candidate_seeds))]\n else:\n self.gradient_history = None\n self.probabilities = None\n \n def create_model_by_seedpool(self, cur_round):\n tmp_model = deepcopy(self.model_w0)\n tmp_model.to(self.device)\n \n lr = self.args.lr * math.pow(self.args.lr_decay, cur_round - 1)\n if self.args.lr_decay != 1.0:\n raise ValueError('currently seed pool only supports constant learning rate')\n # replace local model with initial weights\n framework = MeZOFramework(tmp_model, args=self.args, lr=lr, candidate_seeds=self.candidate_seeds)\n progress_bar = tqdm(range(len(self.seed_pool))) \n # pull the latest model via accumulated {seed, grad} pairs on the server\n for seed, grad in self.seed_pool.items():\n if grad != 0:\n framework.zo_update(seed=seed, grad=grad)\n progress_bar.update(1)\n progress_bar.set_description(f'pull global model at round{cur_round}')\n tmp_model = tmp_model.cpu()\n return tmp_model\n\n def aggregate_seed_pool(self, selected_client_list):\n if self.args.equal_weight:\n weight_array = np.array([1.0 for _ in selected_client_list], dtype=np.float64)\n weight_array /= float(len(selected_client_list))\n else:\n weight_array = np.array([len(client.train_loader) for client in selected_client_list], dtype=np.float64)\n weight_array /= float(np.sum(weight_array))\n for client_idx in range(len(selected_client_list)):\n local_seed_pool = selected_client_list[client_idx].local_seed_pool\n for seed, grad in local_seed_pool.items():\n self.seed_pool[seed] += grad * weight_array[client_idx]\n for client in selected_client_list:\n client.clear_model()\n\n def update_global_model_by_seed_pool(self):\n self.model = deepcopy(self.model_w0)\n self.model.to(self.device)\n \n framework = MeZOFramework(self.model, args=self.args, lr=self.args.lr, candidate_seeds=self.candidate_seeds)\n progress_bar = tqdm(range(len(self.seed_pool))) \n\n # pull the latest model via accumulated {seed, grad} pairs on the server\n for seed, grad in self.seed_pool.items():\n if grad != 0.0:\n framework.zo_update(seed=seed, grad=grad)\n progress_bar.update(1)\n progress_bar.set_description(f'server update global model')\n\n def 
prepare_aggregate(self):\n self.model_for_aggregate = deepcopy(self.model)\n for _, v in self.model_for_aggregate.named_parameters():\n if v.requires_grad:\n v.data.zero_()\n\n def online_aggregate(self, client, selected_client_list):\n if self.args.equal_weight:\n weight_array = np.array([1.0 for _ in selected_client_list], dtype=np.float64)\n weight_array /= float(len(selected_client_list))\n else:\n weight_array = np.array([len(client.train_loader) for client in selected_client_list], dtype=np.float64)\n weight_array /= float(np.sum(weight_array))\n \n cur_client_index = 0\n for c in selected_client_list:\n if client.idx == c.idx:\n break\n cur_client_index += 1\n \n cur_weight = weight_array[cur_client_index]\n for k, v in self.model_for_aggregate.named_parameters():\n if v.requires_grad:\n v.data += client.model.state_dict()[k].data * cur_weight\n client.clear_model()\n\n def finish_aggregate(self):\n self.model = self.model_for_aggregate\n\n def calculate_probabilities(self):\n history_list = [self.gradient_history[seed] for seed in self.candidate_seeds]\n mean_grad_history = np.array([np.mean(np.abs(np.clip(history_cur_seed, -self.args.bias_loss_clip, self.args.bias_loss_clip))) for history_cur_seed in history_list])\n self.probabilities = softmax(min_max_norm(mean_grad_history))\n sum_prob = np.sum(self.probabilities)\n if sum_prob != 1.0:\n self.probabilities /= sum_prob\n return self.probabilities\n\n def eval(self, cur_round, eval_avg_acc):\n if self.args.eval_metric == 'loss':\n eval_metric = self.eval_loss(cur_round)\n else:\n eval_metric = self.eval_generate(cur_round)\n \n if self.args.save and cur_round > 0:\n save_dir = self.log_dir\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n if (self.args.eval_metric == 'loss' and eval_metric < np.min(eval_avg_acc)) or (self.args.eval_metric != 'none' and eval_metric > np.max(eval_avg_acc)):\n for file_name in os.listdir(save_dir):\n if 'best' in file_name:\n os.remove(os.path.join(save_dir, file_name)) \n torch.save(self.model.state_dict(), os.path.join(save_dir, f'model_state_dict_best_round{cur_round}.bin'))\n for file_name in os.listdir(save_dir):\n if 'final' in file_name:\n os.remove(os.path.join(save_dir, file_name)) \n torch.save(self.model.state_dict(), os.path.join(save_dir, f'model_state_dict_final_round{cur_round}.bin'))\n return eval_metric\n\n def eval_loss(self, cur_round):\n self.model = self.model.to(self.device)\n self.model.eval()\n \n progress_bar_eval = tqdm(range(len(self.eval_loader)))\n loss_total_eval = 0.0\n num_eval = 0\n \n with torch.inference_mode():\n for batch in self.eval_loader:\n batch = {\n 'input_ids': batch['input_ids'].to(self.device),\n 'labels': batch['labels'].to(self.device),\n 'attention_mask': batch['attention_mask'].to(self.device) \n }\n outputs = self.model(**batch)\n loss = outputs.loss\n progress_bar_eval.update(1)\n if torch.isnan(loss):\n continue\n loss_total_eval += loss\n num_eval += len(batch['input_ids'])\n if num_eval == 0:\n num_eval = 1e-10\n progress_bar_eval.set_description(f'eval at round {cur_round}, loss: {loss_total_eval / num_eval}')\n print()\n print()\n self.model = self.model.cpu()\n return (loss_total_eval / num_eval).item()\n\n def eval_generate(self, cur_round):\n self.model = self.model.to(self.device)\n self.model.eval()\n \n progress_bar_eval = tqdm(range(len(self.eval_loader)))\n acc_total_eval = 0.0\n num_eval = 0\n \n with torch.inference_mode():\n for batch in self.eval_loader:\n input_ids = batch['input_ids'].to(self.device)\n label_ids = 
batch['labels'].to(self.device)\n output_ids = self.model.generate(\n input_ids=input_ids,\n max_new_tokens=128,\n num_beams=1,\n )\n acc_total_eval += rouge_score(output_ids[0][len(input_ids[0]):], label_ids[0], self.tokenizer)\n progress_bar_eval.update(1)\n num_eval += len(batch['input_ids'])\n if num_eval == 0:\n num_eval = 1e-10\n progress_bar_eval.set_description(f'eval at round {cur_round}, metric: {acc_total_eval / num_eval}')\n print()\n print()\n self.model = self.model.cpu()\n return acc_total_eval / num_eval"
},
{
"identifier": "Client",
"path": "client.py",
"snippet": "class Client(object):\n def __init__(self, idx, args, candidate_seeds, train_loader):\n self.idx = idx\n self.args = args\n self.train_loader = train_loader\n self.train_iterator = iter(self.train_loader)\n self.model = None\n\n self.device = torch.device(f'cuda:{args.device}')\n self.candidate_seeds = candidate_seeds\n\n def local_train_with_seed_pool(self, pulled_model, cur_round, memory_record_dic=None, probabilities=None, gradient_history=None):\n self.model = pulled_model\n self.model.to(self.device)\n \n if memory_record_dic is not None:\n torch.cuda.empty_cache()\n \n # initialize a seed pool\n self.local_seed_pool = {seed: 0.0 for seed in self.candidate_seeds}\n\n lr = self.args.lr\n \n if self.args.batch_or_epoch == 'epoch':\n iter_steps = self.args.local_step * len(self.train_loader)\n else:\n iter_steps = self.args.local_step\n \n if self.args.bias_sampling:\n assert probabilities is not None\n framework = MeZOBiasOptimizer(self.model, args=self.args, lr=lr, candidate_seeds=self.candidate_seeds, probabilities=probabilities, gradient_history=gradient_history)\n else:\n framework = MeZOFramework(self.model, args=self.args, lr=lr, candidate_seeds=self.candidate_seeds)\n self.model.eval()\n with torch.inference_mode():\n if self.args.batch_or_epoch == 'batch':\n loss_total_train = 0.0\n num_trained = 0\n progress_bar = tqdm(range(iter_steps))\n \n for cur_step in range(iter_steps):\n # init epoch progress bar\n if self.args.batch_or_epoch == 'epoch':\n if cur_step % len(self.train_loader) == 0:\n loss_total_train = 0.0\n num_trained = 0\n progress_bar = tqdm(range(len(self.train_loader)))\n try:\n batch = next(self.train_iterator)\n except StopIteration:\n self.train_iterator = iter(self.train_loader)\n batch = next(self.train_iterator)\n batch = {\n 'input_ids': batch['input_ids'].to(self.device),\n 'labels': batch['labels'].to(self.device),\n 'attention_mask': batch['attention_mask'].to(self.device) \n }\n logits, loss = framework.zo_step(batch, local_seed_pool=self.local_seed_pool)\n progress_bar.update(1)\n if (not torch.isnan(loss)) and (self.args.grad_clip <= 0 or loss != 0.0):\n loss_total_train += loss\n num_trained += len(batch['input_ids'])\n if self.args.batch_or_epoch == 'epoch':\n progress_bar.set_description(f'client {self.idx} train at epoch {int(cur_step / len(self.train_loader)) + 1}, loss: {loss_total_train / num_trained if num_trained != 0 else 0.0}')\n else:\n progress_bar.set_description(f'client {self.idx} train at step {cur_step}, loss: {loss_total_train / num_trained if num_trained != 0 else 0.0}')\n # save both CPU and GPU memory\n del framework\n self.model = None\n \n if memory_record_dic is not None:\n memory_record_dic[self.device.index] = {}\n memory_record_dic[self.device.index]['max_memory_allocated'] = torch.cuda.max_memory_allocated(self.device)\n memory_record_dic[self.device.index]['max_memory_reserved'] = torch.cuda.max_memory_reserved(self.device)\n\n def clear_model(self):\n # clear model to same memory\n self.model = None\n\n def migrate(self, device):\n \"\"\"\n migrate a client to a new device\n \"\"\"\n self.device = device\n\n def pull(self, forked_global_model):\n \"\"\"\n pull model from the server\n \"\"\"\n self.model = forked_global_model"
},
{
"identifier": "get_loaders",
"path": "utils_data/load_data.py",
"snippet": "def get_loaders(args, only_eval=False):\n \"\"\"\n Return: list of train_loaders, eval_loader\n \"\"\"\n tokenizer = AutoTokenizer.from_pretrained(args.model, use_fast=True)\n tokenizer.model_max_length = args.max_length\n special_tokens = dict()\n if tokenizer.pad_token is None:\n special_tokens[\"pad_token\"] = DefaultToken.PAD_TOKEN.value\n if tokenizer.eos_token is None:\n special_tokens[\"eos_token\"] = DefaultToken.EOS_TOKEN.value\n if tokenizer.bos_token is None:\n special_tokens[\"bos_token\"] = DefaultToken.BOS_TOKEN.value\n if tokenizer.unk_token is None:\n special_tokens[\"unk_token\"] = DefaultToken.UNK_TOKEN.value\n tokenizer.add_special_tokens(special_tokens)\n\n # Generation task\n if args.dataset == 'dolly':\n from utils_data.llm_dataset import LLMDataset, LLMDataCollator\n if args.eval_metric == 'loss':\n raw_datasets = LLMDataset(args.dataset, tokenizer=tokenizer, generation=False)\n else:\n raw_datasets = LLMDataset(args.dataset, tokenizer=tokenizer, generation=True)\n\n data_collator = LLMDataCollator(tokenizer=tokenizer)\n\n # only use a subset of raw dataset\n raw_datasets, _ = torch.utils.data.dataset.random_split(raw_datasets, [int(len(raw_datasets) * args.dataset_subsample), len(raw_datasets) - int(len(raw_datasets) * args.dataset_subsample)])\n y_all = np.array([item['categories'] for item in raw_datasets])\n index_eval = np.where(y_all == args.zerotask)[0]\n # delete the indices of eval samples from the all set\n index_train = np.delete(np.arange(len(y_all)), index_eval)\n raw_datasets = np.array(raw_datasets)\n train_set = raw_datasets[index_train]\n eval_set = raw_datasets[index_eval]\n y_train = np.array([item['categories'] for item in train_set])\n counter = Counter(y_train)\n noniid = args.iid\n if 'dir' in noniid:\n split_dic = partition_idx_labeldir(y_train, n_parties=args.num_clients, alpha=float(noniid[3:]), num_classes=len(counter))\n split_trainsets = []\n for _, sample_indices in split_dic.items():\n split_trainsets.append(Subset(train_set, indices=sample_indices))\n else:\n n_parts = [int(len(train_set) / args.num_clients) for _ in range(args.num_clients - 1)]\n n_parts.append(len(train_set) - sum(n_parts))\n split_trainsets = torch.utils.data.dataset.random_split(train_set, n_parts)\n\n list_train_loader = [\n DataLoader(\n subset, shuffle=True, batch_size=args.batch_size, collate_fn=data_collator\n ) for subset in split_trainsets\n ]\n eval_loader = DataLoader(\n eval_set, batch_size=args.batch_size, collate_fn=data_collator\n )\n \n elif args.dataset in ['instruct']:\n from utils_data.natural_instruction_loader import get_instruction_dataset\n list_train_loader, eval_loader = get_instruction_dataset(args, tokenizer, only_eval=only_eval)\n else:\n raise AttributeError(f'dataset {args.dataset} not implemented')\n return list_train_loader, eval_loader, tokenizer"
}
] | import argparse
import os
import time
import random
import numpy as np
import torch
import yaml
import json
from server import Server
from client import Client
from utils_data.load_data import get_loaders
from copy import deepcopy | 5,253 |
os.environ["TOKENIZERS_PARALLELISM"] = "false"
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Federation
parser.add_argument('--num_clients', type=int, default=200, help='N in our paper')
    parser.add_argument('-m', type=float, default=0.05, help='ratio of active clients in each round')
parser.add_argument('--rounds', type=int, default=40, help='the total number of rounds')
    parser.add_argument('--local_step', type=int, default=200, help=r'$\tau$ in our paper')
parser.add_argument('--batch_or_epoch', type=str, default='batch', choices=['epoch', 'batch'])
parser.add_argument('--equal_weight', default=False, action='store_true', help='if `true`, the weights among clients for aggregation are the same')
# Data
## Arguments related to data on both datasets
parser.add_argument('--dataset', type=str, default='instruct', choices=['instruct', 'dolly'])
parser.add_argument('--batch_size', type=int, default=1, help='batch size > 1 may cause error during running')
parser.add_argument('--max_length', type=int, default=1024, help='the max number of tokens of a data instance')
parser.add_argument('--use_prompts', default=True, help='if `true`, the prompt template from alpaca is adopted')
## Arguments related to data only for Dolly-15K
    parser.add_argument('--iid', type=str, default='dir0.5', help=r'`dir{alpha}` sets \alpha of the Dirichlet distribution; `0` means IID split')
parser.add_argument('--zerotask', default=7, type=int, help='the index of the task for evaluation in dolly-15K')
parser.add_argument('--dataset_subsample', type=float, default=1.0, help='used for sampling a subset from the original dataset, only effective for dolly-15K')
# Model
parser.add_argument('--model', type=str, default='datajuicer/LLaMA-1B-dj-refine-150B')
# Training
parser.add_argument('--lr', type=float, default=0.001, help=r'learning rate \eta')
parser.add_argument('--weight_decay', type=float, default=0.0, help='weight decay in MeZO')
    parser.add_argument('--grad_clip', type=float, default=-100.0, help='clip overly large loss values; if < 0, this feature is disabled')
# Training args only for `FedKSeed`
    parser.add_argument('-K', type=int, default=4096, help='number of candidate seeds (K in our paper)')
parser.add_argument('--zo_eps', type=float, default=0.0005, help=r'\eps in MeZO')
# Training args only for `FedKSeed-Pro`
parser.add_argument('--bias_sampling', default=False, action='store_true', help='if `true`, the probabilities of candidate seeds to be sampled are not identical, i.e., FedKSeed-Pro')
    parser.add_argument('--bias_loss_clip', default=1000.0, type=float, help='scalar gradients whose absolute value exceeds this value will be clipped')
parser.add_argument('--grad_initial', default=0.0, type=float, help='initial value of scalar gradient history corresponding to each candidate seed')
# Environment
parser.add_argument('--device', type=int, default=0, help='index of the targeted cuda device')
parser.add_argument('--log', default=False, action='store_true', help='if `true`, running logs will be recorded in files')
parser.add_argument('--log_root', default='logs', help='root path of log files')
parser.add_argument('--seed', default=42, type=int, help='global seed, for reproducibility')
# Evaluation
parser.add_argument('--eval_metric', default='rouge', type=str, choices=['rouge', 'loss'], help='metric to evaluate global model in the last round')
# Checkpoints
parser.add_argument('--save', default=False, action='store_true', help='if `true`, the checkpoint of tuned models will be stored')
time_stamp = str(time.time())
args = parser.parse_args()
eval_avg_acc = []
memory_record_dic = {}
previous_metric = args.eval_metric
args.eval_metric = 'loss'
# set CUDA visibility to targeted cuda device, to avoid the several hundred MB memory consumption of device 0
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.device)
setup_seed(args.seed)
list_train_loader, eval_loader, _ = get_loaders(args)
if args.dataset == 'instruct':
args.iid = 'meta'
log_dir = time_stamp
if args.log_root != '':
log_dir = os.path.join(args.log_root, log_dir)
if args.log:
os.makedirs(log_dir)
config = yaml.dump(args, None)
config = '\n'.join(config.split('\n')[1:])
print('Configs: ')
print(config)
print('=====================')
if args.log:
with open(os.path.join(log_dir, 'config.yaml'), 'w') as writer:
writer.write(config)
# since only CUDA device is available, load all models on device 0
args.device = 0
client_indices_rounds = []
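    # pre-sample the active client subset (num_clients * m) for every communication round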
for _ in range(args.rounds):
client_indices_rounds.append(np.random.choice(np.arange(args.num_clients), size=int(args.num_clients * args.m), replace=False))
client_list = []
# sample `K` candidate seeds
candidate_seeds = np.random.randint(1, 100000000000, args.K)
server = Server(args, eval_loader=eval_loader, candidate_seeds=candidate_seeds, log_dir=log_dir)
for idx in range(args.num_clients):
|
os.environ["TOKENIZERS_PARALLELISM"] = "false"
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Federation
parser.add_argument('--num_clients', type=int, default=200, help='N in our paper')
    parser.add_argument('-m', type=float, default=0.05, help='ratio of active clients in each round')
parser.add_argument('--rounds', type=int, default=40, help='the total number of rounds')
    parser.add_argument('--local_step', type=int, default=200, help=r'$\tau$ in our paper')
parser.add_argument('--batch_or_epoch', type=str, default='batch', choices=['epoch', 'batch'])
parser.add_argument('--equal_weight', default=False, action='store_true', help='if `true`, the weights among clients for aggregation are the same')
# Data
## Arguments related to data on both datasets
parser.add_argument('--dataset', type=str, default='instruct', choices=['instruct', 'dolly'])
parser.add_argument('--batch_size', type=int, default=1, help='batch size > 1 may cause error during running')
parser.add_argument('--max_length', type=int, default=1024, help='the max number of tokens of a data instance')
parser.add_argument('--use_prompts', default=True, help='if `true`, the prompt template from alpaca is adopted')
## Arguments related to data only for Dolly-15K
    parser.add_argument('--iid', type=str, default='dir0.5', help=r'`dir{alpha}` sets \alpha of the Dirichlet distribution; `0` means IID split')
parser.add_argument('--zerotask', default=7, type=int, help='the index of the task for evaluation in dolly-15K')
parser.add_argument('--dataset_subsample', type=float, default=1.0, help='used for sampling a subset from the original dataset, only effective for dolly-15K')
# Model
parser.add_argument('--model', type=str, default='datajuicer/LLaMA-1B-dj-refine-150B')
# Training
parser.add_argument('--lr', type=float, default=0.001, help=r'learning rate \eta')
parser.add_argument('--weight_decay', type=float, default=0.0, help='weight decay in MeZO')
    parser.add_argument('--grad_clip', type=float, default=-100.0, help='clip overly large loss values; if < 0, this feature is disabled')
# Training args only for `FedKSeed`
    parser.add_argument('-K', type=int, default=4096, help='number of candidate seeds (K in our paper)')
parser.add_argument('--zo_eps', type=float, default=0.0005, help=r'\eps in MeZO')
# Training args only for `FedKSeed-Pro`
parser.add_argument('--bias_sampling', default=False, action='store_true', help='if `true`, the probabilities of candidate seeds to be sampled are not identical, i.e., FedKSeed-Pro')
    parser.add_argument('--bias_loss_clip', default=1000.0, type=float, help='scalar gradients whose absolute value exceeds this value will be clipped')
parser.add_argument('--grad_initial', default=0.0, type=float, help='initial value of scalar gradient history corresponding to each candidate seed')
# Environment
parser.add_argument('--device', type=int, default=0, help='index of the targeted cuda device')
parser.add_argument('--log', default=False, action='store_true', help='if `true`, running logs will be recorded in files')
parser.add_argument('--log_root', default='logs', help='root path of log files')
parser.add_argument('--seed', default=42, type=int, help='global seed, for reproducibility')
# Evaluation
parser.add_argument('--eval_metric', default='rouge', type=str, choices=['rouge', 'loss'], help='metric to evaluate global model in the last round')
# Checkpoints
parser.add_argument('--save', default=False, action='store_true', help='if `true`, the checkpoint of tuned models will be stored')
time_stamp = str(time.time())
args = parser.parse_args()
eval_avg_acc = []
memory_record_dic = {}
previous_metric = args.eval_metric
args.eval_metric = 'loss'
# set CUDA visibility to targeted cuda device, to avoid the several hundred MB memory consumption of device 0
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.device)
setup_seed(args.seed)
list_train_loader, eval_loader, _ = get_loaders(args)
if args.dataset == 'instruct':
args.iid = 'meta'
log_dir = time_stamp
if args.log_root != '':
log_dir = os.path.join(args.log_root, log_dir)
if args.log:
os.makedirs(log_dir)
config = yaml.dump(args, None)
config = '\n'.join(config.split('\n')[1:])
print('Configs: ')
print(config)
print('=====================')
if args.log:
with open(os.path.join(log_dir, 'config.yaml'), 'w') as writer:
writer.write(config)
# since only CUDA device is available, load all models on device 0
args.device = 0
client_indices_rounds = []
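    # pre-sample the active client subset (num_clients * m) for every communication round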
for _ in range(args.rounds):
client_indices_rounds.append(np.random.choice(np.arange(args.num_clients), size=int(args.num_clients * args.m), replace=False))
client_list = []
# sample `K` candidate seeds
candidate_seeds = np.random.randint(1, 100000000000, args.K)
server = Server(args, eval_loader=eval_loader, candidate_seeds=candidate_seeds, log_dir=log_dir)
for idx in range(args.num_clients): | client_list.append(Client(idx, args, candidate_seeds, list_train_loader[idx])) | 1 | 2023-12-08 02:58:31+00:00 | 8k |
merlresearch/PixPNet | pixpnet/symbolic/index_layers.py | [
{
"identifier": "AdaptiveAvgPool2d_factory",
"path": "pixpnet/symbolic/base_layers.py",
"snippet": "def flatten(input, start_dim: int = 0, end_dim: int = -1):\ndef cat(tensors, dim=0):\ndef unsqueeze(x, dim):\ndef np_sp_func(func):\n def wrapper(*input, **kwargs):\n def __init__(self) -> None:\n def forward(self, *input: Any) -> Any:\n def __call__(self, *input, **kwargs):\n def add_module(self, name, module):\n def __init__(self, module_dict=None):\n def values(self):\n def items(self):\n def add_module(self, name, module):\n def __init__(self, *modules):\n def __iter__(self):\n def forward(self, input):\n def __init__(self, shape):\n def __init__(self, start_dim=1, end_dim=-1):\n def forward(self, input):\ndef _ConvNd_factory(tensor_cls, parameters=True):\n def _conv_forward(self, input, weight, bias):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n kernel_size: Tuple[int, ...],\n stride: Tuple[int, ...],\n padding: Tuple[int, ...],\n dilation: Tuple[int, ...],\n transposed: bool,\n output_padding: Tuple[int, ...],\n groups: int,\n bias: bool,\n padding_mode: str,\n ) -> None:\ndef Conv2d_factory(_ConvNd, conv2d):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n kernel_size: _size_2_t,\n stride: _size_2_t = 1,\n padding: Union[str, _size_2_t] = 0,\n dilation: _size_2_t = 1,\n groups: int = 1,\n bias: bool = True,\n padding_mode: str = \"zeros\",\n ) -> None:\n def _conv_forward(self, input, weight, bias):\n def forward(self, input):\ndef _NormBase_factory(tensor_cls, parameters=True):\n def __init__(\n self,\n num_features: int,\n eps: float = 1e-5,\n momentum: float = 0.1,\n affine: bool = True,\n track_running_stats: bool = True,\n ) -> None:\n def _check_input_dim(self, input):\ndef _BatchNorm_factory(_NormBase, batch_norm=None):\n def __init__(\n self,\n num_features,\n eps=1e-5,\n momentum=0.1,\n affine=True,\n track_running_stats=True,\n ):\n def forward(self, input):\ndef BatchNorm2d_factory(_BatchNorm):\n def _check_input_dim(input):\ndef Linear_factory(tensor_cls, linear, parameters=True):\n def __init__(self, in_features: int, out_features: int, bias: bool = True) -> None:\n def forward(self, input):\n def __init__(self, output_size) -> None:\n def extra_repr(self) -> str:\ndef AdaptiveAvgPool2d_factory(adaptive_avg_pool_2d):\n def forward(self, input):\ndef start_index(out_idx: int, out_len: int, in_len: int) -> int:\ndef end_index(out_idx: int, out_len: int, in_len: int) -> int:\n def __init__(\n self, kernel_size, stride=None, padding=0, dilation=1, return_indices: bool = False, ceil_mode: bool = False\n ) -> None:\ndef MaxPool2d_factory(max_pool2d):\n def forward(self, input):\ndef AvgPool2d_factory(avg_pool2d):\n def __init__(\n self,\n kernel_size: _size_2_t,\n stride: Optional[_size_2_t] = None,\n padding: _size_2_t = 0,\n ceil_mode: bool = False,\n count_include_pad: bool = True,\n divisor_override: Optional[int] = None,\n ) -> None:\n def forward(self, input):\nclass Module:\nclass ModuleDict(Module):\nclass Sequential(ModuleDict):\nclass DummyTensor:\nclass Flatten(Module):\n class _ConvNd(Module, metaclass=ABCMeta):\n class Conv2d(_ConvNd):\n class _NormBase(Module, metaclass=ABCMeta):\n class _BatchNorm(_NormBase, metaclass=ABCMeta):\n class BatchNorm2d(_BatchNorm):\n class Linear(Module):\nclass _AdaptiveAvgPoolNd(Module, metaclass=ABCMeta):\n class AdaptiveAvgPool2d(_AdaptiveAvgPoolNd):\nclass _MaxPoolNd(Module, metaclass=ABCMeta):\n class MaxPool2d(_MaxPoolNd):\nclass _AvgPoolNd(Module, metaclass=ABCMeta):\n class AvgPool2d(_AvgPoolNd):"
},
{
"identifier": "NonPositiveDimError",
"path": "pixpnet/symbolic/exceptions.py",
"snippet": "class NonPositiveDimError(ValueError):\n pass"
},
{
"identifier": "_pair",
"path": "pixpnet/symbolic/misc.py",
"snippet": "_SYM_NAME_STACK = []\nV = TypeVar(\"V\")\nT = TypeVar(\"T\")\ndef unique_syms_factory(tensor_cls):\n def __init__(self):\n def __enter__(self):\n def __exit__(self, type, value, traceback):\n def Tensor(self, *args, **kwargs):\ndef sym_scope(name):\ndef get_sym_scope():\ndef _ntuple(n, name=\"parse\"):\n def parse(x):\ndef _overwrite_named_param(kwargs: Dict[str, Any], param: str, new_value: V) -> None:\ndef _reverse_repeat_tuple(t, n):\ndef _make_divisible(v: float, divisor: int, min_value: Optional[int] = None) -> int:\n class unique_syms:"
}
] | from itertools import chain, product
from math import ceil
from typing import List, Sequence, Tuple, Union
from pixpnet.symbolic.base_layers import ( # noqa: F401
AdaptiveAvgPool2d_factory,
AvgPool2d_factory,
BatchNorm2d_factory,
Conv2d_factory,
Flatten,
Linear_factory,
MaxPool2d_factory,
Module,
ModuleDict,
Sequential,
_AdaptiveAvgPoolNd,
_BatchNorm_factory,
_ConvNd_factory,
_MaxPoolNd,
_NormBase_factory,
cat,
concat,
end_index,
flatten,
np_sp_func,
start_index,
unsqueeze,
)
from pixpnet.symbolic.exceptions import NonPositiveDimError
from pixpnet.symbolic.misc import _pair, unique_syms_factory
import numpy as np
import numbers | 4,110 |
def __init__(self, inplace=False):
super(ReLU, self).__init__()
def forward(self, input: Tensor) -> Tensor:
return input
SiLU = ReLU
Sigmoid = ReLU
GELU = ReLU
def conv2d(input, weight, bias, stride, padding=0, dilation=1, groups=1):
sh, sw = _pair(stride)
ph, pw = _pair(padding)
dh, dw = _pair(dilation)
if not (dh == dw == 1):
raise NotImplementedError
c_out, ci_per_group, kh, kw = weight.shape
co_per_group = c_out // groups
h_in, w_in = input.shape[-2:]
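    # standard conv output size: floor((in + 2*pad - dilation*(k-1) - 1) / stride) + 1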
h_out = (h_in + 2 * ph - dh * (kh - 1) - 1) // sh + 1
w_out = (w_in + 2 * pw - dw * (kw - 1) - 1) // sw + 1
single_sample = input.ndim == 3
if single_sample:
input = input.reshape((1, *input.shape))
if any(d <= 0 for d in (c_out, h_out, w_out)):
raise NonPositiveDimError((c_out, h_out, w_out))
# input has identical channels or no groups or there is only 1 channel
identical_channels = has_identical_channels(input)
output = OutputTensor(
(input.shape[0], c_out, h_out, w_out), dtype=input.dtype, identical_channels=identical_channels or groups == 1
)
if not output.identical_channels:
print(
"WARNING: input does not have identical channels in conv2d and "
"groups != 1, this will take longer to compute"
)
for oh in range(h_out):
ih0 = (oh * sh) - ph
ih1 = ih0 + kh
if ih0 < 0:
ih0 = 0
for ow in range(w_out):
iw0 = (ow * sw) - pw
iw1 = iw0 + kw
if iw0 < 0:
iw0 = 0
# slice: n x c x kh x kw
# weight: c_out x c x kh x kw
if identical_channels:
# we can ignore groups. take first channel (arbitrary as all
# channels are the same)
x_slice = input[:, 0, ih0:ih1, iw0:iw1]
for n in range(output.shape[0]):
hc_n = HypercubeCollection(x_slice[n].flatten().tolist())
output[n, :, oh, ow] = hc_n
elif groups == 1:
x_slice = input[:, :, ih0:ih1, iw0:iw1]
for n in range(output.shape[0]):
hc_n = HypercubeCollection(x_slice[n].flatten().tolist())
output[n, :, oh, ow] = hc_n
else:
for g in range(groups):
co0 = g * co_per_group
co1 = co0 + co_per_group
ci0 = g * ci_per_group
ci1 = ci0 + ci_per_group
x_slice = input[:, ci0:ci1, ih0:ih1, iw0:iw1]
for n in range(output.shape[0]):
hc_n = HypercubeCollection(x_slice[n, :].flatten().tolist())
for c in range(co0, co1):
output[n, c, oh, ow] = hc_n
if single_sample:
output = output.squeeze(axis=0)
return output
def linear(input: Tensor, weight: Tensor, bias: Tensor) -> np.ndarray:
# out_features x in_features
output = OutputTensor((input.shape[0], weight.shape[0]), dtype=input.dtype, identical_channels=True)
for n in range(output.shape[0]):
hc_n = HypercubeCollection(input[n].tolist())
output[n, :] = hc_n
return output
def adaptive_avg_pool_2d(
input: np.ndarray, # Tensor
output_size=None,
):
input_height, input_width = input.shape[-2:]
output_height, output_width = _pair(output_size)
output_height = output_height or input_height
output_width = output_width or input_width
single_sample = input.ndim == 3
if single_sample:
input = input.reshape((1, *input.shape))
identical_channels = has_identical_channels(input)
output = OutputTensor(
(input.shape[0], input.shape[1], output_height, output_width),
dtype=input.dtype,
identical_channels=identical_channels,
)
if not identical_channels:
print(
"WARNING: input does not have identical channels in "
"adaptive_avg_pool_2d, this will take longer to compute"
)
for oh in range(output_height):
| # Copyright (c) 2022-2023 Mitsubishi Electric Research Laboratories (MERL)
# Copyright (c) PyTorch Contributors 2022
#
# SPDX-License-Identifier: AGPL-3.0-or-later
# SPDX-License-Identifier: BSD-3-Clause
# Some code based on PyTorch https://github.com/pytorch/pytorch
# noinspection PyUnresolvedReferences
class Tensor(np.ndarray):
__slots__ = ("_names",)
def __new__(cls, shape, name=None, **kwargs):
obj = super().__new__(cls, shape=shape, dtype=object, **kwargs)
names = [*product(*(range(d) for d in shape))]
vcs = [
HypercubeCollection.from_hypercube(Hypercube.from_slices(map(slice, indices)))
for indices in product(*map(range, shape))
]
obj.flat[:] = vcs
obj._names = names
return obj
@property
def names(self):
return self._names
_FULL_SLICE = slice(None)
class OutputTensor(np.ndarray):
__slots__ = "identical_channels", "_underlying"
def __new__(cls, shape, dtype=None, identical_channels=False, **kwargs):
if identical_channels:
shape_orig = shape
n, c, *dims = shape
assert c > 0
shape = (n, 1, *dims)
obj = super().__new__(cls, shape=shape, dtype=dtype, **kwargs)
obj._underlying = None
if identical_channels:
underlying = obj
obj = np.broadcast_to(underlying, shape_orig, subok=True)
obj._underlying = underlying
obj.identical_channels = identical_channels
return obj
def __setitem__(self, key, value):
if self._underlying is None:
super().__setitem__(key, value)
else: # identical_channels memory view trick
if len(key) >= 2:
assert key[1] == _FULL_SLICE
self._underlying[key] = value
def __iadd__(self, other):
if self._underlying is None:
if self.flags["WRITEABLE"]:
super().__iadd__(other)
else:
out = self + other
if self.identical_channels and isinstance(other, OutputTensor) and other.identical_channels:
out.identical_channels = True
return out
else: # identical_channels memory view trick
if isinstance(other, OutputTensor) and other.identical_channels:
self._underlying += other._underlying
elif (isinstance(other, np.ndarray) and other.ndim >= 2 and other.shape[1] == 1) or not isinstance(
other, np.ndarray
):
self._underlying += other
else:
return self + other
return self
def __isub__(self, other):
if self._underlying is None:
if self.flags["WRITEABLE"]:
super().__isub__(other)
else:
out = self - other
if self.identical_channels and isinstance(other, OutputTensor) and other.identical_channels:
out.identical_channels = True
return out
else: # identical_channels memory view trick
if isinstance(other, OutputTensor) and other.identical_channels:
self._underlying -= other._underlying
elif (isinstance(other, np.ndarray) and other.ndim >= 2 and other.shape[1] == 1) or not isinstance(
other, np.ndarray
):
self._underlying -= other
else:
return self - other
return self
def __imul__(self, other):
if self._underlying is None:
if self.flags["WRITEABLE"]:
super().__imul__(other)
else:
out = self * other
if self.identical_channels and isinstance(other, OutputTensor) and other.identical_channels:
out.identical_channels = True
return out
else: # identical_channels memory view trick
if isinstance(other, OutputTensor) and other.identical_channels:
self._underlying *= other._underlying
elif (isinstance(other, np.ndarray) and other.ndim >= 2 and other.shape[1] == 1) or not isinstance(
other, np.ndarray
):
self._underlying *= other
else:
return self * other
return self
def __array_finalize__(self, obj):
if obj is None:
return
if not hasattr(self, "_underlying"):
self._underlying = None
if not hasattr(self, "identical_channels"):
self.identical_channels = self.ndim >= 2 and self.shape[1] == 1
def has_identical_channels(tensor):
return (isinstance(tensor, OutputTensor) and tensor.identical_channels) or (
isinstance(tensor, np.ndarray) and tensor.ndim >= 2 and tensor.shape[1] == 1
)
unique_syms = unique_syms_factory(Tensor)
class Hypercube:
"""NOTE: step is not supported in this function"""
__slots__ = "indices", "ndim"
indices: Tuple[Tuple[int, int], ...]
ndim: int
@classmethod
def from_slices(
cls,
indices: Tuple[slice, ...],
) -> "Hypercube":
indices = tuple((s.stop, s.stop + 1) if s.start is None else (s.start, s.stop) for s in indices)
return cls(indices)
def __init__(
self,
indices: Tuple[Tuple[int, int], ...],
):
self.indices = indices
self.ndim = len(self.indices)
def serialize(self):
return [[*idx] for idx in self.indices]
@classmethod
def deserialize(cls, indices):
return cls(indices)
def union(self, other: "Hypercube") -> Union["Hypercube", Tuple["Hypercube", "Hypercube"]]:
if self == other:
return self
# contains_other # self contains other
# other_contains # other contains self
unequal_count = 0
contains_other = other_contains = concat_dim = None
for d in range(self.ndim):
(r00, r01), (r10, r11) = self.indices[d], other.indices[d]
r_int = r00 if r00 > r10 else r10, r01 if r01 < r11 else r11
adjacent = (r00 == r11) or (r10 == r01)
if not (len(r_int) or adjacent): # no intersection, cannot combine
return self, other
unequal = (r00 != r10) or (r01 != r11)
unequal_count += unequal
if unequal:
concat_dim = d
if contains_other is None or contains_other:
contains_other = r_int == (r10, r11) # r1 contained within r0
if other_contains is None or other_contains:
other_contains = r_int == (r00, r01) # r0 contained within r1
if contains_other:
return self
if other_contains:
return other
if unequal_count == 1:
# This means we can concatenate the hypercubes along a single axis
(r00, r01), (r10, r11) = (self.indices[concat_dim], other.indices[concat_dim])
indices = (
self.indices[:concat_dim]
+ ((r00 if r00 < r10 else r10, r01 if r01 > r11 else r11),)
+ self.indices[concat_dim + 1 :]
)
return Hypercube(indices)
return self, other
def _intersection_indices(self: "Hypercube", other: "Hypercube"):
indices = []
for d in range(self.ndim):
(r00, r01), (r10, r11) = self.indices[d], other.indices[d]
r_int = r00 if r00 > r10 else r10, r01 if r01 < r11 else r11
if not len(r_int): # no intersection
return None
indices.append((r00 if r00 > r10 else r10, r01 if r01 < r11 else r11))
return indices
def intersection(self: "Hypercube", other: "Hypercube") -> Union["Hypercube", None]:
indices = self._intersection_indices(other)
return None if indices is None else Hypercube(indices)
def atoms(self):
return {*product(*(range(a, b) for a, b in self.indices))}
def intersects(self, index: Tuple[int, ...]):
return all(r0 <= index[d] < r1 for d, (r0, r1) in enumerate(self.indices))
def corners(self, indices=None):
if indices is None:
indices = self.indices
return product(*indices)
def edges(self, indices=None):
if indices is None:
indices = self.indices
flags = [(0, 1)] * (len(indices) - 1)
# noinspection PyTypeChecker
flags.append((0,)) # only one side so no duplicate edges
for flags_i in product(*flags):
corner = [idx[flag] for idx, flag in zip(indices, flags_i)]
for j, flag in enumerate(flags_i):
corner_other = corner.copy()
corner_other[j] = indices[j][0 if flag else 1]
yield corner, corner_other
def difference(self, other):
indices = self._intersection_indices(other)
if indices is None:
return self # no intersection
corners = self.corners()
edges = self.edges()
int_corners = self.corners(indices)
int_edges = self.edges(indices)
cubes = []
# create cubes corner to corner (1:1 corners)
for corner, int_corner in zip(corners, int_corners):
indices_cube = []
for d0, d1 in zip(corner, int_corner):
if d0 > d1:
d0, d1 = d1, d0 # swap
indices_cube.append((d0, d1))
cubes.append(Hypercube(indices_cube))
# create cubes edge to edge (1:1 edges)
for edge, int_edge in zip(edges, int_edges):
indices_cube = []
for d0, d1 in zip(edge[0], int_edge[1]):
if d0 > d1:
d0, d1 = d1, d0 # swap
indices_cube.append((d0, d1))
cubes.append(Hypercube(indices_cube))
return HypercubeCollection.from_hypercubes(cubes)
def as_slice(self, all_channels=False): # -> Tuple[slice]
return tuple(
slice(None) if all_channels and i == 1 else slice(i_a, i_b) for i, (i_a, i_b) in enumerate(self.indices)
)
def take_from(self, arr, all_channels=False):
assert self.ndim == arr.ndim
return arr[self.as_slice(all_channels=all_channels)]
def __len__(self):
size = 1
for r0, r1 in self.indices:
size *= r1 - r0
return size
def __eq__(self, other):
return self.indices == other.indices
def __or__(self, other):
return self.union(other)
def __and__(self, other):
return self.intersection(other)
def __sub__(self, other):
return self.difference(other)
def __repr__(self):
return f"Hypercube(indices={self.indices})"
def __str__(self):
return repr(self)
class HypercubeCollection:
__slots__ = ("hypercubes",)
@classmethod
def from_hypercube(cls, hypercube: Hypercube) -> "HypercubeCollection":
obj = HypercubeCollection.__new__(cls)
obj.hypercubes = [hypercube]
return obj
@classmethod
def from_hypercubes(cls, hypercubes: Sequence[Hypercube]) -> "HypercubeCollection":
obj = HypercubeCollection.__new__(cls)
obj.hypercubes = hypercubes
return obj
def __init__(self, *args: Sequence["HypercubeCollection"]):
hypercubes = [*chain.from_iterable(hc.hypercubes for hc in chain.from_iterable(args))]
self.hypercubes = self._reduce_hcs(hypercubes)
def serialize(self):
return [hc.serialize() for hc in self.hypercubes]
@classmethod
def deserialize(cls, arr_indices):
return cls.from_hypercubes([Hypercube.deserialize(indices) for indices in arr_indices])
@staticmethod
def _reduce_hcs(hypercubes: List[Hypercube]) -> List[Hypercube]:
uncombined = []
combined = []
while hypercubes:
hc0 = hypercubes.pop()
HypercubeCollection._compare_hcs(
hc0, compare_src=hypercubes, combined_dest=combined, uncombined_dest=uncombined
)
while combined:
hc0 = combined.pop()
HypercubeCollection._compare_hcs(
hc0, compare_src=uncombined, combined_dest=combined, uncombined_dest=uncombined
)
return uncombined
@staticmethod
def _compare_hcs(hc0: Hypercube, compare_src: List, combined_dest: List, uncombined_dest: List):
idxs_to_drop = []
for i, hc1 in enumerate(compare_src):
hcc = hc0 | hc1
if not isinstance(hcc, tuple):
combined_dest.append(hcc)
idxs_to_drop.append(i)
break
else:
uncombined_dest.append(hc0)
for i in idxs_to_drop:
del compare_src[i]
def atoms(self, *args):
if args:
print("ignored:", args)
return set().union(*(hc.atoms() for hc in self.hypercubes))
def intersects(self, index: Tuple[int]):
for hc in self.hypercubes:
if hc.intersects(index):
return True
return False
def intersecting_indices(self, indices: Sequence[Tuple[int]]):
return [index for index in indices if self.intersects(index)]
def difference(self, other):
if isinstance(other, Hypercube):
hypercubes = [hc.difference(other) for hc in self.hypercubes]
else:
hypercubes = [hc.difference(hc_other) for hc in self.hypercubes for hc_other in other.hypercubes]
return HypercubeCollection(hypercubes)
def take_from(self, arr, all_channels=False):
to_intersect = []
for hc in self.hypercubes:
for hc_i in to_intersect:
hc = hc.difference(hc_i)
to_intersect.append(hc)
if len(to_intersect) == 1:
return to_intersect[0].take_from(arr, all_channels=all_channels)
return [to_int.take_from(arr, all_channels=all_channels) for to_int in to_intersect]
def as_slices(self, all_channels=False):
to_intersect = []
for hc in self.hypercubes:
for hc_i in to_intersect:
hc = hc.difference(hc_i)
to_intersect.append(hc)
return [hc.as_slice(all_channels=all_channels) for hc in to_intersect]
def __len__(self):
size = 0
to_intersect = []
for hc in self.hypercubes:
for hc_i in to_intersect:
hc = hc.difference(hc_i)
size += len(hc)
to_intersect.append(hc)
return size
def __or__(self, other: "HypercubeCollection") -> "HypercubeCollection":
"""Union operator ``self | other``"""
return HypercubeCollection((self, other))
def __add__(self, other: "HypercubeCollection") -> "HypercubeCollection":
return HypercubeCollection((self, other))
def __mul__(self, other: "HypercubeCollection") -> "HypercubeCollection":
return HypercubeCollection((self, other))
def __repr__(self):
return f"HypercubeCollection(hypercubes={self.hypercubes})"
def __str__(self):
return repr(self)
class ReLU(Module):
__slots__ = ()
def __init__(self, inplace=False):
super(ReLU, self).__init__()
def forward(self, input: Tensor) -> Tensor:
return input
SiLU = ReLU
Sigmoid = ReLU
GELU = ReLU
def conv2d(input, weight, bias, stride, padding=0, dilation=1, groups=1):
sh, sw = _pair(stride)
ph, pw = _pair(padding)
dh, dw = _pair(dilation)
if not (dh == dw == 1):
raise NotImplementedError
c_out, ci_per_group, kh, kw = weight.shape
co_per_group = c_out // groups
h_in, w_in = input.shape[-2:]
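    # standard conv output size: floor((in + 2*pad - dilation*(k-1) - 1) / stride) + 1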
h_out = (h_in + 2 * ph - dh * (kh - 1) - 1) // sh + 1
w_out = (w_in + 2 * pw - dw * (kw - 1) - 1) // sw + 1
single_sample = input.ndim == 3
if single_sample:
input = input.reshape((1, *input.shape))
if any(d <= 0 for d in (c_out, h_out, w_out)):
raise NonPositiveDimError((c_out, h_out, w_out))
# input has identical channels or no groups or there is only 1 channel
identical_channels = has_identical_channels(input)
output = OutputTensor(
(input.shape[0], c_out, h_out, w_out), dtype=input.dtype, identical_channels=identical_channels or groups == 1
)
if not output.identical_channels:
print(
"WARNING: input does not have identical channels in conv2d and "
"groups != 1, this will take longer to compute"
)
for oh in range(h_out):
ih0 = (oh * sh) - ph
ih1 = ih0 + kh
if ih0 < 0:
ih0 = 0
for ow in range(w_out):
iw0 = (ow * sw) - pw
iw1 = iw0 + kw
if iw0 < 0:
iw0 = 0
# slice: n x c x kh x kw
# weight: c_out x c x kh x kw
if identical_channels:
# we can ignore groups. take first channel (arbitrary as all
# channels are the same)
x_slice = input[:, 0, ih0:ih1, iw0:iw1]
for n in range(output.shape[0]):
hc_n = HypercubeCollection(x_slice[n].flatten().tolist())
output[n, :, oh, ow] = hc_n
elif groups == 1:
x_slice = input[:, :, ih0:ih1, iw0:iw1]
for n in range(output.shape[0]):
hc_n = HypercubeCollection(x_slice[n].flatten().tolist())
output[n, :, oh, ow] = hc_n
else:
for g in range(groups):
co0 = g * co_per_group
co1 = co0 + co_per_group
ci0 = g * ci_per_group
ci1 = ci0 + ci_per_group
x_slice = input[:, ci0:ci1, ih0:ih1, iw0:iw1]
for n in range(output.shape[0]):
hc_n = HypercubeCollection(x_slice[n, :].flatten().tolist())
for c in range(co0, co1):
output[n, c, oh, ow] = hc_n
if single_sample:
output = output.squeeze(axis=0)
return output
def linear(input: Tensor, weight: Tensor, bias: Tensor) -> np.ndarray:
# out_features x in_features
output = OutputTensor((input.shape[0], weight.shape[0]), dtype=input.dtype, identical_channels=True)
for n in range(output.shape[0]):
hc_n = HypercubeCollection(input[n].tolist())
output[n, :] = hc_n
return output
def adaptive_avg_pool_2d(
input: np.ndarray, # Tensor
output_size=None,
):
input_height, input_width = input.shape[-2:]
output_height, output_width = _pair(output_size)
output_height = output_height or input_height
output_width = output_width or input_width
single_sample = input.ndim == 3
if single_sample:
input = input.reshape((1, *input.shape))
identical_channels = has_identical_channels(input)
output = OutputTensor(
(input.shape[0], input.shape[1], output_height, output_width),
dtype=input.dtype,
identical_channels=identical_channels,
)
if not identical_channels:
print(
"WARNING: input does not have identical channels in "
"adaptive_avg_pool_2d, this will take longer to compute"
)
for oh in range(output_height): | ih0 = start_index(oh, output_height, input_height) | 0 | 2023-12-06 23:49:31+00:00 | 8k |
Dinghow/UIM | seg_matting_tool/test.py | [
{
"identifier": "dataset",
"path": "util/dataset.py",
"snippet": "IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm']\n BASE_DIR = 'Combined_Dataset'\nclass Composition1KMatting(Dataset):\nclass Combined4ClassesMatting(Dataset):\nclass RealWorldMatting(Dataset):\n def __init__(self,\n root='dataset',\n split='train',\n task='matting',\n num_bgs=10,\n transform=None,\n preprocess=False,\n retname=True): \n def __getitem__(self, index):\n def _composite_fg(self, fg, alpha, idx):\n def _composite(self, fg, bg, a, w, h, trimap):\n def __len__(self):\n def __str__(self):\n def __init__(self,\n root='dataset',\n split='all',\n transform=None,\n retname=True): \n def __getitem__(self, index):\n def __len__(self):\n def __str__(self):\n def __init__(self,\n root='dataset',\n transform=None,\n retname=True): \n def __getitem__(self, index):\n def __len__(self):\n def __str__(self):"
},
{
"identifier": "AverageMeter",
"path": "util/util.py",
"snippet": "class AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count"
},
{
"identifier": "compute_mse",
"path": "util/util.py",
"snippet": "def compute_mse(pred, alpha, trimap=None):\n if trimap is not None:\n num_pixels = float((trimap == 128).sum())\n return ((pred - alpha) ** 2 * (trimap == 128) ).sum() / (num_pixels + 1e-8)\n else:\n num_pixels = float(np.prod(alpha.shape))\n return ((pred - alpha) ** 2).sum() / (num_pixels + 1e-8)"
},
{
"identifier": "compute_sad",
"path": "util/util.py",
"snippet": "def compute_sad(pred, alpha, trimap=None):\n diff = np.abs(pred - alpha)\n if trimap is not None:\n return np.sum(diff * (trimap == 128)) / 1000\n else:\n return np.sum(diff) / 1000"
},
{
"identifier": "compute_gradient",
"path": "util/util.py",
"snippet": "def compute_gradient(pred, target, trimap=None):\n pred_x, pred_y = gaussgradient(pred, 1.4)\n target_x, target_y = gaussgradient(target, 1.4)\n\n pred_amp = np.sqrt(pred_x ** 2 + pred_y ** 2)\n target_amp = np.sqrt(target_x ** 2 + target_y ** 2)\n\n error_map = (pred_amp - target_amp) ** 2\n if trimap is not None:\n loss = np.sum(error_map[trimap == 128])\n else:\n loss = np.sum(error_map)\n\n return loss / 1000."
},
{
"identifier": "compute_connectivity",
"path": "util/util.py",
"snippet": "def compute_connectivity(pred, target, trimap=None, step=0.1):\n h, w = pred.shape\n\n thresh_steps = list(np.arange(0, 1 + step, step))\n l_map = np.ones_like(pred, dtype=np.float) * -1\n for i in range(1, len(thresh_steps)):\n pred_alpha_thresh = (pred >= thresh_steps[i]).astype(np.int)\n target_alpha_thresh = (target >= thresh_steps[i]).astype(np.int)\n\n omega = getLargestCC(pred_alpha_thresh * target_alpha_thresh).astype(np.int)\n flag = ((l_map == -1) & (omega == 0)).astype(np.int)\n l_map[flag == 1] = thresh_steps[i - 1]\n\n l_map[l_map == -1] = 1\n\n pred_d = pred - l_map\n target_d = target - l_map\n pred_phi = 1 - pred_d * (pred_d >= 0.15).astype(np.int)\n target_phi = 1 - target_d * (target_d >= 0.15).astype(np.int)\n if trimap is not None:\n loss = np.sum(np.abs(pred_phi - target_phi)[trimap == 128])\n else:\n loss = np.sum(np.abs(pred_phi - target_phi))\n\n return loss / 1000."
},
{
"identifier": "get_cuda_devices",
"path": "util/util.py",
"snippet": "def get_cuda_devices():\n os.system('nvidia-smi -q -d Memory | grep -A4 GPU | grep Free > tmp')\n memory_gpu = np.array([int(x.split()[2]) for x in open('tmp', 'r').readlines()])\n clen = len(os.environ['CUDA_VISIBLE_DEVICES'].split(','))\n devices = heapq.nlargest(clen, range(len(memory_gpu)), memory_gpu.take)\n test_gpu = devices\n os.environ['CUDA_VISIBLE_DEVICES']=str(devices)[1:-1]\n os.system('rm tmp')\n return devices"
},
{
"identifier": "get_unknown_tensor_from_pred",
"path": "util/util.py",
"snippet": "def get_unknown_tensor_from_pred(pred, rand_width=30, train_mode=True):\n ### pred: N, 1 ,H, W \n N, C, H, W = pred.shape\n\n pred = pred.data.cpu().numpy()\n uncertain_area = np.ones_like(pred, dtype=np.uint8)\n uncertain_area[pred<1.0/255.0] = 0\n uncertain_area[pred>1-1.0/255.0] = 0\n\n for n in range(N):\n uncertain_area_ = uncertain_area[n,0,:,:] # H, W\n if train_mode:\n width = np.random.randint(1, rand_width)\n else:\n width = rand_width // 2\n uncertain_area_ = cv2.dilate(uncertain_area_, Kernels[width])\n uncertain_area[n,0,:,:] = uncertain_area_\n\n weight = np.zeros_like(uncertain_area)\n weight[uncertain_area == 1] = 1\n weight = torch.from_numpy(weight).cuda()\n\n return weight"
},
{
"identifier": "interactiveMattingTransform",
"path": "util/custom_transforms.py",
"snippet": "class interactiveMattingTransform(object):\n # modified from transform.py of dingrunyu\n def __init__(self, channel, no_crop = False, diff_width = False,\\\n relax_crop = 50, zero_pad_crop = True, use_iogpoints = False,\\\n use_roimasking = False, use_trimap = False, use_bbox = False,\\\n use_in_point = False, use_iogdextr = False, use_extreme_points = False,\\\n use_scribble = False, rotate_degree = 30, scale = [0.8, 1.25], shear = 10,\\\n flip = 0.5, crop_size = 512, trimap_type = 'standard', mask_type = 'alpha',\\\n bbox_type = 'area', trimap_one_hot=True):\n self.channel = channel\n self.no_crop = no_crop\n self.diff_width = diff_width\n self.relax_crop = relax_crop\n self.zero_pad_crop = zero_pad_crop\n self.rotate_degree = rotate_degree\n self.scale = scale\n self.shear = shear\n self.flip = flip\n self.crop_size = crop_size\n self.trimap_type = trimap_type\n self.mask_type = mask_type\n self.bbox_type = bbox_type\n self.use_roimasking = use_roimasking\n self.trimap_one_hot = trimap_one_hot\n\n self.use_trimap = use_trimap\n self.use_extreme_points = use_extreme_points\n self.use_bbox = use_bbox\n self.use_in_point = use_in_point\n self.use_iogpoints = use_iogpoints\n self.use_iogdextr = use_iogdextr\n self.use_scribble = use_scribble\n\n def getTrainTransform(self):\n transform_tr = [\n RandomAffine(degrees=self.rotate_degree, scale=self.scale, shear=self.shear, flip=self.flip), \n GenTrimap(),\n RandomCrop(output_size=(self.crop_size, self.crop_size)),\n RandomJitter(),\n Composite(),\n GenMask(mask_type=self.mask_type),\n MattingToTensor(phase=\"train\", trimap_type=self.trimap_type, in_channels=self.channel, trimap_one_hot=self.trimap_one_hot)]\n\n tr_ep = ExtremePoints(sigma=10, pert=5, elem='alpha')\n tr_in = IOGPoints(sigma=10, pert=5, elem='alpha', p_type='in')\n tr_out = IOGPoints(sigma=10, pert=5, elem='alpha', p_type='out')\n tr_bbox = OutPoints(sigma=10, pert=5, elem='alpha')\n tr_crop = CropFromMask(crop_elems=('alpha', 'fg'), \\\n relax=self.relax_crop, zero_pad=self.zero_pad_crop, \\\n crop=False if self.no_crop else True, use_roimasking = self.use_roimasking,\\\n is_matting=True)\n tr_scribble = GenerateScribble(elem='alpha')\n\n if not self.no_crop:\n transform_tr.insert(0, tr_crop)\n\n if self.channel == 5 and self.use_iogpoints:\n print('Use foreground/background points')\n transform_tr.insert(-3, tr_in)\n transform_tr.insert(-3, tr_out)\n transform_tr.insert(-3, ToImage(norm_elem=('in_points', 'out_points')))\n\n elif self.channel == 6 and self.use_trimap:\n print('Use trimap (one-hot)')\n\n elif self.channel == 4 and self.use_trimap:\n print('Use trimap')\n\n elif self.channel == 4 and self.use_bbox:\n print('Use bounding box')\n if self.bbox_type == 'points':\n transform_tr.insert(-3, tr_out)\n elif self.bbox_type == 'area':\n transform_tr.insert(-3, tr_bbox)\n else:\n raise RuntimeError('Wrong bbox type.')\n transform_tr.insert(-3, ToImage(norm_elem=('out_points')))\n\n elif self.channel == 4 and self.use_in_point:\n print('Use inside point')\n transform_tr.insert(-3, tr_in)\n transform_tr.insert(-3, ToImage(norm_elem=('in_points')))\n\n elif self.channel == 4 and self.use_extreme_points:\n print('Use extreme points')\n transform_tr.insert(-3, tr_ep)\n transform_tr.insert(-3, ToImage(norm_elem='extreme_points'))\n\n elif self.channel == 4 and self.use_scribble:\n print('Use scribble')\n transform_tr.insert(-3, tr_scribble)\n transform_tr.insert(-3, ToImage(norm_elem='scribble'))\n\n elif self.channel == 3:\n print('Use no annotation')\n \n else:\n 
raise NotImplementedError('Wrong interactive method.')\n\n print([str(tran) for tran in transform_tr])\n return transforms.Compose(transform_tr)\n\n def getTestTransform(self, reserveGT = False):\n transform_ts = [\n MattingToTensor(phase=\"test\", in_channels=self.channel, trimap_one_hot=self.trimap_one_hot)]\n\n tr_ep = ExtremePoints(sigma=10, pert=5, elem='alpha')\n tr_in = IOGPoints(sigma=10, pert=5, elem='alpha', p_type='in')\n tr_out = IOGPoints(sigma=10, pert=5, elem='alpha', p_type='out')\n tr_bbox = OutPoints(sigma=10, pert=5, elem='alpha')\n tr_crop = CropFromMask(crop_elems=('image', 'alpha', 'trimap'), \\\n relax=self.relax_crop, zero_pad=self.zero_pad_crop, \\\n crop=False if self.no_crop else True, use_roimasking = self.use_roimasking,\\\n is_matting=True)\n tr_scribble = GenerateScribble(elem='alpha')\n\n if not self.no_crop:\n transform_ts.insert(0, tr_crop)\n\n if self.channel == 5 and self.use_iogpoints:\n print('Use foreground/background points')\n transform_ts.insert(-1, tr_in)\n transform_ts.insert(-1, tr_out)\n transform_ts.insert(-1, ToImage(norm_elem=('in_points', 'out_points')))\n\n elif self.channel == 6 and self.use_trimap:\n print('Use trimap (one-hot)')\n\n elif self.channel == 4 and self.use_trimap:\n print('Use trimap')\n\n elif self.channel == 4 and self.use_bbox:\n print('Use bounding box')\n if self.bbox_type == 'points':\n transform_ts.insert(-1, tr_out)\n elif self.bbox_type == 'area':\n transform_ts.insert(-1, tr_bbox)\n else:\n raise RuntimeError('Wrong bbox type.')\n transform_ts.insert(-1, ToImage(norm_elem=('out_points')))\n\n elif self.channel == 4 and self.use_in_point:\n print('Use inside point')\n transform_ts.insert(-1, tr_in)\n transform_ts.insert(-1, ToImage(norm_elem=('in_points')))\n\n elif self.channel == 4 and self.use_extreme_points:\n print('Use extreme points')\n transform_ts.insert(-1, tr_ep)\n transform_ts.insert(-1, ToImage(norm_elem='extreme_points'))\n\n elif self.channel == 4 and self.use_scribble:\n print('Use scribble')\n transform_ts.insert(-1, tr_scribble)\n transform_ts.insert(-1, ToImage(norm_elem='scribble'))\n \n elif self.channel == 3:\n print('Use no annotation')\n\n else:\n raise NotImplementedError('Wrong interactive method.')\n \n print([str(tran) for tran in transform_ts])\n return transforms.Compose(transform_ts)"
},
{
"identifier": "dataset",
"path": "util/dataset.py",
"snippet": "IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm']\n BASE_DIR = 'Combined_Dataset'\nclass Composition1KMatting(Dataset):\nclass Combined4ClassesMatting(Dataset):\nclass RealWorldMatting(Dataset):\n def __init__(self,\n root='dataset',\n split='train',\n task='matting',\n num_bgs=10,\n transform=None,\n preprocess=False,\n retname=True): \n def __getitem__(self, index):\n def _composite_fg(self, fg, alpha, idx):\n def _composite(self, fg, bg, a, w, h, trimap):\n def __len__(self):\n def __str__(self):\n def __init__(self,\n root='dataset',\n split='all',\n transform=None,\n retname=True): \n def __getitem__(self, index):\n def __len__(self):\n def __str__(self):\n def __init__(self,\n root='dataset',\n transform=None,\n retname=True): \n def __getitem__(self, index):\n def __len__(self):\n def __str__(self):"
},
{
"identifier": "config",
"path": "util/config.py",
"snippet": "class CfgNode(dict):\n def __init__(self, init_dict=None, key_list=None, new_allowed=False):\n def __getattr__(self, name):\n def __setattr__(self, name, value):\n def __str__(self):\n def _indent(s_, num_spaces):\n def __repr__(self):\ndef load_cfg_from_cfg_file(file):\ndef merge_cfg_from_list(cfg, cfg_list):\ndef _decode_cfg_value(v):\ndef _check_and_coerce_cfg_value_type(replacement, original, key, full_key):\n def conditional_cast(from_type, to_type):\ndef _assert_with_logging(cond, msg):"
},
{
"identifier": "helpers",
"path": "util/helpers.py",
"snippet": "def dilate(im, kernel=20):\ndef tens2image(im):\ndef crop2fullmask(crop_mask, bbox, im=None, im_size=None, zero_pad=False, relax=0, mask_relax=True,\n #interpolation=cv2.INTER_CUBIC, scikit=False):\n interpolation=cv2.INTER_LINEAR, scikit=False):\ndef align2fullmask(crop_mask, im_size, points, relax=0):\ndef overlay_mask(im, ma, colors=None, alpha=0.5):\ndef overlay_masks(im, masks, alpha=0.5):\ndef extreme_points(mask, pert):\n def find_point(id_x, id_y, ids):\ndef getPositon(distance_transform):\ndef in_points(mask, pert):\ndef out_points(mask, pert):\ndef out_points_mask(mask, pert):\ndef get_bbox(mask, points=None, pad=0, zero_pad=False, use_roimasking=False):\ndef crop_from_bbox(img, bbox, zero_pad=False, use_roimasking=False):\ndef fixed_resize(sample, resolution, flagval=None):\ndef crop_from_mask(img, mask, relax=0, zero_pad=False, use_roimasking = False):\ndef make_gaussian(size, sigma=10, center=None, d_type=np.float64):\ndef make_gt(img, labels, sigma=10, one_mask_per_point=False):\ndef make_gt_bbox(img, labels, sigma=10, one_mask_per_point=False):\ndef cstm_normalize(im, max_value):\ndef generate_param_report(logfile, param):\ndef color_map(N=256, normalized=False):\n def bitget(byteval, idx):\ndef save_mask(results, mask_path):\ndef B_spline(control_points, num_i, s=0.5):\ndef generate_scribble_strictly(mask, num_c=3, num_i=50, coverage_area=0.1, width=10, best_out_of=5):\ndef generate_trimap_with_gaussian(mask):\ndef clamp(input, min=None, max=None):\ndef produce_trimap(mask):\ndef unified_trimap_transform(trimap, sample_name, split_dir):\n H, W = mask.shape"
}
] | import numpy as np
import os.path
import logging
import argparse
import cv2
import torch.nn.parallel
import numpy as np
import util.helpers as helpers
import os
import random
import time
import cv2
import numpy as np
import logging
import argparse
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.multiprocessing as mp
import torch.distributed as dist
from PIL import Image
from util import dataset
from util.util import AverageMeter, compute_mse, compute_sad, compute_gradient, compute_connectivity, get_cuda_devices, get_unknown_tensor_from_pred
from torch.nn.functional import upsample
from torchvision import transforms
from tensorboardX import SummaryWriter
from util.custom_transforms import interactiveMattingTransform
from util import dataset, config, helpers
from model.mattingnet import Unified_Interactive_Matting
from model.mattingnet import Unified_Interactive_Matting_trimap | 5,527 | #coding=utf-8
#import apex
def sort_dict(dict_src):
dict_new = {}
for k in sorted(dict_src):
dict_new.update({k: dict_src[k]})
return dict_new
def get_parser():
parser = argparse.ArgumentParser(description='PyTorch Semantic Segmentation')
parser.add_argument('--config', type=str, default='config/ade20k/ade20k_pspnet50.yaml', help='config file')
parser.add_argument('opts', help='see config/ade20k/ade20k_pspnet50.yaml for all options', default=None, nargs=argparse.REMAINDER)
args = parser.parse_args()
assert args.config is not None
cfg = config.load_cfg_from_cfg_file(args.config)
if args.opts is not None:
cfg = config.merge_cfg_from_list(cfg, args.opts)
return cfg
def get_logger():
logger_name = "main-logger"
logger = logging.getLogger(logger_name)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
fmt = "[%(asctime)s %(levelname)s %(filename)s line %(lineno)d %(process)d] %(message)s"
handler.setFormatter(logging.Formatter(fmt))
logger.addHandler(handler)
return logger
def get_relax_pad(relax_pad, extreme_points):
if relax_pad <= 0:
return 0
if relax_pad >= 1:
return int(relax_pad)
x_min, y_min = np.min(extreme_points, axis=0)
x_max, y_max = np.max(extreme_points, axis=0)
x_len = x_max - x_min + 1
y_len = y_max - y_min + 1
return max(20, int(relax_pad * max(x_len, y_len)))
def main():
global args, logger, writer
use_void_pixels=True
logger = get_logger()
args = get_parser()
# writer = SummaryWriter(args.save_folder)
if args.test_gpu:
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(str(x) for x in args.test_gpu)
else:
args.test_gpu = get_cuda_devices()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
logger.info(args)  # print the parsed arguments to the console
if args.manual_seed is not None:
random.seed(args.manual_seed)
np.random.seed(args.manual_seed)
torch.manual_seed(args.manual_seed)
torch.cuda.manual_seed(args.manual_seed)
torch.cuda.manual_seed_all(args.manual_seed)
cudnn.benchmark = False
cudnn.deterministic = True
# transform and dataloader
_interactive_matting_transform = interactiveMattingTransform(channel=args.in_channels, no_crop=args.no_crop, relax_crop=args.relax_crop,\
use_iogpoints=args.use_iogpoints, use_roimasking=args.use_roimasking, use_trimap=args.use_trimap,\
use_in_point=args.use_in_point, use_bbox=args.use_bbox, use_iogdextr=args.use_iogdextr, use_extreme_points=args.use_extreme_points, use_scribble=args.use_scribble,\
rotate_degree=args.rotate_degree, scale=args.scale, shear=args.shear,\
flip=args.flip, crop_size=args.crop_size, mask_type=args.mask_type, bbox_type=args.bbox_type)
composed_transforms_ts = _interactive_matting_transform.getTestTransform()
val_data = dataset.Composition1KMatting(root=args.data_root, split=args.test_split,transform=composed_transforms_ts, task=args.task, num_bgs=args.test_num_bgs)
val_loader = torch.utils.data.DataLoader(val_data, batch_size=args.batch_size_test, shuffle=False, num_workers=args.workers_test, pin_memory=True, sampler=None)
# model
if args.arch == 'uim':
model = Unified_Interactive_Matting(n_classes=args.classes, in_channels=args.in_channels, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, fusion_method=args.fusion_method)
elif args.arch == 'uim_trimap':
model = Unified_Interactive_Matting_trimap(n_classes=args.classes, in_channels=args.in_channels, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, fusion_method=args.fusion_method)
else:
raise RuntimeError('Wrong arch.')
logger.info(model)
model = torch.nn.DataParallel(model)
cudnn.benchmark = True
model = model.to(device)
model.eval()
# checkpoint
model_path = args.model_path
if os.path.isfile(model_path):
logger.info("=> loading checkpoint '{}'".format(model_path))
checkpoint = torch.load(model_path)
model.load_state_dict(checkpoint)
logger.info("=> loaded checkpoint '{}'".format(model_path))
else:
raise RuntimeError("=> no checkpoint found at '{}'".format(model_path))
# evaluate
print('evaluating Network')
eval_result = dict()
| #coding=utf-8
#import apex
def sort_dict(dict_src):
dict_new = {}
for k in sorted(dict_src):
dict_new.update({k: dict_src[k]})
return dict_new
def get_parser():
parser = argparse.ArgumentParser(description='PyTorch Semantic Segmentation')
parser.add_argument('--config', type=str, default='config/ade20k/ade20k_pspnet50.yaml', help='config file')
parser.add_argument('opts', help='see config/ade20k/ade20k_pspnet50.yaml for all options', default=None, nargs=argparse.REMAINDER)
args = parser.parse_args()
assert args.config is not None
cfg = config.load_cfg_from_cfg_file(args.config)
if args.opts is not None:
cfg = config.merge_cfg_from_list(cfg, args.opts)
return cfg
def get_logger():
logger_name = "main-logger"
logger = logging.getLogger(logger_name)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
fmt = "[%(asctime)s %(levelname)s %(filename)s line %(lineno)d %(process)d] %(message)s"
handler.setFormatter(logging.Formatter(fmt))
logger.addHandler(handler)
return logger
def get_relax_pad(relax_pad, extreme_points):
if relax_pad <= 0:
return 0
if relax_pad >= 1:
return int(relax_pad)
x_min, y_min = np.min(extreme_points, axis=0)
x_max, y_max = np.max(extreme_points, axis=0)
x_len = x_max - x_min + 1
y_len = y_max - y_min + 1
return max(20, int(relax_pad * max(x_len, y_len)))
def main():
global args, logger, writer
use_void_pixels=True
logger = get_logger()
args = get_parser()
# writer = SummaryWriter(args.save_folder)
if args.test_gpu:
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(str(x) for x in args.test_gpu)
else:
args.test_gpu = get_cuda_devices()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
logger.info(args)  # print the parsed arguments to the console
if args.manual_seed is not None:
random.seed(args.manual_seed)
np.random.seed(args.manual_seed)
torch.manual_seed(args.manual_seed)
torch.cuda.manual_seed(args.manual_seed)
torch.cuda.manual_seed_all(args.manual_seed)
cudnn.benchmark = False
cudnn.deterministic = True
# transform and dataloader
_interactive_matting_transform = interactiveMattingTransform(channel=args.in_channels, no_crop=args.no_crop, relax_crop=args.relax_crop,\
use_iogpoints=args.use_iogpoints, use_roimasking=args.use_roimasking, use_trimap=args.use_trimap,\
use_in_point=args.use_in_point, use_bbox=args.use_bbox, use_iogdextr=args.use_iogdextr, use_extreme_points=args.use_extreme_points, use_scribble=args.use_scribble,\
rotate_degree=args.rotate_degree, scale=args.scale, shear=args.shear,\
flip=args.flip, crop_size=args.crop_size, mask_type=args.mask_type, bbox_type=args.bbox_type)
composed_transforms_ts = _interactive_matting_transform.getTestTransform()
val_data = dataset.Composition1KMatting(root=args.data_root, split=args.test_split,transform=composed_transforms_ts, task=args.task, num_bgs=args.test_num_bgs)
val_loader = torch.utils.data.DataLoader(val_data, batch_size=args.batch_size_test, shuffle=False, num_workers=args.workers_test, pin_memory=True, sampler=None)
# model
if args.arch == 'uim':
model = Unified_Interactive_Matting(n_classes=args.classes, in_channels=args.in_channels, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, fusion_method=args.fusion_method)
elif args.arch == 'uim_trimap':
model = Unified_Interactive_Matting_trimap(n_classes=args.classes, in_channels=args.in_channels, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, fusion_method=args.fusion_method)
else:
raise RuntimeError('Wrong arch.')
logger.info(model)
model = torch.nn.DataParallel(model)
cudnn.benchmark = True
model = model.to(device)
model.eval()
# checkpoint
model_path = args.model_path
if os.path.isfile(model_path):
logger.info("=> loading checkpoint '{}'".format(model_path))
checkpoint = torch.load(model_path)
model.load_state_dict(checkpoint)
logger.info("=> loaded checkpoint '{}'".format(model_path))
else:
raise RuntimeError("=> no checkpoint found at '{}'".format(model_path))
# evaluate
print('evaluating Network')
eval_result = dict() | eval_result['all_mse'] = AverageMeter() | 1 | 2023-12-07 09:03:48+00:00 | 8k |
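Editor's note on the Dinghow/UIM record above: its next_line begins an evaluation-metrics dictionary built from the AverageMeter and compute_mse helpers quoted in its context list. The self-contained sketch below (dummy arrays, helpers restated from those quoted snippets) only illustrates that usage pattern; it is not the repository's evaluation loop.
import numpy as np
class AverageMeter:
    # Running average, restated from the util.util snippet in the context above.
    def __init__(self):
        self.val = self.avg = self.sum = self.count = 0
    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def compute_mse(pred, alpha, trimap=None):
    # MSE over the unknown (== 128) trimap region, restated from the context snippet.
    if trimap is not None:
        num_pixels = float((trimap == 128).sum())
        return ((pred - alpha) ** 2 * (trimap == 128)).sum() / (num_pixels + 1e-8)
    return ((pred - alpha) ** 2).sum() / (float(np.prod(alpha.shape)) + 1e-8)
eval_result = dict()
eval_result['all_mse'] = AverageMeter()   # this is the record's next_line
pred = np.random.rand(64, 64)             # dummy predicted alpha matte
alpha = np.random.rand(64, 64)            # dummy ground-truth alpha matte
trimap = np.full((64, 64), 128)           # mark every pixel as "unknown"
eval_result['all_mse'].update(compute_mse(pred, alpha, trimap))
print('mean MSE so far:', eval_result['all_mse'].avg)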
dvmazur/mixtral-offloading | src/build_model.py | [
{
"identifier": "ExpertCache",
"path": "src/expert_cache.py",
"snippet": "class ExpertCache:\n def __init__(self, make_module: callable, main_size: int, offload_size: int, buffer_size: int):\n \"\"\"Dynamically loads an array of modules with identical hyperparameters\"\"\"\n self.module_type = self.module_size = self.device = None\n self.active = False\n\n self.registered_experts: Dict[ExpertUID, ExpertInfo] = dict()\n\n self.main_modules = [self._check_module(make_module()) for i in range(main_size)]\n self.main_infos: List[Optional[ExpertInfo]] = [None for _ in range(main_size)]\n\n assert self.module_size is not None\n self.offloaded_storages = [\n torch.UntypedStorage(self.module_size).pin_memory(self.device) for _ in range(offload_size)]\n self.offloaded_infos: List[Optional[ExpertInfo]] = [None for _ in range(offload_size)]\n\n # temporary storage to shave off latency\n self.device_expert_buffers = deque([self._check_module(make_module()) for _ in range(buffer_size)])\n self.offloaded_storage_buffers = deque([\n torch.UntypedStorage(self.module_size).pin_memory(self.device) for _ in range(buffer_size)])\n self.group_infos: Dict[int, EvictionGroupInfo] = defaultdict(EvictionGroupInfo)\n\n def _check_module(self, module: MixtralExpertWrapper):\n assert isinstance(module.storage, torch.UntypedStorage)\n if self.module_type is None:\n self.module_type = type(module)\n self.module_size = len(module.storage)\n self.device = module.storage.device\n else:\n assert isinstance(module, self.module_type)\n assert len(module.storage) == self.module_size\n assert module.storage.device == self.device\n return module\n\n def add_expert(self, uid: ExpertUID, module: MixtralExpertWrapper, eviction_group: int = 0,\n offload: Optional[bool] = None):\n \"\"\"Register an expert to the cache and associate it with uid\"\"\"\n assert self.module_type is not None\n assert isinstance(module, self.module_type)\n return self.add_expert_storage(uid, module.storage, eviction_group=eviction_group, offload=offload)\n\n def add_expert_storage(self, uid: ExpertUID, storage: torch.UntypedStorage,\n eviction_group: int = 0, offload: Optional[bool] = None):\n assert uid not in self.registered_experts, f\"expert {uid} already registered\"\n assert isinstance(storage, torch.UntypedStorage)\n assert len(storage) == self.module_size\n\n if offload is None or not offload: # False or None\n for i in range(len(self.main_modules)):\n if self.main_infos[i] is None:\n self.main_modules[i].storage.copy_(storage)\n info = ExpertInfo(uid, eviction_group=eviction_group, offloaded=False, index=i)\n self.registered_experts[uid] = self.main_infos[i] = info\n self.group_infos[eviction_group].add(info)\n return # done allocating; found spot on device\n if offload is None or offload: # True or None\n for i in range(len(self.offloaded_storages)):\n if self.offloaded_infos[i] is None:\n self.offloaded_storages[i].copy_(storage)\n info = ExpertInfo(uid, eviction_group=eviction_group, offloaded=True, index=i)\n self.registered_experts[uid] = self.offloaded_infos[i] = info\n self.group_infos[eviction_group].add(info)\n return # done allocating; found an offloaded spot\n raise ValueError(\"Cache is full\")\n\n def load_experts(\n self, *uids: ExpertUID, unordered: bool = False) -> Iterator[Tuple[ExpertUID, MixtralExpertWrapper]]:\n \"\"\"\n :example:\n >>> for uid, expert in expert_cache.load_experts(*list_of_uids, unordered=True):\n >>> for uid, expert in expert_iter:\n >>> result += expert(x) * get_moe_weight(uid)\n\n :param uids: iterate over the specified expert uids. 
Same uids as in add_expert\n :param unordered: if True, allows cache to iterate experts in arbitrary order\n The order is chosen to minimize the total wait time.\n :returns: an iterator that yields (uid, expert) pairs, only usable inside the for loop\n\n \"\"\"\n assert len(set(uids)) == len(uids)\n assert not self.active, \"already loading experts; buffers are busy\"\n if unordered: # yield non-offloaded experts first\n uids = sorted(uids, key=lambda uid: self.registered_experts[uid].offloaded)\n infos = [self.registered_experts[uid] for uid in uids]\n\n assert len(set(info.eviction_group for info in infos)) == 1, \"experts must be in the same evicton group\"\n eviction_group = self.group_infos[infos[0].eviction_group]\n for info in infos:\n eviction_group.mark_used(info)\n\n try:\n self.active = True\n # save pre-loaded experts before they can be swapped\n pre_loaded_infos = deque([info for info in infos if not info.offloaded])\n pre_loaded_experts = deque([self.main_modules[info.index] for info in pre_loaded_infos])\n\n # begin loading experts into free buffers in background (via non-blocking copy)\n infos_to_load = deque([info for info in infos if info.offloaded])\n infos_in_loading = deque([])\n experts_in_loading = deque([])\n window_size = min(len(self.device_expert_buffers) - 1,\n len(eviction_group.main_infos),\n len(infos_to_load))\n for _ in range(window_size):\n info_to_load = infos_to_load.popleft()\n infos_in_loading.append(info_to_load)\n experts_in_loading.append(\n self._swap(info_to_load, eviction_group.choose_expert_to_evict()))\n\n for info in infos:\n if len(pre_loaded_infos) > 0 and info is pre_loaded_infos[0]:\n pre_loaded_infos.popleft()\n yield (info.uid, pre_loaded_experts.popleft())\n elif len(infos_in_loading) > 0 and info is infos_in_loading[0]:\n infos_in_loading.popleft()\n yield (info.uid, experts_in_loading.popleft())\n if len(infos_to_load) > 0:\n info_to_load = infos_to_load.popleft()\n infos_in_loading.append(info_to_load)\n experts_in_loading.append(\n self._swap(info_to_load, eviction_group.choose_expert_to_evict()))\n else:\n raise RuntimeError(\"internal error: caching algorithm failed\")\n finally:\n self.active = False\n\n def _swap(self, info_to_load: ExpertInfo, info_to_evict: ExpertInfo) -> nn.Module:\n \"\"\"Swap an offloaded expert (info_to_load) with an on-device expert (info_to_evict) return the loaded expert\"\"\"\n assert info_to_load.offloaded and not info_to_evict.offloaded\n assert info_to_load.eviction_group == info_to_evict.eviction_group\n # swap a single on-device expert with a single offloaded expert using buffers for parallelism\n offloaded_storage_buffer = self.offloaded_storage_buffers.popleft()\n device_expert_buffer = self.device_expert_buffers.popleft()\n device_expert_buffer.storage.copy_(self.offloaded_storages[info_to_load.index], non_blocking=True)\n offloaded_storage_buffer.copy_(self.main_modules[info_to_evict.index].storage, non_blocking=True)\n\n self.device_expert_buffers.append(self.main_modules[info_to_evict.index])\n self.main_modules[info_to_evict.index] = device_expert_buffer\n self.offloaded_storage_buffers.append(self.offloaded_storages[info_to_load.index])\n self.offloaded_storages[info_to_load.index] = offloaded_storage_buffer\n\n self.main_infos[info_to_evict.index] = info_to_load\n self.offloaded_infos[info_to_load.index] = info_to_evict\n info_to_evict.offloaded, info_to_load.offloaded = info_to_load.offloaded, info_to_evict.offloaded\n info_to_evict.index, info_to_load.index = info_to_load.index, 
info_to_evict.index\n self.group_infos[info_to_load.eviction_group].swap(info_to_load, info_to_evict)\n return device_expert_buffer"
},
{
"identifier": "MixtralExpertWrapper",
"path": "src/expert_wrapper.py",
"snippet": "class MixtralExpertWrapper(nn.Module):\n def __init__(\n self,\n expert_module: tp.Any,\n device: torch.device,\n ):\n super().__init__()\n \n expert_module, self.storage = self.replace_layer_storage(expert_module, device)\n self.expert_module = lambda *args, **kwargs: expert_module(*args, **kwargs)\n \n self._register_state_dict_hook(self._add_storage_to_state_dict_hook)\n self._register_load_state_dict_pre_hook(self._load_storage_from_state_dict_hook)\n \n @staticmethod\n def _add_storage_to_state_dict_hook(self, state_dict, prefix, local_metadata):\n state_dict[prefix + 'storage'] = torch.as_tensor(self.storage, dtype=torch.uint8)\n return state_dict\n \n def _load_storage_from_state_dict_hook(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):\n self.storage.copy_(state_dict[prefix + 'storage'].storage().untyped())\n del state_dict[prefix + 'storage']\n \n def forward(self, *args, **kwargs):\n return self.expert_module(*args, **kwargs)\n \n \n @staticmethod\n def replace_layer_storage(\n layer: tp.Any,\n device: torch.device,\n ):\n state_dict = {\n f\"w{i}\": {\n \"W_q\": getattr(layer, f\"w{i}\").W_q,\n \"meta\": getattr(layer, f\"w{i}\").meta,\n \"bias\": getattr(layer, f\"w{i}\").bias,\n }\n for i in range(1, 4)\n }\n\n storage_size = 0\n offsets = [0]\n\n for x in nested_flatten(state_dict):\n if not isinstance(x, torch.Tensor):\n continue\n storage_size += x.nbytes\n offsets.append(storage_size)\n\n storage = torch.UntypedStorage(storage_size, device=device) \n\n i = 0\n new_flattened_states = list()\n for x in nested_flatten(state_dict):\n if not isinstance(x, torch.Tensor):\n new_flattened_states.append(x)\n continue\n\n start = offsets[i]\n end = offsets[i + 1]\n a_view = torch.as_tensor(storage[start:end], dtype=x.dtype, device=device).view(x.shape)\n a_view[...] = x\n assert a_view.data_ptr() == storage.data_ptr() + start\n i += 1\n new_flattened_states.append(a_view)\n\n state_dict = nested_pack(new_flattened_states, state_dict)\n\n for layer_id, states in state_dict.items():\n patched = getattr(layer, layer_id)\n patched.W_q = states[\"W_q\"]\n patched.meta = states[\"meta\"]\n patched.bias = states[\"bias\"]\n setattr(layer, layer_id, patched)\n\n return layer, storage"
},
{
"identifier": "HQQLinearTritonSavable",
"path": "src/custom_layers.py",
"snippet": "class HQQLinearTritonSavable(HQQLinear):\n def __init__(self, layer, quant_config, meta=None, **kwargs):\n \"\"\"\n Example how to get meta:\n >>>> meta1 = HQQLinearSavable.get_hqq_meta((hidden_dim, ffn_dim), quant_config)\n >>>> meta2 = HQQLinearSavable.get_hqq_meta((ffn_dim, hidden_dim), quant_config)\n \"\"\"\n \n assert quant_config['weight_quant_params']['nbits'] in [2, 3, 4]\n \n super().__init__(layer, quant_config, **kwargs)\n \n if not hasattr(self, 'meta'):\n assert meta is not None\n self.meta = copy.deepcopy(meta)\n \n self._register_state_dict_hook(self._add_to_state_dict_hook)\n self._register_load_state_dict_pre_hook(self._load_from_state_dict_hook)\n \n def quantize(self, *args, **kwargs):\n super().quantize(*args, **kwargs)\n \n # repacking\n self.repack()\n \n def repack(self):\n if self.W_q.shape != self.meta['shape']:\n W_q = Quantizer.unpack[self.meta['packing']](self.W_q)\n sh = self.meta['shape']\n W_q = W_q.reshape((-1,) + sh[1:])\n W_q = W_q[:sh[0], ...]\n self.W_q = Quantizer.pack[self.meta['packing']](W_q)\n \n def forward(self, x):\n return self.forward_triton(x)\n \n def set_backend(self, backend):\n pass\n \n @torch.inference_mode()\n def forward_triton(self, x):\n assert self.ready, \"model was not quantized\"\n assert self.meta['axis'] == 0\n\n W_q, meta = self.W_q, self.meta\n\n del_keys = []\n if 'quant_scale' in meta and meta['quant_scale']:\n meta['scale'] = Quantizer.dequantize(meta['scale_q'], meta['meta_scale']); del_keys.append('scale')\n if 'quant_zero' in meta and meta['quant_zero']:\n meta['zero'] = Quantizer.dequantize(meta['zero_q'], meta['meta_zero']); del_keys.append('zero')\n\n K = meta['shape'][1]\n N = meta['shape'][0]\n \n if self.meta['nbits'] == 4:\n fn = triton_matmul4_transpose\n elif self.meta['nbits'] == 3:\n fn = functools.partial(triton_matmul3_transpose, N=N)\n elif self.meta['nbits'] == 2:\n fn = triton_matmul2_transpose\n else:\n raise RuntimeError(f\"nbits == {self.meta['nbits']} isn't yet supported\")\n \n output = fn(\n meta['group_size'], x,\n W_q.view(-1, K),\n meta['scale'].view(-1, K),\n meta['zero'].view(-1, K),\n bias=self.bias if hasattr(self, 'bias') else None,\n )\n\n #Cleanup\n for key in del_keys:\n del meta[key]\n\n return output\n\n # to support .forward_pytorch(...) 
- backward compatibility\n @torch.inference_mode()\n def dequantize(self):\n assert self.ready, \"model was not quantized\"\n W_q, meta = self.W_q, self.meta\n del_keys = []\n if(meta['quant_scale']):\n meta['scale'] = Quantizer.dequantize(meta['scale_q'], meta['meta_scale']); del_keys.append('scale')\n if(meta['quant_zero']):\n meta['zero'] = Quantizer.dequantize(meta['zero_q'], meta['meta_zero']); del_keys.append('zero')\n \n W_q_p = Quantizer.unpack[meta['packing']](W_q).half()\n W_q_p = W_q_p[:meta['shape'][0], ...]\n W_q_p = W_q_p.reshape((meta['group_size'], -1))\n \n if((meta['group_size'] is not None) and (meta['nbits']==3)):\n W_q_p = W_q_p[:meta['group_size']] if (meta['axis']==0) else W_q_p[:,:meta['group_size']]\n W_est = ((W_q_p - meta['zero'])*meta['scale']).reshape(meta['shape']) \n \n #Cleanup\n del W_q_p\n for key in del_keys: del meta[key]\n return W_est\n \n @classmethod\n def get_hqq_meta(cls, linear_shape, quant_config):\n layer = HQQLinear(nn.Linear(*linear_shape, bias=False), quant_config)\n meta = layer.meta\n\n def _remove_tensors_recursive(d):\n keys = list(d.keys())\n\n for k in keys:\n if isinstance(d[k], torch.Tensor):\n del d[k]\n elif isinstance(d[k], dict):\n _remove_tensors_recursive(d[k])\n\n _remove_tensors_recursive(meta)\n\n return meta\n \n @staticmethod\n def _add_to_state_dict_hook(self, state_dict, prefix, local_metadata):\n tensor_paths = self._get_tensor_paths(self.meta)\n assert set(tensor_paths).issubset(\n {'scale_q', 'meta_scale.scale', 'meta_scale.zero', 'zero_q', 'meta_zero.scale', 'meta_zero.zero',\n 'scale', 'zero'}\n )\n \n def _add(name, value):\n state_dict[prefix + name] = value\n \n _add('W_q', self.W_q)\n \n if self.bias is not None:\n _add('bias', self.bias)\n \n if 'meta_scale' in self.meta:\n _add('meta.scale_q', self.meta['scale_q'])\n _add('meta.meta_scale.scale', self.meta['meta_scale']['scale'])\n _add('meta.meta_scale.zero', self.meta['meta_scale']['zero'])\n else:\n _add('meta.scale', self.meta['scale'])\n \n if 'meta_zero' in self.meta:\n _add('meta.zero_q', self.meta['zero_q'])\n _add('meta.meta_zero.scale', self.meta['meta_zero']['scale'])\n _add('meta.meta_zero.zero', self.meta['meta_zero']['zero'])\n else:\n _add('meta.zero', self.meta['zero'])\n \n return state_dict\n \n def _load_from_state_dict_hook(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):\n tensor_paths = [k[len(prefix + 'meta.'):] for k in state_dict.keys() if k.startswith(prefix + 'meta.')]\n assert set(tensor_paths).issubset(\n {'scale_q', 'meta_scale.scale', 'meta_scale.zero', 'zero_q', 'meta_zero.scale', 'meta_zero.zero',\n 'scale', 'zero'}\n )\n \n def _del(name):\n del state_dict[prefix + name]\n def _set(name):\n setattr(self, name, state_dict[prefix + name])\n _del(name)\n def _get(name):\n v = state_dict[prefix + name]\n _del(name)\n return v\n \n _set('W_q')\n if 'bias' in state_dict:\n _set('bias')\n else:\n self.bias = None\n \n if not hasattr(self, 'meta'):\n self.meta = {}\n \n if (prefix + 'meta.meta_scale.scale') in state_dict:\n self.meta['scale_q'] = _get('meta.scale_q')\n self.meta['quant_scale'] = True\n if not 'meta_scale' in self.meta:\n self.meta['meta_scale'] = {}\n self.meta['meta_scale'] |= {\n 'scale': _get('meta.meta_scale.scale'),\n 'zero': _get('meta.meta_scale.zero')\n }\n else:\n self.meta['scale'] = _get('meta.scale')\n if (prefix + 'meta.meta_zero.scale') in state_dict:\n self.meta['zero_q'] = _get('meta.zero_q')\n self.meta['quant_zero'] = True\n if not 'meta_zero' in 
self.meta:\n self.meta['meta_zero'] = {}\n self.meta['meta_zero'] |= {\n 'scale': _get('meta.meta_zero.scale'),\n 'zero': _get('meta.meta_zero.zero')\n }\n else:\n self.meta['zero'] = _get('meta.zero')\n self.ready = True\n \n # self.cuda()\n # self.in_gpu = self.W_q.device.type == 'cuda'\n # assert self.in_gpu\n \n self.repack()\n \n @classmethod\n def _get_tensor_paths(cls, state: Dict[str, Any], prefix=''):\n paths = []\n \n for k, v in state.items():\n if isinstance(v, dict):\n paths += cls._get_tensor_paths(v, prefix=k + '.')\n elif isinstance(v, torch.Tensor):\n paths.append(prefix + k)\n \n return paths\n \n def state_dict(self, *args, **kwargs):\n return nn.Module.state_dict(self, *args, **kwargs)\n \n def load_state_dict(self, *args, **kwargs):\n nn.Module.load_state_dict(self, *args, **kwargs)"
},
{
"identifier": "MixtralBLockSparseTop2MLP_HQQ",
"path": "src/custom_layers.py",
"snippet": "class MixtralBLockSparseTop2MLP_HQQ(nn.Module):\n def __init__(self, config: MixtralConfig, quant_config: Dict[str, Any], meta1, meta2):\n super().__init__()\n \n self.w1 = HQQLinearTritonSavable(None, quant_config, meta1)\n self.w2 = HQQLinearTritonSavable(None, quant_config, meta2)\n self.w3 = HQQLinearTritonSavable(None, quant_config, meta1)\n\n self.act_fn = ACT2FN[config.hidden_act]\n\n def forward(self, hidden_states):\n current_hidden_states = self.act_fn(self.w1(hidden_states)) * self.w3(hidden_states)\n current_hidden_states = self.w2(current_hidden_states)\n return current_hidden_states"
},
{
"identifier": "SparseMoeWrapper",
"path": "src/custom_layers.py",
"snippet": "class SparseMoeWrapper(nn.Module):\n def __init__(self, config, layer_id, gate, expert_cache):\n super().__init__()\n\n self.hidden_dim = config.hidden_size\n self.ffn_dim = config.intermediate_size\n self.num_experts = config.num_local_experts\n self.top_k = config.num_experts_per_tok\n self.layer_id = layer_id\n\n self.gate = gate\n self.experts = expert_cache\n\n def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:\n batch_size, sequence_length, hidden_dim = hidden_states.shape\n hidden_states = hidden_states.view(-1, hidden_dim)\n # router_logits: (batch * sequence_length, n_experts)\n router_logits = self.gate(hidden_states)\n\n routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)\n routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)\n routing_weights /= routing_weights.sum(dim=-1, keepdim=True)\n # we cast back to the input dtype\n routing_weights = routing_weights.to(hidden_states.dtype)\n\n final_hidden_states = torch.zeros(\n (batch_size * sequence_length, hidden_dim), dtype=hidden_states.dtype, device=hidden_states.device\n )\n\n # One hot encode the selected experts to create an expert mask\n # this will be used to easily index which expert is going to be sollicitated\n expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0)\n\n active_experts = selected_experts.flatten().unique().tolist()\n\n # Loop over all available experts in the model and perform the computation on each expert\n for (_layer_index, expert_idx), expert_layer in self.experts.load_experts(\n *((self.layer_id, expert_idx) for expert_idx in active_experts), unordered=True):\n idx, top_x = torch.where(expert_mask[expert_idx])\n assert top_x.shape[0] > 0\n\n # in torch it is faster to index using lists than torch tensors\n top_x_list = top_x.tolist()\n idx_list = idx.tolist()\n\n # Index the correct hidden states and compute the expert hidden state for\n # the current expert. We need to make sure to multiply the output hidden\n # states by `routing_weights` on the corresponding tokens (top-1 and top-2)\n current_state = hidden_states[None, top_x_list].reshape(-1, hidden_dim)\n current_hidden_states = expert_layer(current_state) * routing_weights[top_x_list, idx_list, None]\n\n # However `index_add_` only support torch tensors for indexing so we'll use\n # the `top_x` tensor here.\n final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))\n final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim)\n return final_hidden_states, router_logits"
},
{
"identifier": "with_default_dtype",
"path": "src/utils.py",
"snippet": "@contextmanager\ndef with_default_dtype(dtype):\n _dtype_original = torch.get_default_dtype()\n\n try:\n torch.set_default_dtype(dtype)\n yield\n finally:\n torch.set_default_dtype(_dtype_original)"
}
] | import os
import json
import typing as tp
import torch
from functools import cache
from dataclasses import dataclass
from torch import nn
from transformers import AutoConfig
from transformers.models.mixtral import MixtralForCausalLM, MixtralConfig
from safetensors.torch import load_file
from torch import nn
from tqdm.auto import trange
from hqq.core.quantize import BaseQuantizeConfig
from .expert_cache import ExpertCache
from .expert_wrapper import MixtralExpertWrapper
from .custom_layers import (
HQQLinearTritonSavable,
MixtralBLockSparseTop2MLP_HQQ,
SparseMoeWrapper,
)
from .utils import with_default_dtype | 6,774 |
@dataclass(frozen=True)
class OffloadConfig:
main_size: int
offload_size: int
buffer_size: int
offload_per_layer: int
class QuantConfig:
def __init__(
self,
ffn_config: BaseQuantizeConfig,
attn_config: BaseQuantizeConfig,
):
self.ffn_config = ffn_config
self.attn_config = attn_config
@cache
def get_ffn_metas(self, hidden_dim: int, ffn_dim: int) -> tuple[tp.Any, tp.Any]:
return (
HQQLinearTritonSavable.get_hqq_meta((hidden_dim, ffn_dim), self.ffn_config),
HQQLinearTritonSavable.get_hqq_meta((ffn_dim, hidden_dim), self.ffn_config),
)
def replace_attn_layers(
model: MixtralForCausalLM,
config: MixtralConfig,
quant_config: QuantConfig,
device: torch.device,
) -> None:
attn_quant_config = quant_config.attn_config
hidden_size = config.hidden_size
num_heads = config.num_attention_heads
head_dim = hidden_size // num_heads
num_key_value_heads = config.num_key_value_heads
shapes = [
(hidden_size, num_heads * head_dim),
(hidden_size, num_key_value_heads * head_dim),
(hidden_size, num_key_value_heads * head_dim),
(num_heads * head_dim, hidden_size),
]
shape_to_meta = {
shape: HQQLinearTritonSavable.get_hqq_meta(shape, attn_quant_config)
for shape in shapes
}
def patch_fct_hqq(shape, quant_config):
meta = shape_to_meta[shape]
layer = HQQLinearTritonSavable(None, quant_config, meta=meta)
return layer
for layer in model.model.layers:
layer.block_sparse_moe.gate = nn.Linear(
config.hidden_size,
config.num_local_experts,
dtype=torch.float16,
device=device,
bias=False,
)
layer.self_attn.q_proj = patch_fct_hqq(
(hidden_size, num_heads * head_dim), attn_quant_config
)
layer.self_attn.k_proj = patch_fct_hqq(
(hidden_size, num_key_value_heads * head_dim), attn_quant_config
)
layer.self_attn.v_proj = patch_fct_hqq(
(hidden_size, num_key_value_heads * head_dim), attn_quant_config
)
layer.self_attn.o_proj = patch_fct_hqq(
(hidden_size, num_heads * head_dim), attn_quant_config
)
@cache
def get_default_ffn_quant_config(ffn_dim: int = 14336, hidden_dim: int = 4096):
quant_config = BaseQuantizeConfig(
nbits=2,
group_size=16,
quant_zero=True,
quant_scale=True,
)
meta1 = HQQLinearTritonSavable.get_hqq_meta((hidden_dim, ffn_dim), quant_config)
meta2 = HQQLinearTritonSavable.get_hqq_meta((ffn_dim, hidden_dim), quant_config)
return quant_config, meta1, meta2
def make_empty_expert(
model_config: MixtralConfig, quant_config: QuantConfig
|
@dataclass(frozen=True)
class OffloadConfig:
main_size: int
offload_size: int
buffer_size: int
offload_per_layer: int
class QuantConfig:
def __init__(
self,
ffn_config: BaseQuantizeConfig,
attn_config: BaseQuantizeConfig,
):
self.ffn_config = ffn_config
self.attn_config = attn_config
@cache
def get_ffn_metas(self, hidden_dim: int, ffn_dim: int) -> tuple[tp.Any, tp.Any]:
return (
HQQLinearTritonSavable.get_hqq_meta((hidden_dim, ffn_dim), self.ffn_config),
HQQLinearTritonSavable.get_hqq_meta((ffn_dim, hidden_dim), self.ffn_config),
)
def replace_attn_layers(
model: MixtralForCausalLM,
config: MixtralConfig,
quant_config: QuantConfig,
device: torch.device,
) -> None:
attn_quant_config = quant_config.attn_config
hidden_size = config.hidden_size
num_heads = config.num_attention_heads
head_dim = hidden_size // num_heads
num_key_value_heads = config.num_key_value_heads
shapes = [
(hidden_size, num_heads * head_dim),
(hidden_size, num_key_value_heads * head_dim),
(hidden_size, num_key_value_heads * head_dim),
(num_heads * head_dim, hidden_size),
]
shape_to_meta = {
shape: HQQLinearTritonSavable.get_hqq_meta(shape, attn_quant_config)
for shape in shapes
}
def patch_fct_hqq(shape, quant_config):
meta = shape_to_meta[shape]
layer = HQQLinearTritonSavable(None, quant_config, meta=meta)
return layer
for layer in model.model.layers:
layer.block_sparse_moe.gate = nn.Linear(
config.hidden_size,
config.num_local_experts,
dtype=torch.float16,
device=device,
bias=False,
)
layer.self_attn.q_proj = patch_fct_hqq(
(hidden_size, num_heads * head_dim), attn_quant_config
)
layer.self_attn.k_proj = patch_fct_hqq(
(hidden_size, num_key_value_heads * head_dim), attn_quant_config
)
layer.self_attn.v_proj = patch_fct_hqq(
(hidden_size, num_key_value_heads * head_dim), attn_quant_config
)
layer.self_attn.o_proj = patch_fct_hqq(
(hidden_size, num_heads * head_dim), attn_quant_config
)
@cache
def get_default_ffn_quant_config(ffn_dim: int = 14336, hidden_dim: int = 4096):
quant_config = BaseQuantizeConfig(
nbits=2,
group_size=16,
quant_zero=True,
quant_scale=True,
)
meta1 = HQQLinearTritonSavable.get_hqq_meta((hidden_dim, ffn_dim), quant_config)
meta2 = HQQLinearTritonSavable.get_hqq_meta((ffn_dim, hidden_dim), quant_config)
return quant_config, meta1, meta2
def make_empty_expert(
model_config: MixtralConfig, quant_config: QuantConfig | ) -> MixtralBLockSparseTop2MLP_HQQ: | 3 | 2023-12-15 03:32:35+00:00 | 8k |
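Editor's note on the dvmazur/mixtral-offloading record above: its cropped code ends inside make_empty_expert and its next_line closes that signature. A plausible completion can be sketched from the QuantConfig.get_ffn_metas and MixtralBLockSparseTop2MLP_HQQ definitions quoted in its context; the sketch reuses the record's own imports (torch, MixtralConfig, with_default_dtype) and the actual body in the repository may differ.
def make_empty_expert(
    model_config: MixtralConfig, quant_config: QuantConfig
) -> MixtralBLockSparseTop2MLP_HQQ:
    # Sketch only: derive the HQQ metas for the (hidden, ffn) and (ffn, hidden) shapes.
    meta1, meta2 = quant_config.get_ffn_metas(
        model_config.hidden_size, model_config.intermediate_size
    )
    # Assumption: the expert is allocated in fp16 so its tensors can later be
    # flattened into a single storage by MixtralExpertWrapper.replace_layer_storage.
    with with_default_dtype(torch.float16):
        return MixtralBLockSparseTop2MLP_HQQ(
            model_config, quant_config.ffn_config, meta1, meta2
        )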
CircleRadon/Osprey | osprey/datasets/data_modules.py | [
{
"identifier": "IGNORE_INDEX",
"path": "osprey/constants.py",
"snippet": "IGNORE_INDEX = -100"
},
{
"identifier": "COCODataset",
"path": "osprey/datasets/stage2_data.py",
"snippet": "class COCODataset(CustomDataset):\n\n def __init__(self,\n tokenizer=None,\n data_args=None,\n ann_file=None,\n img_prefix=None,\n max_gt_per_img=20,\n ):\n\n super().__init__(tokenizer, data_args, ann_file, img_prefix, max_gt_per_img)\n self.begin_str = '<image>\\nIn the conversation below, you simply answer the category name based on what you see ' \\\n 'in the imagery inside a particular region. I will give you only one region each time.\\n' "
},
{
"identifier": "RefCOCO",
"path": "osprey/datasets/stage2_data.py",
"snippet": "class RefCOCO(CustomDataset):\n\n def __init__(self,\n tokenizer,\n data_args=None,\n ann_file=None,\n img_prefix=None,\n max_gt_per_img=15,\n ):\n\n super().__init__(tokenizer, data_args, ann_file, img_prefix, max_gt_per_img)\n\n self.begin_str = '<image>\\nI will provide you with only one region ' \\\n 'containing only one object, although there may be other ' \\\n 'objects present in the image. It is recommended that you ' \\\n \"describe the object's relative position with respect to other \" \\\n 'objects in the image, as well as its position within ' \\\n 'the image and its basic attributes.'\n\n def load_annotations(self, ann_file):\n\n self.coco = COCO(ann_file)\n self.img_ids = self.coco.getImgIds()\n data_infos = []\n total_ann_ids = []\n for i in self.img_ids:\n info = self.coco.loadImgs([i])[0]\n\n info['filename'] = info['file_name'].split('_')[-1]\n info['height'] = int(info['height'])\n info['width'] = int(info['width'])\n\n ann_ids = self.coco.getAnnIds(imgIds=[i])\n ann_info = self.coco.loadAnns(ann_ids)\n if len(ann_info)==0:\n continue\n \n data_infos.append(info)\n total_ann_ids.extend(ann_ids)\n assert len(set(total_ann_ids)) == len(\n total_ann_ids), f\"Annotation ids in '{ann_file}' are not unique!\"\n return data_infos\n \n def get_data_item(self, idx):\n data_info = self.data_infos[idx]\n ann_info = self.get_ann_info(idx)\n\n img_path =os.path.join(self.img_prefix, data_info['filename'])\n image = self.read_process_image(img_path)\n\n gt_masks = []\n gt_labels = []\n for ann in ann_info:\n mask = self.annToMask(ann['segmentation'], data_info['height'], data_info['width'])\n gt_masks.append(mask)\n \n cat = self.coco.loadCats(ann['category_id'])\n gt_labels.append(data_info['caption'])\n\n data_item = dict(\n img = image,\n gt_masks = gt_masks,\n gt_labels = gt_labels\n )\n return data_item"
},
{
"identifier": "RefCOCOP",
"path": "osprey/datasets/stage2_data.py",
"snippet": "class RefCOCOP(RefCOCO):\n def __init__(self,\n tokenizer,\n data_args=None,\n ann_file=None,\n img_prefix=None,\n max_gt_per_img=15,\n ):\n super().__init__(tokenizer, data_args, ann_file, img_prefix, max_gt_per_img)\n self.begin_str = '<image>\\nI will provide you with only one region ' \\\n 'containing only one object, although there may be other ' \\\n 'objects present in the image. It is recommended that you ' \\\n \"describe the object's relative position with respect to other \" \\\n 'objects in the image and its basic attibuts, you should not ' \\\n 'give its position within the image.' "
},
{
"identifier": "VCRDataset",
"path": "osprey/datasets/vcr.py",
"snippet": "class VCRDataset(Dataset):\n CLASSES = ('object',)\n\n def __init__(self,\n tokenizer,\n data_args=None,\n ann_file=None,\n img_prefix=None,\n\n ):\n super(VCRDataset, self).__init__()\n\n\n self.img_prefix = img_prefix\n\n self.tokenizer = tokenizer\n\n self.data_args = data_args\n\n self.begin_str = \"\"\"<image>.\\nThis provides an overview of the picture.\\n\"\"\"\n self.data_infos = self.load_annotations(ann_file)\n print('normal_vcr', len(self.data_infos))\n\n def load_annotations(self, ann_file):\n\n with open(ann_file, 'r') as f:\n ann_list = [json.loads(line) for line in f]\n data_infos = []\n\n import re\n\n def replace_numbers_with_tags(s, class_names):\n pattern = r'\\b(\\d+)\\b'\n try:\n result = re.sub(pattern, lambda match: f'{class_names[int(match.group(1))]} at region{match.group(1)}', s)\n except:\n # contain number not for instance\n return None\n return result\n\n\n for ann in ann_list:\n\n metadata_fn_path = ann['metadata_fn']\n img_fn = ann['img_fn']\n img_path = os.path.join(self.img_prefix,img_fn)\n annotations = json.load(open(os.path.join(self.img_prefix, metadata_fn_path)))\n masks = annotations['segms']\n bboxes = np.array(annotations['boxes'])\n\n class_names = ann['objects']\n num_objects = len(class_names)\n ref_string = ''\n for i in range(num_objects):\n ref_string = ref_string + f'region{i+1} <mask><pos>' + ','\n ref_string = ref_string[:-1]\n ref_prefix = random.choice(Ref_WAY)\n\n begion_string = ref_prefix.replace('<region>', ref_string)\n qa_s = []\n\n q = ann['question_orig']\n q = replace_numbers_with_tags(q, class_names)\n a = ann['answer_orig']\n a = replace_numbers_with_tags(a, class_names)\n why = ann['rationale_orig']\n why = replace_numbers_with_tags(why, class_names)\n if (q is None) or (a is None) or (why) is None:\n continue\n\n\n qa_s.append({'from': 'human', 'value': begion_string + q})\n qa_s.append({'from': 'gpt', 'value': a})\n qa_s.append({'from': 'human', 'value': random.choice(WHY_QUESTIONS)})\n qa_s.append({'from': 'gpt', 'value': why})\n\n data_infos.append(dict(\n img_path = img_path,\n bboxes = bboxes,\n masks = masks,\n labels= class_names,\n qas = qa_s)\n )\n\n\n return data_infos\n\n def __len__(self):\n return len(self.data_infos)\n\n def __getitem__(self, i):\n data_info = self.data_infos[i]\n\n img_path = data_info['img_path']\n masks = data_info['masks']\n bboxes = data_info['bboxes']\n\n qas = data_info['qas']\n processor = self.data_args.image_processor\n image = Image.open(img_path).convert('RGB')\n w, h = image.size\n # TODO ablation this\n\n image_file = img_path\n\n pred_masks = np.zeros((len(masks), h, w))\n for i,mask in enumerate(masks):\n\n int_box = [round(box) for box in bboxes[i][:-1]]\n \n height_ = int(int_box[3]-int_box[1])\n width_ = int(int_box[2]-int_box[0])\n box_mask = make_mask(height_, width_, bboxes[i], mask)\n\n pred_masks[i, int_box[1]:int_box[3], int_box[0]:int_box[2]] = box_mask\n\n image = processor.preprocess(image,\n do_center_crop=False,\n return_tensors='pt')['pixel_values'][0]\n\n image = torch.nn.functional.interpolate(image.unsqueeze(0),\n size=(512, 512),\n mode='bilinear',\n align_corners=False).squeeze(0)\n\n cur_token_len = (image.shape[1] // 16) * (image.shape[2] // 16) # FIXME: 16 is hardcoded patch size\n qas = copy.deepcopy(qas)\n qas[0]['value'] = self.begin_str + qas[0]['value']\n\n sources = preprocess_multimodal(\n copy.deepcopy([qas]),\n self.data_args, cur_token_len)\n\n data_dict = preprocess(\n sources,\n self.tokenizer,\n has_image=True)\n if isinstance(i, 
int):\n data_dict = dict(input_ids=data_dict['input_ids'][0],\n labels=data_dict['labels'][0])\n\n data_dict['image'] = image\n data_dict['masks'] = torch.Tensor(pred_masks)\n\n return data_dict"
},
{
"identifier": "VGDATA",
"path": "osprey/datasets/vg.py",
"snippet": "class VGDATA(CustomDataset):\n def __init__(self,\n tokenizer,\n data_args=None,\n ann_file=None,\n img_prefix=None,\n max_gt_per_img=3,\n ):\n\n self.data_args = data_args\n self.tokenizer = tokenizer\n self.ann_file = ann_file\n self.img_prefix = img_prefix\n self.max_gt_per_img = max_gt_per_img\n\n super().__init__(tokenizer, data_args, ann_file, img_prefix, max_gt_per_img)\n\n self.begin_str = \"\"\"<image>\\nThis provides an overview of the picture.\\n\"\"\"\n\n\n def get_data_item(self, idx):\n data_info = self.data_infos[idx]\n ann_info = self.get_ann_info(idx)\n\n img_path = os.path.join(self.img_prefix, data_info['filename'])\n image = self.read_process_image(img_path)\n\n gt_labels = []\n gt_masks_ann = []\n\n for i, ann in enumerate(ann_info):\n if ann.get('ignore', False):\n continue\n mask = self.annToMask(ann['segmentation'], data_info['height'], data_info['width'])\n \n gt_labels.append(ann['caption'])\n gt_masks_ann.append(mask)\n\n\n data_item = dict(\n img = image,\n gt_labels=gt_labels,\n gt_masks=gt_masks_ann\n )\n return data_item\n\n \n def process_text(self, data_item):\n image = data_item['img']\n ori_labels = data_item['gt_labels']\n ori_masks = np.array(data_item['gt_masks'])\n ori_masks = torch.from_numpy(ori_masks) \n\n shuffle_ids = torch.randperm(len(ori_labels))\n if len(shuffle_ids) > self.max_gt_per_img:\n shuffle_ids = shuffle_ids[:self.max_gt_per_img]\n ori_masks = ori_masks[shuffle_ids]\n ori_labels = [ori_labels[i] for i in shuffle_ids]\n\n sources = dict()\n\n sources['conversations'] = []\n\n for i in range(len(ori_labels)):\n question = random.choice(QUESTIONS).strip()\n question = question.replace('<region>', '<mask><pos>')\n if i == 0:\n question = self.begin_str + question\n question += LIMIT\n answer = ori_labels[i]\n sources['conversations'].append(\n {'from': 'human', 'value': question})\n sources['conversations'].append({'from': 'gpt', 'value': answer})\n\n cur_token_len = (image.shape[1] // 16) * (image.shape[2] // 16)\n\n sources = preprocess_multimodal(\n copy.deepcopy([sources['conversations']]),\n self.data_args,\n cur_token_len)\n # print(sources)\n\n data_dict = preprocess(\n sources,\n self.tokenizer,\n has_image=True\n )\n \n # get single\n if isinstance(i, int):\n data_dict = dict(input_ids=data_dict['input_ids'][0],\n labels=data_dict['labels'][0])\n\n data_dict['image'] = image\n data_dict['masks'] = ori_masks\n return data_dict"
},
{
"identifier": "PascalPart",
"path": "osprey/datasets/stage2_data.py",
"snippet": "class PascalPart(CustomDataset):\n\n def __init__(self,\n tokenizer,\n data_args=None,\n ann_file=None,\n img_prefix=None,\n max_gt_per_img=15,\n ):\n\n super().__init__(tokenizer, data_args, ann_file, img_prefix, max_gt_per_img)\n CAT_CLASSES = ('potted plant', 'aeroplane', 'cow', 'cat', 'bus', 'horse', 'car', \n 'dog', 'bicycle', 'person', 'bird', 'bottle', 'sheep', 'motorbike')\n\n SUB_CLASSES = ('eye', 'window', 'cap', 'headlight', 'hand', 'mirror', 'arm', 'plant', \n 'wheel', 'ear', 'pot', 'foot', 'leg', 'nose', 'body', 'horn', 'handlebar', \n 'neck', 'license plate', 'paw', 'saddle', 'head', 'muzzle', 'tail', 'wing', \n 'beak', 'hair', 'torso', 'door', 'mouth')\n\n begin_str = '<image>\\n In the conversation below, you simply answer the category and subcategory name based on what you see' \\\n 'in the image inside a particular region. It maybe a subpart of an object. '\\\n 'I will give you only one region each time. Your answer should in the format of '\\\n 'category:subcategory. '\n class_str = 'Categories Containing '+', '.join(CAT_CLASSES)+ '. '\n subclass_str = 'Subcategories Containing ' + ','.join(SUB_CLASSES)\n self.begin_str = begin_str + class_str + subclass_str + '.\\n'"
},
{
"identifier": "PartImagenet",
"path": "osprey/datasets/stage2_data.py",
"snippet": "class PartImagenet(CustomDataset):\n\n def __init__(self,\n tokenizer,\n data_args=None,\n ann_file=None,\n img_prefix=None,\n max_gt_per_img=15,\n ):\n\n super().__init__(tokenizer, data_args, ann_file, img_prefix, max_gt_per_img)\n CAT_CLASSES = (\n 'Bottle', 'Biped', 'Quadruped', 'Fish', 'Reptile', 'Bicycle', 'Bird', 'Car', 'Boat', 'Snake', 'Aeroplane'\n )\n\n SUB_CLASSES = (\n 'Tier', 'Hand', 'Wing', 'Mouth', 'Tail', 'Side', 'Fin', 'Engine', 'Foot', 'Head', 'Body', 'Sail', 'Seat'\n )\n\n begin_str = '<image>\\nIn the conversation below, you simply answer the category and subcategory name based on what you see' \\\n 'in the image inside a particular region. It maybe a subpart of an object. '\\\n 'I will give you only one region each time. Your answer should in the format of '\\\n 'category subcategory. '\n class_str = 'Categories Containing '+', '.join(CAT_CLASSES)+ '. '\n subclass_str = 'Subcategories Containing ' + ','.join(SUB_CLASSES)\n self.begin_str = begin_str + class_str + subclass_str + '.\\n'"
},
{
"identifier": "OspreyDetailedDescription",
"path": "osprey/datasets/osprey_724k.py",
"snippet": "class OspreyDetailedDescription(ConversationDataset):\n def __init__(self,\n tokenizer,\n data_args=None,\n ann_file=None,\n img_prefix=None,\n ):\n super().__init__(tokenizer, data_args, ann_file, img_prefix)\n\n def load_annotations(self, ann_file):\n data_infos = []\n ann_list = json.load(open(ann_file))\n\n for ann in ann_list:\n masks = []\n qa_s = []\n filename = ann['file_name'].split('_')[-1]\n img_path = os.path.join(self.img_prefix, filename)\n region_num = len(ann['annotation'])\n h, w = ann['height'], ann['width']\n for i in range(region_num):\n mask = ann['annotation'][i]['segmentation']\n masks.append(mask)\n\n question = random.choice(DETAILED_QUESTIONS)\n question = question.replace('<region>', '<mask><pos>')\n if i==0:\n qa_s.append({'from': 'human', 'value': self.begin_str+question}) \n else:\n qa_s.append({'from': 'human', 'value': question}) \n \n answer = re.findall(r\"<.*>:\\ (.*)\", ann['description'][i])[0]\n \n qa_s.append({'from': 'gpt', 'value': answer})\n\n data_infos.append(dict(\n img_path = img_path,\n masks = masks,\n height = h,\n width = w,\n qas = qa_s\n ))\n return data_infos"
},
{
"identifier": "OspreyConversations",
"path": "osprey/datasets/osprey_724k.py",
"snippet": "class OspreyConversations(ConversationDataset):\n def __init__(self,\n tokenizer,\n data_args=None,\n ann_file=None,\n img_prefix=None,\n ):\n self.limit = \"\"\n super().__init__(tokenizer, data_args, ann_file, img_prefix)"
},
{
"identifier": "OspreyShortForm",
"path": "osprey/datasets/osprey_724k.py",
"snippet": "class OspreyShortForm(ConversationDataset):\n def __init__(self,\n tokenizer,\n data_args=None,\n ann_file=None,\n img_prefix=None,\n ):\n self.limit = ' Answer the question using a single word or phrase.'\n super().__init__(tokenizer, data_args, ann_file, img_prefix)"
},
{
"identifier": "OspreyPartLevel",
"path": "osprey/datasets/osprey_724k.py",
"snippet": "class OspreyPartLevel(ConversationDataset):\n def __init__(self,\n tokenizer,\n data_args=None,\n ann_file=None,\n img_prefix=None,\n ):\n self.limit = ' Answer the question using a single word or phrase.'\n super().__init__(tokenizer, data_args, ann_file, img_prefix)"
},
{
"identifier": "OspreyLVISPosNeg",
"path": "osprey/datasets/osprey_724k.py",
"snippet": "class OspreyLVISPosNeg(ConversationDataset):\n def __init__(self,\n tokenizer,\n data_args=None,\n ann_file=None,\n img_prefix=None,\n ):\n \n super().__init__(tokenizer, data_args, ann_file, img_prefix)\n\n def load_annotations(self, ann_file):\n data_infos = []\n ann_list = json.load(open(ann_file))\n\n for ann in ann_list:\n if len(ann['conversations'])//2 ==0:\n continue\n masks = []\n qa_s = []\n filename = ann['file_name']\n img_path = os.path.join(self.img_prefix, filename)\n region_num = len(ann['annotation'])\n h, w = ann['height'], ann['width']\n\n for i in range(region_num):\n mask = ann['annotation'][i]['segmentation']\n masks.append(mask)\n \n for i in range(len(ann['conversations'])//2):\n \n question = ann['conversations'][i*2]['value']\n question = re.sub(r'<region\\d+>', '<mask><pos>', question)\n if i==0:\n question = self.begin_str+question\n qa_s.append({'from': 'human', 'value': question}) \n \n answer = ann['conversations'][i*2+1]['value']\n qa_s.append({'from': 'gpt', 'value': answer})\n\n data_infos.append(dict(\n img_path = img_path,\n masks = masks,\n height = h,\n width = w,\n qas = qa_s\n ))\n # print(qa_s)\n\n return data_infos"
}
] | from dataclasses import dataclass
from torch.utils.data import ConcatDataset
from osprey.constants import IGNORE_INDEX
from .stage2_data import COCODataset, RefCOCO, RefCOCOP
from .vcr import VCRDataset
from .vg import VGDATA
from .stage2_data import PascalPart
from .stage2_data import PartImagenet
from .osprey_724k import OspreyDetailedDescription, OspreyConversations, OspreyShortForm, OspreyPartLevel, OspreyLVISPosNeg
import torch
import transformers
import json | 5,697 | class DataCollatorForDetDataset(object):
tokenizer: transformers.PreTrainedTokenizer
def __call__(self, instances):
input_ids, labels, img_metas, masks = tuple([instance.get(key,None) for instance in instances]
for key in ('input_ids',
'labels',
'img_metas',
'masks'))
input_ids = torch.nn.utils.rnn.pad_sequence(
input_ids,
batch_first=True,
padding_value=self.tokenizer.pad_token_id)
labels = torch.nn.utils.rnn.pad_sequence(labels,
batch_first=True,
padding_value=IGNORE_INDEX)
batch = dict(
input_ids=input_ids,
labels=labels,
attention_mask=input_ids.ne(self.tokenizer.pad_token_id),
img_metas=img_metas,
masks = masks
)
if 'image' in instances[0]:
images = [instance['image'] for instance in instances]
if all(x is not None and x.shape == images[0].shape for x in images):
batch['images'] = torch.stack(images)
else:
batch['images'] = images
return batch
def make_multitask_data_module(tokenizer,
data_args) :
"""Make dataset and collator for supervised fine-tuning."""
if data_args.dataset_config is not None:
dataset_config = json.load(open(data_args.dataset_config))
train_dataset = build_osprey_dataset(dataset_config,
tokenizer=tokenizer,
data_args=data_args)
data_collator = DataCollatorForDetDataset(tokenizer=tokenizer)
return dict(train_dataset=train_dataset,
eval_dataset=None,
data_collator=data_collator)
def build_osprey_dataset(dataset_config,
tokenizer=None,
data_args=None,
**kwargs):
if isinstance(dataset_config, list):
datasets = []
for cfg in dataset_config:
temp_dataset = build_osprey_dataset(cfg, tokenizer=tokenizer, data_args=data_args, **kwargs)
datasets.append(temp_dataset)
for dataset in datasets:
print(type(dataset), f'len = {len(dataset)}')
return ConcatDataset(datasets)
dataset_type = dataset_config.pop('type')
if dataset_type == 'coco_data':
dataset = COCODataset(
**dataset_config,
tokenizer=tokenizer,
data_args=data_args,
**kwargs,
)
elif dataset_type == 'vcr':
dataset = VCRDataset(
**dataset_config,
tokenizer=tokenizer,
data_args=data_args,
**kwargs,
)
elif dataset_type == 'VGDATA':
dataset = VGDATA(
**dataset_config,
tokenizer=tokenizer,
data_args=data_args,
**kwargs,
)
elif dataset_type == 'RefCOCO':
dataset = RefCOCO(
**dataset_config,
tokenizer=tokenizer,
data_args=data_args,
**kwargs,
)
elif dataset_type == 'RefCOCOP':
dataset = RefCOCOP(
**dataset_config,
tokenizer=tokenizer,
data_args=data_args,
**kwargs,
)
elif dataset_type == 'PascalPart':
dataset = PascalPart(
**dataset_config,
tokenizer=tokenizer,
data_args=data_args,
**kwargs,
)
elif dataset_type == 'PartImagenet':
dataset = PartImagenet(
**dataset_config,
tokenizer=tokenizer,
data_args=data_args,
**kwargs,
)
elif dataset_type == 'OspreyDetailedDescription':
| @dataclass
class DataCollatorForDetDataset(object):
tokenizer: transformers.PreTrainedTokenizer
def __call__(self, instances):
input_ids, labels, img_metas, masks = tuple([instance.get(key,None) for instance in instances]
for key in ('input_ids',
'labels',
'img_metas',
'masks'))
input_ids = torch.nn.utils.rnn.pad_sequence(
input_ids,
batch_first=True,
padding_value=self.tokenizer.pad_token_id)
labels = torch.nn.utils.rnn.pad_sequence(labels,
batch_first=True,
padding_value=IGNORE_INDEX)
batch = dict(
input_ids=input_ids,
labels=labels,
attention_mask=input_ids.ne(self.tokenizer.pad_token_id),
img_metas=img_metas,
masks = masks
)
if 'image' in instances[0]:
images = [instance['image'] for instance in instances]
if all(x is not None and x.shape == images[0].shape for x in images):
batch['images'] = torch.stack(images)
else:
batch['images'] = images
return batch
def make_multitask_data_module(tokenizer,
data_args) :
"""Make dataset and collator for supervised fine-tuning."""
if data_args.dataset_config is not None:
dataset_config = json.load(open(data_args.dataset_config))
train_dataset = build_osprey_dataset(dataset_config,
tokenizer=tokenizer,
data_args=data_args)
data_collator = DataCollatorForDetDataset(tokenizer=tokenizer)
return dict(train_dataset=train_dataset,
eval_dataset=None,
data_collator=data_collator)
def build_osprey_dataset(dataset_config,
tokenizer=None,
data_args=None,
**kwargs):
if isinstance(dataset_config, list):
datasets = []
for cfg in dataset_config:
temp_dataset = build_osprey_dataset(cfg, tokenizer=tokenizer, data_args=data_args, **kwargs)
datasets.append(temp_dataset)
for dataset in datasets:
print(type(dataset), f'len = {len(dataset)}')
return ConcatDataset(datasets)
dataset_type = dataset_config.pop('type')
if dataset_type == 'coco_data':
dataset = COCODataset(
**dataset_config,
tokenizer=tokenizer,
data_args=data_args,
**kwargs,
)
elif dataset_type == 'vcr':
dataset = VCRDataset(
**dataset_config,
tokenizer=tokenizer,
data_args=data_args,
**kwargs,
)
elif dataset_type == 'VGDATA':
dataset = VGDATA(
**dataset_config,
tokenizer=tokenizer,
data_args=data_args,
**kwargs,
)
elif dataset_type == 'RefCOCO':
dataset = RefCOCO(
**dataset_config,
tokenizer=tokenizer,
data_args=data_args,
**kwargs,
)
elif dataset_type == 'RefCOCOP':
dataset = RefCOCOP(
**dataset_config,
tokenizer=tokenizer,
data_args=data_args,
**kwargs,
)
elif dataset_type == 'PascalPart':
dataset = PascalPart(
**dataset_config,
tokenizer=tokenizer,
data_args=data_args,
**kwargs,
)
elif dataset_type == 'PartImagenet':
dataset = PartImagenet(
**dataset_config,
tokenizer=tokenizer,
data_args=data_args,
**kwargs,
)
elif dataset_type == 'OspreyDetailedDescription': | dataset = OspreyDetailedDescription( | 8 | 2023-12-17 16:21:45+00:00 | 8k |
open-mmlab/PIA | animatediff/models/unet_blocks.py | [
{
"identifier": "Transformer3DModel",
"path": "animatediff/models/attention.py",
"snippet": "class Transformer3DModel(ModelMixin, ConfigMixin):\n @register_to_config\n def __init__(\n self,\n num_attention_heads: int = 16,\n attention_head_dim: int = 88,\n in_channels: Optional[int] = None,\n num_layers: int = 1,\n dropout: float = 0.0,\n norm_num_groups: int = 32,\n cross_attention_dim: Optional[int] = None,\n attention_bias: bool = False,\n activation_fn: str = \"geglu\",\n num_embeds_ada_norm: Optional[int] = None,\n use_linear_projection: bool = False,\n only_cross_attention: bool = False,\n upcast_attention: bool = False,\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n ):\n super().__init__()\n self.use_linear_projection = use_linear_projection\n self.num_attention_heads = num_attention_heads\n self.attention_head_dim = attention_head_dim\n inner_dim = num_attention_heads * attention_head_dim\n\n # Define input layers\n self.in_channels = in_channels\n\n self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)\n if use_linear_projection:\n self.proj_in = nn.Linear(in_channels, inner_dim)\n else:\n self.proj_in = nn.Conv2d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0)\n\n # Define transformers blocks\n self.transformer_blocks = nn.ModuleList(\n [\n BasicTransformerBlock(\n inner_dim,\n num_attention_heads,\n attention_head_dim,\n dropout=dropout,\n cross_attention_dim=cross_attention_dim,\n activation_fn=activation_fn,\n num_embeds_ada_norm=num_embeds_ada_norm,\n attention_bias=attention_bias,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n for d in range(num_layers)\n ]\n )\n\n # 4. 
Define output layers\n if use_linear_projection:\n self.proj_out = nn.Linear(in_channels, inner_dim)\n else:\n self.proj_out = nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)\n\n def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, return_dict: bool = True):\n # Input\n assert hidden_states.dim() == 5, f\"Expected hidden_states to have ndim=5, but got ndim={hidden_states.dim()}.\"\n video_length = hidden_states.shape[2]\n hidden_states = rearrange(hidden_states, \"b c f h w -> (b f) c h w\")\n encoder_hidden_states = repeat(encoder_hidden_states, 'b n c -> (b f) n c', f=video_length)\n\n batch, channel, height, weight = hidden_states.shape\n residual = hidden_states\n\n hidden_states = self.norm(hidden_states)\n if not self.use_linear_projection:\n hidden_states = self.proj_in(hidden_states)\n inner_dim = hidden_states.shape[1]\n hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * weight, inner_dim)\n else:\n inner_dim = hidden_states.shape[1]\n hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * weight, inner_dim)\n hidden_states = self.proj_in(hidden_states)\n\n # Blocks\n for block in self.transformer_blocks:\n hidden_states = block(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n timestep=timestep,\n video_length=video_length\n )\n\n # Output\n if not self.use_linear_projection:\n hidden_states = (\n hidden_states.reshape(batch, height, weight, inner_dim).permute(0, 3, 1, 2).contiguous()\n )\n hidden_states = self.proj_out(hidden_states)\n else:\n hidden_states = self.proj_out(hidden_states)\n hidden_states = (\n hidden_states.reshape(batch, height, weight, inner_dim).permute(0, 3, 1, 2).contiguous()\n )\n\n output = hidden_states + residual\n\n output = rearrange(output, \"(b f) c h w -> b c f h w\", f=video_length)\n if not return_dict:\n return (output,)\n\n return Transformer3DModelOutput(sample=output)"
},
{
"identifier": "Downsample3D",
"path": "animatediff/models/resnet.py",
"snippet": "class Downsample3D(nn.Module):\n def __init__(self, channels, use_conv=False, out_channels=None, padding=1, name=\"conv\"):\n super().__init__()\n self.channels = channels\n self.out_channels = out_channels or channels\n self.use_conv = use_conv\n self.padding = padding\n stride = 2\n self.name = name\n\n if use_conv:\n self.conv = InflatedConv3d(self.channels, self.out_channels, 3, stride=stride, padding=padding)\n else:\n raise NotImplementedError\n\n def forward(self, hidden_states):\n assert hidden_states.shape[1] == self.channels\n if self.use_conv and self.padding == 0:\n raise NotImplementedError\n\n assert hidden_states.shape[1] == self.channels\n hidden_states = self.conv(hidden_states)\n\n return hidden_states"
},
{
"identifier": "ResnetBlock3D",
"path": "animatediff/models/resnet.py",
"snippet": "class ResnetBlock3D(nn.Module):\n def __init__(\n self,\n *,\n in_channels,\n out_channels=None,\n conv_shortcut=False,\n dropout=0.0,\n temb_channels=512,\n groups=32,\n groups_out=None,\n pre_norm=True,\n eps=1e-6,\n non_linearity=\"swish\",\n time_embedding_norm=\"default\",\n output_scale_factor=1.0,\n use_in_shortcut=None,\n ):\n super().__init__()\n self.pre_norm = pre_norm\n self.pre_norm = True\n self.in_channels = in_channels\n out_channels = in_channels if out_channels is None else out_channels\n self.out_channels = out_channels\n self.use_conv_shortcut = conv_shortcut\n self.time_embedding_norm = time_embedding_norm\n self.output_scale_factor = output_scale_factor\n\n if groups_out is None:\n groups_out = groups\n\n self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True)\n\n self.conv1 = InflatedConv3d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)\n\n if temb_channels is not None:\n if self.time_embedding_norm == \"default\":\n time_emb_proj_out_channels = out_channels\n elif self.time_embedding_norm == \"scale_shift\":\n time_emb_proj_out_channels = out_channels * 2\n else:\n raise ValueError(f\"unknown time_embedding_norm : {self.time_embedding_norm} \")\n\n self.time_emb_proj = torch.nn.Linear(temb_channels, time_emb_proj_out_channels)\n else:\n self.time_emb_proj = None\n\n self.norm2 = torch.nn.GroupNorm(num_groups=groups_out, num_channels=out_channels, eps=eps, affine=True)\n self.dropout = torch.nn.Dropout(dropout)\n self.conv2 = InflatedConv3d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)\n\n if non_linearity == \"swish\":\n self.nonlinearity = lambda x: F.silu(x)\n elif non_linearity == \"mish\":\n self.nonlinearity = Mish()\n elif non_linearity == \"silu\":\n self.nonlinearity = nn.SiLU()\n\n self.use_in_shortcut = self.in_channels != self.out_channels if use_in_shortcut is None else use_in_shortcut\n\n self.conv_shortcut = None\n if self.use_in_shortcut:\n self.conv_shortcut = InflatedConv3d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)\n\n def forward(self, input_tensor, temb):\n hidden_states = input_tensor\n\n hidden_states = self.norm1(hidden_states)\n hidden_states = self.nonlinearity(hidden_states)\n\n hidden_states = self.conv1(hidden_states)\n \n if temb is not None:\n temb = self.time_emb_proj(self.nonlinearity(temb))[:, :, None, None, None]\n\n if temb is not None and self.time_embedding_norm == \"default\":\n hidden_states = hidden_states + temb\n\n hidden_states = self.norm2(hidden_states)\n\n if temb is not None and self.time_embedding_norm == \"scale_shift\":\n scale, shift = torch.chunk(temb, 2, dim=1)\n hidden_states = hidden_states * (1 + scale) + shift\n\n hidden_states = self.nonlinearity(hidden_states)\n\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.conv2(hidden_states)\n\n if self.conv_shortcut is not None:\n input_tensor = self.conv_shortcut(input_tensor)\n\n output_tensor = (input_tensor + hidden_states) / self.output_scale_factor\n\n return output_tensor"
},
{
"identifier": "Upsample3D",
"path": "animatediff/models/resnet.py",
"snippet": "class Upsample3D(nn.Module):\n def __init__(self, channels, use_conv=False, use_conv_transpose=False, out_channels=None, name=\"conv\"):\n super().__init__()\n self.channels = channels\n self.out_channels = out_channels or channels\n self.use_conv = use_conv\n self.use_conv_transpose = use_conv_transpose\n self.name = name\n\n conv = None\n if use_conv_transpose:\n raise NotImplementedError\n elif use_conv:\n self.conv = InflatedConv3d(self.channels, self.out_channels, 3, padding=1)\n\n def forward(self, hidden_states, output_size=None):\n assert hidden_states.shape[1] == self.channels\n\n if self.use_conv_transpose:\n raise NotImplementedError\n\n # Cast to float32 to as 'upsample_nearest2d_out_frame' op does not support bfloat16\n dtype = hidden_states.dtype\n if dtype == torch.bfloat16:\n hidden_states = hidden_states.to(torch.float32)\n\n # upsample_nearest_nhwc fails with large batch sizes. see https://github.com/huggingface/diffusers/issues/984\n if hidden_states.shape[0] >= 64:\n hidden_states = hidden_states.contiguous()\n\n # if `output_size` is passed we force the interpolation output\n # size and do not make use of `scale_factor=2`\n if output_size is None:\n hidden_states = F.interpolate(hidden_states, scale_factor=[1.0, 2.0, 2.0], mode=\"nearest\")\n else:\n hidden_states = F.interpolate(hidden_states, size=output_size, mode=\"nearest\")\n\n # If the input is bfloat16, we cast back to bfloat16\n if dtype == torch.bfloat16:\n hidden_states = hidden_states.to(dtype)\n\n # if self.use_conv:\n # if self.name == \"conv\":\n # hidden_states = self.conv(hidden_states)\n # else:\n # hidden_states = self.Conv2d_0(hidden_states)\n hidden_states = self.conv(hidden_states)\n\n return hidden_states"
},
{
"identifier": "get_motion_module",
"path": "animatediff/models/motion_module.py",
"snippet": "def get_motion_module(\n in_channels,\n motion_module_type: str,\n motion_module_kwargs: dict\n):\n if motion_module_type == \"Vanilla\":\n return VanillaTemporalModule(in_channels=in_channels, **motion_module_kwargs,)\n else:\n raise ValueError"
}
] | import torch
import pdb
from torch import nn
from .attention import Transformer3DModel
from .resnet import Downsample3D, ResnetBlock3D, Upsample3D
from .motion_module import get_motion_module | 4,545 | ):
up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type
if up_block_type == "UpBlock3D":
return UpBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
prev_output_channel=prev_output_channel,
temb_channels=temb_channels,
add_upsample=add_upsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
resnet_time_scale_shift=resnet_time_scale_shift,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
elif up_block_type == "CrossAttnUpBlock3D":
if cross_attention_dim is None:
raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock3D")
return CrossAttnUpBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
prev_output_channel=prev_output_channel,
temb_channels=temb_channels,
add_upsample=add_upsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attn_num_head_channels,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
raise ValueError(f"{up_block_type} does not exist.")
class UNetMidBlock3DCrossAttn(nn.Module):
def __init__(
self,
in_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
attn_num_head_channels=1,
output_scale_factor=1.0,
cross_attention_dim=1280,
dual_cross_attention=False,
use_linear_projection=False,
upcast_attention=False,
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
super().__init__()
self.has_cross_attention = True
self.attn_num_head_channels = attn_num_head_channels
resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)
# there is always at least one resnet
resnets = [
ResnetBlock3D(
in_channels=in_channels,
out_channels=in_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
]
attentions = []
motion_modules = []
for _ in range(num_layers):
if dual_cross_attention:
raise NotImplementedError
attentions.append(
Transformer3DModel(
attn_num_head_channels,
in_channels // attn_num_head_channels,
in_channels=in_channels,
num_layers=1,
cross_attention_dim=cross_attention_dim,
norm_num_groups=resnet_groups,
use_linear_projection=use_linear_projection,
upcast_attention=upcast_attention,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
)
)
motion_modules.append(
| # Adapted from https://github.com/guoyww/AnimateDiff
def get_down_block(
down_block_type,
num_layers,
in_channels,
out_channels,
temb_channels,
add_downsample,
resnet_eps,
resnet_act_fn,
attn_num_head_channels,
resnet_groups=None,
cross_attention_dim=None,
downsample_padding=None,
dual_cross_attention=False,
use_linear_projection=False,
only_cross_attention=False,
upcast_attention=False,
resnet_time_scale_shift="default",
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
down_block_type = down_block_type[7:] if down_block_type.startswith("UNetRes") else down_block_type
if down_block_type == "DownBlock3D":
return DownBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
add_downsample=add_downsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
downsample_padding=downsample_padding,
resnet_time_scale_shift=resnet_time_scale_shift,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
elif down_block_type == "CrossAttnDownBlock3D":
if cross_attention_dim is None:
raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock3D")
return CrossAttnDownBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
add_downsample=add_downsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
downsample_padding=downsample_padding,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attn_num_head_channels,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
raise ValueError(f"{down_block_type} does not exist.")
def get_up_block(
up_block_type,
num_layers,
in_channels,
out_channels,
prev_output_channel,
temb_channels,
add_upsample,
resnet_eps,
resnet_act_fn,
attn_num_head_channels,
resnet_groups=None,
cross_attention_dim=None,
dual_cross_attention=False,
use_linear_projection=False,
only_cross_attention=False,
upcast_attention=False,
resnet_time_scale_shift="default",
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type
if up_block_type == "UpBlock3D":
return UpBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
prev_output_channel=prev_output_channel,
temb_channels=temb_channels,
add_upsample=add_upsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
resnet_time_scale_shift=resnet_time_scale_shift,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
elif up_block_type == "CrossAttnUpBlock3D":
if cross_attention_dim is None:
raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock3D")
return CrossAttnUpBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
prev_output_channel=prev_output_channel,
temb_channels=temb_channels,
add_upsample=add_upsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attn_num_head_channels,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
raise ValueError(f"{up_block_type} does not exist.")
class UNetMidBlock3DCrossAttn(nn.Module):
def __init__(
self,
in_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
attn_num_head_channels=1,
output_scale_factor=1.0,
cross_attention_dim=1280,
dual_cross_attention=False,
use_linear_projection=False,
upcast_attention=False,
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
super().__init__()
self.has_cross_attention = True
self.attn_num_head_channels = attn_num_head_channels
resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)
# there is always at least one resnet
resnets = [
ResnetBlock3D(
in_channels=in_channels,
out_channels=in_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
]
attentions = []
motion_modules = []
for _ in range(num_layers):
if dual_cross_attention:
raise NotImplementedError
attentions.append(
Transformer3DModel(
attn_num_head_channels,
in_channels // attn_num_head_channels,
in_channels=in_channels,
num_layers=1,
cross_attention_dim=cross_attention_dim,
norm_num_groups=resnet_groups,
use_linear_projection=use_linear_projection,
upcast_attention=upcast_attention,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
)
)
motion_modules.append( | get_motion_module( | 4 | 2023-12-21 03:29:34+00:00 | 8k |
3DTopia/OpenLRM | lrm/models/rendering/synthesizer.py | [
{
"identifier": "ImportanceRenderer",
"path": "lrm/models/rendering/utils/renderer.py",
"snippet": "class ImportanceRenderer(torch.nn.Module):\n \"\"\"\n Modified original version to filter out-of-box samples as TensoRF does.\n \n Reference:\n TensoRF: https://github.com/apchenstu/TensoRF/blob/main/models/tensorBase.py#L277\n \"\"\"\n def __init__(self):\n super().__init__()\n self.activation_factory = self._build_activation_factory()\n self.ray_marcher = MipRayMarcher2(self.activation_factory)\n self.plane_axes = generate_planes()\n\n def _build_activation_factory(self):\n def activation_factory(options: dict):\n if options['clamp_mode'] == 'softplus':\n return lambda x: F.softplus(x - 1) # activation bias of -1 makes things initialize better\n else:\n assert False, \"Renderer only supports `clamp_mode`=`softplus`!\"\n return activation_factory\n\n def _forward_pass(self, depths: torch.Tensor, ray_directions: torch.Tensor, ray_origins: torch.Tensor,\n planes: torch.Tensor, decoder: nn.Module, rendering_options: dict):\n \"\"\"\n Additional filtering is applied to filter out-of-box samples.\n Modifications made by Zexin He.\n \"\"\"\n\n # context related variables\n batch_size, num_rays, samples_per_ray, _ = depths.shape\n device = depths.device\n\n # define sample points with depths\n sample_directions = ray_directions.unsqueeze(-2).expand(-1, -1, samples_per_ray, -1).reshape(batch_size, -1, 3)\n sample_coordinates = (ray_origins.unsqueeze(-2) + depths * ray_directions.unsqueeze(-2)).reshape(batch_size, -1, 3)\n\n # filter out-of-box samples\n mask_inbox = \\\n (rendering_options['sampler_bbox_min'] <= sample_coordinates) & \\\n (sample_coordinates <= rendering_options['sampler_bbox_max'])\n mask_inbox = mask_inbox.all(-1)\n\n # forward model according to all samples\n _out = self.run_model(planes, decoder, sample_coordinates, sample_directions, rendering_options)\n\n # set out-of-box samples to zeros(rgb) & -inf(sigma)\n SAFE_GUARD = 3\n DATA_TYPE = _out['sigma'].dtype\n colors_pass = torch.zeros(batch_size, num_rays * samples_per_ray, 3, device=device, dtype=DATA_TYPE)\n densities_pass = torch.nan_to_num(torch.full((batch_size, num_rays * samples_per_ray, 1), -float('inf'), device=device, dtype=DATA_TYPE)) / SAFE_GUARD\n colors_pass[mask_inbox], densities_pass[mask_inbox] = _out['rgb'][mask_inbox], _out['sigma'][mask_inbox]\n\n # reshape back\n colors_pass = colors_pass.reshape(batch_size, num_rays, samples_per_ray, colors_pass.shape[-1])\n densities_pass = densities_pass.reshape(batch_size, num_rays, samples_per_ray, densities_pass.shape[-1])\n\n return colors_pass, densities_pass\n\n def forward(self, planes, decoder, ray_origins, ray_directions, rendering_options):\n # self.plane_axes = self.plane_axes.to(ray_origins.device)\n\n if rendering_options['ray_start'] == rendering_options['ray_end'] == 'auto':\n ray_start, ray_end = math_utils.get_ray_limits_box(ray_origins, ray_directions, box_side_length=rendering_options['box_warp'])\n is_ray_valid = ray_end > ray_start\n if torch.any(is_ray_valid).item():\n ray_start[~is_ray_valid] = ray_start[is_ray_valid].min()\n ray_end[~is_ray_valid] = ray_start[is_ray_valid].max()\n depths_coarse = self.sample_stratified(ray_origins, ray_start, ray_end, rendering_options['depth_resolution'], rendering_options['disparity_space_sampling'])\n else:\n # Create stratified depth samples\n depths_coarse = self.sample_stratified(ray_origins, rendering_options['ray_start'], rendering_options['ray_end'], rendering_options['depth_resolution'], rendering_options['disparity_space_sampling'])\n\n # Coarse Pass\n colors_coarse, densities_coarse = 
self._forward_pass(\n depths=depths_coarse, ray_directions=ray_directions, ray_origins=ray_origins,\n planes=planes, decoder=decoder, rendering_options=rendering_options)\n\n # Fine Pass\n N_importance = rendering_options['depth_resolution_importance']\n if N_importance > 0:\n _, _, weights = self.ray_marcher(colors_coarse, densities_coarse, depths_coarse, rendering_options)\n\n depths_fine = self.sample_importance(depths_coarse, weights, N_importance)\n\n colors_fine, densities_fine = self._forward_pass(\n depths=depths_fine, ray_directions=ray_directions, ray_origins=ray_origins,\n planes=planes, decoder=decoder, rendering_options=rendering_options)\n\n all_depths, all_colors, all_densities = self.unify_samples(depths_coarse, colors_coarse, densities_coarse,\n depths_fine, colors_fine, densities_fine)\n\n # Aggregate\n rgb_final, depth_final, weights = self.ray_marcher(all_colors, all_densities, all_depths, rendering_options)\n else:\n rgb_final, depth_final, weights = self.ray_marcher(colors_coarse, densities_coarse, depths_coarse, rendering_options)\n\n return rgb_final, depth_final, weights.sum(2)\n\n def run_model(self, planes, decoder, sample_coordinates, sample_directions, options):\n plane_axes = self.plane_axes.to(planes.device)\n sampled_features = sample_from_planes(plane_axes, planes, sample_coordinates, padding_mode='zeros', box_warp=options['box_warp'])\n\n out = decoder(sampled_features, sample_directions)\n if options.get('density_noise', 0) > 0:\n out['sigma'] += torch.randn_like(out['sigma']) * options['density_noise']\n return out\n\n def run_model_activated(self, planes, decoder, sample_coordinates, sample_directions, options):\n out = self.run_model(planes, decoder, sample_coordinates, sample_directions, options)\n out['sigma'] = self.activation_factory(options)(out['sigma'])\n return out\n\n def sort_samples(self, all_depths, all_colors, all_densities):\n _, indices = torch.sort(all_depths, dim=-2)\n all_depths = torch.gather(all_depths, -2, indices)\n all_colors = torch.gather(all_colors, -2, indices.expand(-1, -1, -1, all_colors.shape[-1]))\n all_densities = torch.gather(all_densities, -2, indices.expand(-1, -1, -1, 1))\n return all_depths, all_colors, all_densities\n\n def unify_samples(self, depths1, colors1, densities1, depths2, colors2, densities2):\n all_depths = torch.cat([depths1, depths2], dim = -2)\n all_colors = torch.cat([colors1, colors2], dim = -2)\n all_densities = torch.cat([densities1, densities2], dim = -2)\n\n _, indices = torch.sort(all_depths, dim=-2)\n all_depths = torch.gather(all_depths, -2, indices)\n all_colors = torch.gather(all_colors, -2, indices.expand(-1, -1, -1, all_colors.shape[-1]))\n all_densities = torch.gather(all_densities, -2, indices.expand(-1, -1, -1, 1))\n\n return all_depths, all_colors, all_densities\n\n def sample_stratified(self, ray_origins, ray_start, ray_end, depth_resolution, disparity_space_sampling=False):\n \"\"\"\n Return depths of approximately uniformly spaced samples along rays.\n \"\"\"\n N, M, _ = ray_origins.shape\n if disparity_space_sampling:\n depths_coarse = torch.linspace(0,\n 1,\n depth_resolution,\n device=ray_origins.device).reshape(1, 1, depth_resolution, 1).repeat(N, M, 1, 1)\n depth_delta = 1/(depth_resolution - 1)\n depths_coarse += torch.rand_like(depths_coarse) * depth_delta\n depths_coarse = 1./(1./ray_start * (1. 
- depths_coarse) + 1./ray_end * depths_coarse)\n else:\n if type(ray_start) == torch.Tensor:\n depths_coarse = math_utils.linspace(ray_start, ray_end, depth_resolution).permute(1,2,0,3)\n depth_delta = (ray_end - ray_start) / (depth_resolution - 1)\n depths_coarse += torch.rand_like(depths_coarse) * depth_delta[..., None]\n else:\n depths_coarse = torch.linspace(ray_start, ray_end, depth_resolution, device=ray_origins.device).reshape(1, 1, depth_resolution, 1).repeat(N, M, 1, 1)\n depth_delta = (ray_end - ray_start)/(depth_resolution - 1)\n depths_coarse += torch.rand_like(depths_coarse) * depth_delta\n\n return depths_coarse\n\n def sample_importance(self, z_vals, weights, N_importance):\n \"\"\"\n Return depths of importance sampled points along rays. See NeRF importance sampling for more.\n \"\"\"\n with torch.no_grad():\n batch_size, num_rays, samples_per_ray, _ = z_vals.shape\n\n z_vals = z_vals.reshape(batch_size * num_rays, samples_per_ray)\n weights = weights.reshape(batch_size * num_rays, -1) # -1 to account for loss of 1 sample in MipRayMarcher\n\n # smooth weights\n weights = torch.nn.functional.max_pool1d(weights.unsqueeze(1).float(), 2, 1, padding=1)\n weights = torch.nn.functional.avg_pool1d(weights, 2, 1).squeeze()\n weights = weights + 0.01\n\n z_vals_mid = 0.5 * (z_vals[: ,:-1] + z_vals[: ,1:])\n importance_z_vals = self.sample_pdf(z_vals_mid, weights[:, 1:-1],\n N_importance).detach().reshape(batch_size, num_rays, N_importance, 1)\n return importance_z_vals\n\n def sample_pdf(self, bins, weights, N_importance, det=False, eps=1e-5):\n \"\"\"\n Sample @N_importance samples from @bins with distribution defined by @weights.\n Inputs:\n bins: (N_rays, N_samples_+1) where N_samples_ is \"the number of coarse samples per ray - 2\"\n weights: (N_rays, N_samples_)\n N_importance: the number of samples to draw from the distribution\n det: deterministic or not\n eps: a small number to prevent division by zero\n Outputs:\n samples: the sampled samples\n \"\"\"\n N_rays, N_samples_ = weights.shape\n weights = weights + eps # prevent division by zero (don't do inplace op!)\n pdf = weights / torch.sum(weights, -1, keepdim=True) # (N_rays, N_samples_)\n cdf = torch.cumsum(pdf, -1) # (N_rays, N_samples), cumulative distribution function\n cdf = torch.cat([torch.zeros_like(cdf[: ,:1]), cdf], -1) # (N_rays, N_samples_+1)\n # padded to 0~1 inclusive\n\n if det:\n u = torch.linspace(0, 1, N_importance, device=bins.device)\n u = u.expand(N_rays, N_importance)\n else:\n u = torch.rand(N_rays, N_importance, device=bins.device)\n u = u.contiguous()\n\n inds = torch.searchsorted(cdf, u, right=True)\n below = torch.clamp_min(inds-1, 0)\n above = torch.clamp_max(inds, N_samples_)\n\n inds_sampled = torch.stack([below, above], -1).view(N_rays, 2*N_importance)\n cdf_g = torch.gather(cdf, 1, inds_sampled).view(N_rays, N_importance, 2)\n bins_g = torch.gather(bins, 1, inds_sampled).view(N_rays, N_importance, 2)\n\n denom = cdf_g[...,1]-cdf_g[...,0]\n denom[denom<eps] = 1 # denom equals 0 means a bin has weight 0, in which case it will not be sampled\n # anyway, therefore any value for it is fine (set to 1 here)\n\n samples = bins_g[...,0] + (u-cdf_g[...,0])/denom * (bins_g[...,1]-bins_g[...,0])\n return samples"
},
{
"identifier": "RaySampler",
"path": "lrm/models/rendering/utils/ray_sampler.py",
"snippet": "class RaySampler(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.ray_origins_h, self.ray_directions, self.depths, self.image_coords, self.rendering_options = None, None, None, None, None\n\n\n def forward(self, cam2world_matrix, intrinsics, render_size):\n \"\"\"\n Create batches of rays and return origins and directions.\n\n cam2world_matrix: (N, 4, 4)\n intrinsics: (N, 3, 3)\n render_size: int\n\n ray_origins: (N, M, 3)\n ray_dirs: (N, M, 2)\n \"\"\"\n\n N, M = cam2world_matrix.shape[0], render_size**2\n cam_locs_world = cam2world_matrix[:, :3, 3]\n fx = intrinsics[:, 0, 0]\n fy = intrinsics[:, 1, 1]\n cx = intrinsics[:, 0, 2]\n cy = intrinsics[:, 1, 2]\n sk = intrinsics[:, 0, 1]\n\n uv = torch.stack(torch.meshgrid(\n torch.arange(render_size, dtype=torch.float32, device=cam2world_matrix.device),\n torch.arange(render_size, dtype=torch.float32, device=cam2world_matrix.device),\n indexing='ij',\n ))\n uv = uv.flip(0).reshape(2, -1).transpose(1, 0)\n uv = uv.unsqueeze(0).repeat(cam2world_matrix.shape[0], 1, 1)\n\n x_cam = uv[:, :, 0].view(N, -1) * (1./render_size) + (0.5/render_size)\n y_cam = uv[:, :, 1].view(N, -1) * (1./render_size) + (0.5/render_size)\n z_cam = torch.ones((N, M), device=cam2world_matrix.device)\n\n x_lift = (x_cam - cx.unsqueeze(-1) + cy.unsqueeze(-1)*sk.unsqueeze(-1)/fy.unsqueeze(-1) - sk.unsqueeze(-1)*y_cam/fy.unsqueeze(-1)) / fx.unsqueeze(-1) * z_cam\n y_lift = (y_cam - cy.unsqueeze(-1)) / fy.unsqueeze(-1) * z_cam\n\n cam_rel_points = torch.stack((x_lift, y_lift, z_cam, torch.ones_like(z_cam)), dim=-1)\n\n _opencv2blender = torch.tensor([\n [1, 0, 0, 0],\n [0, -1, 0, 0],\n [0, 0, -1, 0],\n [0, 0, 0, 1],\n ], dtype=torch.float32, device=cam2world_matrix.device).unsqueeze(0).repeat(N, 1, 1)\n\n cam2world_matrix = torch.bmm(cam2world_matrix, _opencv2blender)\n\n world_rel_points = torch.bmm(cam2world_matrix, cam_rel_points.permute(0, 2, 1)).permute(0, 2, 1)[:, :, :3]\n\n ray_dirs = world_rel_points - cam_locs_world[:, None, :]\n ray_dirs = torch.nn.functional.normalize(ray_dirs, dim=2)\n\n ray_origins = cam_locs_world.unsqueeze(1).repeat(1, ray_dirs.shape[1], 1)\n\n return ray_origins, ray_dirs"
}
] | import itertools
import torch
import torch.nn as nn
from .utils.renderer import ImportanceRenderer
from .utils.ray_sampler import RaySampler | 4,667 | # ORIGINAL LICENSE
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# Modified by Zexin He
# The modifications are subject to the same license as the original.
class OSGDecoder(nn.Module):
"""
Triplane decoder that gives RGB and sigma values from sampled features.
Using ReLU here instead of Softplus in the original implementation.
Reference:
EG3D: https://github.com/NVlabs/eg3d/blob/main/eg3d/training/triplane.py#L112
"""
def __init__(self, n_features: int,
hidden_dim: int = 64, num_layers: int = 4, activation: nn.Module = nn.ReLU):
super().__init__()
self.net = nn.Sequential(
nn.Linear(3 * n_features, hidden_dim),
activation(),
*itertools.chain(*[[
nn.Linear(hidden_dim, hidden_dim),
activation(),
] for _ in range(num_layers - 2)]),
nn.Linear(hidden_dim, 1 + 3),
)
# init all bias to zero
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.zeros_(m.bias)
def forward(self, sampled_features, ray_directions):
# Aggregate features by mean
# sampled_features = sampled_features.mean(1)
# Aggregate features by concatenation
_N, n_planes, _M, _C = sampled_features.shape
sampled_features = sampled_features.permute(0, 2, 1, 3).reshape(_N, _M, n_planes*_C)
x = sampled_features
N, M, C = x.shape
x = x.contiguous().view(N*M, C)
x = self.net(x)
x = x.view(N, M, -1)
rgb = torch.sigmoid(x[..., 1:])*(1 + 2*0.001) - 0.001 # Uses sigmoid clamping from MipNeRF
sigma = x[..., 0:1]
return {'rgb': rgb, 'sigma': sigma}
class TriplaneSynthesizer(nn.Module):
"""
Synthesizer that renders a triplane volume with planes and a camera.
Reference:
EG3D: https://github.com/NVlabs/eg3d/blob/main/eg3d/training/triplane.py#L19
"""
DEFAULT_RENDERING_KWARGS = {
'ray_start': 'auto',
'ray_end': 'auto',
'box_warp': 2.,
'white_back': True,
'disparity_space_sampling': False,
'clamp_mode': 'softplus',
'sampler_bbox_min': -1.,
'sampler_bbox_max': 1.,
}
def __init__(self, triplane_dim: int, samples_per_ray: int):
super().__init__()
# attributes
self.triplane_dim = triplane_dim
self.rendering_kwargs = {
**self.DEFAULT_RENDERING_KWARGS,
'depth_resolution': samples_per_ray // 2,
'depth_resolution_importance': samples_per_ray // 2,
}
# renderings
self.renderer = ImportanceRenderer()
| # ORIGINAL LICENSE
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# Modified by Zexin He
# The modifications are subject to the same license as the original.
class OSGDecoder(nn.Module):
"""
Triplane decoder that gives RGB and sigma values from sampled features.
Using ReLU here instead of Softplus in the original implementation.
Reference:
EG3D: https://github.com/NVlabs/eg3d/blob/main/eg3d/training/triplane.py#L112
"""
def __init__(self, n_features: int,
hidden_dim: int = 64, num_layers: int = 4, activation: nn.Module = nn.ReLU):
super().__init__()
self.net = nn.Sequential(
nn.Linear(3 * n_features, hidden_dim),
activation(),
*itertools.chain(*[[
nn.Linear(hidden_dim, hidden_dim),
activation(),
] for _ in range(num_layers - 2)]),
nn.Linear(hidden_dim, 1 + 3),
)
# init all bias to zero
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.zeros_(m.bias)
def forward(self, sampled_features, ray_directions):
# Aggregate features by mean
# sampled_features = sampled_features.mean(1)
# Aggregate features by concatenation
_N, n_planes, _M, _C = sampled_features.shape
sampled_features = sampled_features.permute(0, 2, 1, 3).reshape(_N, _M, n_planes*_C)
x = sampled_features
N, M, C = x.shape
x = x.contiguous().view(N*M, C)
x = self.net(x)
x = x.view(N, M, -1)
rgb = torch.sigmoid(x[..., 1:])*(1 + 2*0.001) - 0.001 # Uses sigmoid clamping from MipNeRF
sigma = x[..., 0:1]
return {'rgb': rgb, 'sigma': sigma}
class TriplaneSynthesizer(nn.Module):
"""
Synthesizer that renders a triplane volume with planes and a camera.
Reference:
EG3D: https://github.com/NVlabs/eg3d/blob/main/eg3d/training/triplane.py#L19
"""
DEFAULT_RENDERING_KWARGS = {
'ray_start': 'auto',
'ray_end': 'auto',
'box_warp': 2.,
'white_back': True,
'disparity_space_sampling': False,
'clamp_mode': 'softplus',
'sampler_bbox_min': -1.,
'sampler_bbox_max': 1.,
}
def __init__(self, triplane_dim: int, samples_per_ray: int):
super().__init__()
# attributes
self.triplane_dim = triplane_dim
self.rendering_kwargs = {
**self.DEFAULT_RENDERING_KWARGS,
'depth_resolution': samples_per_ray // 2,
'depth_resolution_importance': samples_per_ray // 2,
}
# renderings
self.renderer = ImportanceRenderer() | self.ray_sampler = RaySampler() | 1 | 2023-12-20 10:52:01+00:00 | 8k |
xinghaochen/TinySAM | tinysam/modeling/sam.py | [
{
"identifier": "TinyViT",
"path": "tinysam/modeling/tiny_vit_sam.py",
"snippet": "class TinyViT(nn.Module):\n def __init__(self, img_size=224, in_chans=3, num_classes=1000,\n embed_dims=[96, 192, 384, 768], depths=[2, 2, 6, 2],\n num_heads=[3, 6, 12, 24],\n window_sizes=[7, 7, 14, 7],\n mlp_ratio=4.,\n drop_rate=0.,\n drop_path_rate=0.1,\n use_checkpoint=False,\n mbconv_expand_ratio=4.0,\n local_conv_size=3,\n layer_lr_decay=1.0,\n ):\n super().__init__()\n self.img_size=img_size\n self.num_classes = num_classes\n self.depths = depths\n self.num_layers = len(depths)\n self.mlp_ratio = mlp_ratio\n\n activation = nn.GELU\n\n self.patch_embed = PatchEmbed(in_chans=in_chans,\n embed_dim=embed_dims[0],\n resolution=img_size,\n activation=activation)\n\n patches_resolution = self.patch_embed.patches_resolution\n self.patches_resolution = patches_resolution\n\n # stochastic depth\n dpr = [x.item() for x in torch.linspace(0, drop_path_rate,\n sum(depths))] # stochastic depth decay rule\n\n # build layers\n self.layers = nn.ModuleList()\n for i_layer in range(self.num_layers):\n kwargs = dict(dim=embed_dims[i_layer],\n input_resolution=(patches_resolution[0] // (2 ** (i_layer-1 if i_layer == 3 else i_layer)),\n patches_resolution[1] // (2 ** (i_layer-1 if i_layer == 3 else i_layer))),\n # input_resolution=(patches_resolution[0] // (2 ** i_layer),\n # patches_resolution[1] // (2 ** i_layer)),\n depth=depths[i_layer],\n drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],\n downsample=PatchMerging if (\n i_layer < self.num_layers - 1) else None,\n use_checkpoint=use_checkpoint,\n out_dim=embed_dims[min(\n i_layer + 1, len(embed_dims) - 1)],\n activation=activation,\n )\n if i_layer == 0:\n layer = ConvLayer(\n conv_expand_ratio=mbconv_expand_ratio,\n **kwargs,\n )\n else:\n layer = BasicLayer(\n num_heads=num_heads[i_layer],\n window_size=window_sizes[i_layer],\n mlp_ratio=self.mlp_ratio,\n drop=drop_rate,\n local_conv_size=local_conv_size,\n **kwargs)\n self.layers.append(layer)\n\n # Classifier head\n self.norm_head = nn.LayerNorm(embed_dims[-1])\n self.head = nn.Linear(\n embed_dims[-1], num_classes) if num_classes > 0 else torch.nn.Identity()\n\n # init weights\n self.apply(self._init_weights)\n self.set_layer_lr_decay(layer_lr_decay)\n self.neck = nn.Sequential(\n nn.Conv2d(\n embed_dims[-1],\n 256,\n kernel_size=1,\n bias=False,\n ),\n LayerNorm2d(256),\n nn.Conv2d(\n 256,\n 256,\n kernel_size=3,\n padding=1,\n bias=False,\n ),\n LayerNorm2d(256),\n )\n def set_layer_lr_decay(self, layer_lr_decay):\n decay_rate = layer_lr_decay\n\n # layers -> blocks (depth)\n depth = sum(self.depths)\n lr_scales = [decay_rate ** (depth - i - 1) for i in range(depth)]\n #print(\"LR SCALES:\", lr_scales)\n\n def _set_lr_scale(m, scale):\n for p in m.parameters():\n p.lr_scale = scale\n\n self.patch_embed.apply(lambda x: _set_lr_scale(x, lr_scales[0]))\n i = 0\n for layer in self.layers:\n for block in layer.blocks:\n block.apply(lambda x: _set_lr_scale(x, lr_scales[i]))\n i += 1\n if layer.downsample is not None:\n layer.downsample.apply(\n lambda x: _set_lr_scale(x, lr_scales[i - 1]))\n assert i == depth\n for m in [self.norm_head, self.head]:\n m.apply(lambda x: _set_lr_scale(x, lr_scales[-1]))\n\n for k, p in self.named_parameters():\n p.param_name = k\n\n def _check_lr_scale(m):\n for p in m.parameters():\n assert hasattr(p, 'lr_scale'), p.param_name\n\n self.apply(_check_lr_scale)\n\n def _init_weights(self, m):\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=.02)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n 
elif isinstance(m, nn.LayerNorm):\n nn.init.constant_(m.bias, 0)\n nn.init.constant_(m.weight, 1.0)\n\n @torch.jit.ignore\n def no_weight_decay_keywords(self):\n return {'attention_biases'}\n\n def forward_features(self, x):\n # x: (N, C, H, W)\n x = self.patch_embed(x)\n\n x = self.layers[0](x)\n start_i = 1\n\n for i in range(start_i, len(self.layers)):\n layer = self.layers[i]\n x = layer(x)\n B,_,C=x.size()\n x = x.view(B, 64, 64, C)\n x=x.permute(0, 3, 1, 2)\n x=self.neck(x)\n return x\n\n def forward(self, x):\n x = self.forward_features(x)\n #x = self.norm_head(x)\n #x = self.head(x)\n return x"
},
{
"identifier": "ImageEncoderViT",
"path": "tinysam/modeling/image_encoder.py",
"snippet": "class ImageEncoderViT(nn.Module):\n def __init__(\n self,\n img_size: int = 1024,\n patch_size: int = 16,\n in_chans: int = 3,\n embed_dim: int = 768,\n depth: int = 12,\n num_heads: int = 12,\n mlp_ratio: float = 4.0,\n out_chans: int = 256,\n qkv_bias: bool = True,\n norm_layer: Type[nn.Module] = nn.LayerNorm,\n act_layer: Type[nn.Module] = nn.GELU,\n use_abs_pos: bool = True,\n use_rel_pos: bool = False,\n rel_pos_zero_init: bool = True,\n window_size: int = 0,\n global_attn_indexes: Tuple[int, ...] = (),\n ) -> None:\n \"\"\"\n Args:\n img_size (int): Input image size.\n patch_size (int): Patch size.\n in_chans (int): Number of input image channels.\n embed_dim (int): Patch embedding dimension.\n depth (int): Depth of ViT.\n num_heads (int): Number of attention heads in each ViT block.\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.\n qkv_bias (bool): If True, add a learnable bias to query, key, value.\n norm_layer (nn.Module): Normalization layer.\n act_layer (nn.Module): Activation layer.\n use_abs_pos (bool): If True, use absolute positional embeddings.\n use_rel_pos (bool): If True, add relative positional embeddings to the attention map.\n rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.\n window_size (int): Window size for window attention blocks.\n global_attn_indexes (list): Indexes for blocks using global attention.\n \"\"\"\n super().__init__()\n self.img_size = img_size\n\n self.patch_embed = PatchEmbed(\n kernel_size=(patch_size, patch_size),\n stride=(patch_size, patch_size),\n in_chans=in_chans,\n embed_dim=embed_dim,\n )\n\n self.pos_embed: Optional[nn.Parameter] = None\n if use_abs_pos:\n # Initialize absolute positional embedding with pretrain image size.\n self.pos_embed = nn.Parameter(\n torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim)\n )\n\n self.blocks = nn.ModuleList()\n for i in range(depth):\n block = Block(\n dim=embed_dim,\n num_heads=num_heads,\n mlp_ratio=mlp_ratio,\n qkv_bias=qkv_bias,\n norm_layer=norm_layer,\n act_layer=act_layer,\n use_rel_pos=use_rel_pos,\n rel_pos_zero_init=rel_pos_zero_init,\n window_size=window_size if i not in global_attn_indexes else 0,\n input_size=(img_size // patch_size, img_size // patch_size),\n )\n self.blocks.append(block)\n\n self.neck = nn.Sequential(\n nn.Conv2d(\n embed_dim,\n out_chans,\n kernel_size=1,\n bias=False,\n ),\n LayerNorm2d(out_chans),\n nn.Conv2d(\n out_chans,\n out_chans,\n kernel_size=3,\n padding=1,\n bias=False,\n ),\n LayerNorm2d(out_chans),\n )\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self.patch_embed(x)\n if self.pos_embed is not None:\n x = x + self.pos_embed\n\n for blk in self.blocks:\n x = blk(x)\n\n x = self.neck(x.permute(0, 3, 1, 2))\n\n return x"
},
{
"identifier": "MaskDecoder",
"path": "tinysam/modeling/mask_decoder.py",
"snippet": "class MaskDecoder(nn.Module):\n def __init__(\n self,\n *,\n transformer_dim: int,\n transformer: nn.Module,\n num_multimask_outputs: int = 3,\n activation: Type[nn.Module] = nn.GELU,\n iou_head_depth: int = 3,\n iou_head_hidden_dim: int = 256,\n ) -> None:\n \"\"\"\n Predicts masks given an image and prompt embeddings, using a\n transformer architecture.\n\n Arguments:\n transformer_dim (int): the channel dimension of the transformer\n transformer (nn.Module): the transformer used to predict masks\n num_multimask_outputs (int): the number of masks to predict\n when disambiguating masks\n activation (nn.Module): the type of activation to use when\n upscaling masks\n iou_head_depth (int): the depth of the MLP used to predict\n mask quality\n iou_head_hidden_dim (int): the hidden dimension of the MLP\n used to predict mask quality\n \"\"\"\n super().__init__()\n self.transformer_dim = transformer_dim\n self.transformer = transformer\n\n self.num_multimask_outputs = num_multimask_outputs\n\n self.iou_token = nn.Embedding(1, transformer_dim)\n self.num_mask_tokens = num_multimask_outputs + 1\n self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)\n\n self.output_upscaling = nn.Sequential(\n nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),\n LayerNorm2d(transformer_dim // 4),\n activation(),\n nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),\n activation(),\n )\n self.output_hypernetworks_mlps = nn.ModuleList(\n [\n MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3)\n for i in range(self.num_mask_tokens)\n ]\n )\n\n self.iou_prediction_head = MLP(\n transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth\n )\n\n def forward(\n self,\n image_embeddings: torch.Tensor,\n image_pe: torch.Tensor,\n sparse_prompt_embeddings: torch.Tensor,\n dense_prompt_embeddings: torch.Tensor,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Predict masks given image and prompt embeddings.\n\n Arguments:\n image_embeddings (torch.Tensor): the embeddings from the image encoder\n image_pe (torch.Tensor): positional encoding with the shape of image_embeddings\n sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes\n dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs\n multimask_output (bool): Whether to return multiple masks or a single\n mask.\n\n Returns:\n torch.Tensor: batched predicted masks\n torch.Tensor: batched predictions of mask quality\n \"\"\"\n masks, iou_pred = self.predict_masks(\n image_embeddings=image_embeddings,\n image_pe=image_pe,\n sparse_prompt_embeddings=sparse_prompt_embeddings,\n dense_prompt_embeddings=dense_prompt_embeddings,\n )\n\n # Select the correct mask or masks for output\n mask_slice = slice(1, None)\n masks = masks[:, mask_slice, :, :]\n iou_pred = iou_pred[:, mask_slice]\n\n # Prepare output\n return masks, iou_pred\n\n def predict_masks(\n self,\n image_embeddings: torch.Tensor,\n image_pe: torch.Tensor,\n sparse_prompt_embeddings: torch.Tensor,\n dense_prompt_embeddings: torch.Tensor,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Predicts masks. 
See 'forward' for more details.\"\"\"\n # Concatenate output tokens\n output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)\n output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1)\n tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)\n\n # Expand per-image data in batch direction to be per-mask\n src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)\n src = src + dense_prompt_embeddings\n pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)\n b, c, h, w = src.shape\n\n # Run the transformer\n hs, src = self.transformer(src, pos_src, tokens)\n iou_token_out = hs[:, 0, :]\n mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]\n\n # Upscale mask embeddings and predict masks using the mask tokens\n src = src.transpose(1, 2).view(b, c, h, w)\n upscaled_embedding = self.output_upscaling(src)\n hyper_in_list: List[torch.Tensor] = []\n for i in range(self.num_mask_tokens):\n hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]))\n hyper_in = torch.stack(hyper_in_list, dim=1)\n b, c, h, w = upscaled_embedding.shape\n masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w)\n\n # Generate mask quality predictions\n iou_pred = self.iou_prediction_head(iou_token_out)\n\n return masks, iou_pred"
},
{
"identifier": "PromptEncoder",
"path": "tinysam/modeling/prompt_encoder.py",
"snippet": "class PromptEncoder(nn.Module):\n def __init__(\n self,\n embed_dim: int,\n image_embedding_size: Tuple[int, int],\n input_image_size: Tuple[int, int],\n mask_in_chans: int,\n activation: Type[nn.Module] = nn.GELU,\n ) -> None:\n \"\"\"\n Encodes prompts for input to SAM's mask decoder.\n\n Arguments:\n embed_dim (int): The prompts' embedding dimension\n image_embedding_size (tuple(int, int)): The spatial size of the\n image embedding, as (H, W).\n input_image_size (int): The padded size of the image as input\n to the image encoder, as (H, W).\n mask_in_chans (int): The number of hidden channels used for\n encoding input masks.\n activation (nn.Module): The activation to use when encoding\n input masks.\n \"\"\"\n super().__init__()\n self.embed_dim = embed_dim\n self.input_image_size = input_image_size\n self.image_embedding_size = image_embedding_size\n self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)\n\n self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners\n point_embeddings = [nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)]\n self.point_embeddings = nn.ModuleList(point_embeddings)\n self.not_a_point_embed = nn.Embedding(1, embed_dim)\n\n self.mask_input_size = (4 * image_embedding_size[0], 4 * image_embedding_size[1])\n self.mask_downscaling = nn.Sequential(\n nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),\n LayerNorm2d(mask_in_chans // 4),\n activation(),\n nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),\n LayerNorm2d(mask_in_chans),\n activation(),\n nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),\n )\n self.no_mask_embed = nn.Embedding(1, embed_dim)\n\n def get_dense_pe(self) -> torch.Tensor:\n \"\"\"\n Returns the positional encoding used to encode point prompts,\n applied to a dense set of points the shape of the image encoding.\n\n Returns:\n torch.Tensor: Positional encoding with shape\n 1x(embed_dim)x(embedding_h)x(embedding_w)\n \"\"\"\n return self.pe_layer(self.image_embedding_size).unsqueeze(0)\n\n def _embed_points(\n self,\n points: torch.Tensor,\n labels: torch.Tensor,\n pad: bool,\n ) -> torch.Tensor:\n \"\"\"Embeds point prompts.\"\"\"\n points = points + 0.5 # Shift to center of pixel\n if pad:\n padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device)\n padding_label = -torch.ones((labels.shape[0], 1), device=labels.device)\n points = torch.cat([points, padding_point], dim=1)\n labels = torch.cat([labels, padding_label], dim=1)\n point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size)\n point_embedding[labels == -1] = 0.0\n point_embedding[labels == -1] += self.not_a_point_embed.weight\n point_embedding[labels == 0] += self.point_embeddings[0].weight\n point_embedding[labels == 1] += self.point_embeddings[1].weight\n return point_embedding\n\n def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:\n \"\"\"Embeds box prompts.\"\"\"\n boxes = boxes + 0.5 # Shift to center of pixel\n coords = boxes.reshape(-1, 2, 2)\n corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size)\n corner_embedding[:, 0, :] += self.point_embeddings[2].weight\n corner_embedding[:, 1, :] += self.point_embeddings[3].weight\n return corner_embedding\n\n def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:\n \"\"\"Embeds mask inputs.\"\"\"\n mask_embedding = self.mask_downscaling(masks)\n return mask_embedding\n\n def _get_batch_size(\n self,\n points: Optional[Tuple[torch.Tensor, torch.Tensor]],\n boxes: 
Optional[torch.Tensor],\n masks: Optional[torch.Tensor],\n ) -> int:\n \"\"\"\n Gets the batch size of the output given the batch size of the input prompts.\n \"\"\"\n if points is not None:\n return points[0].shape[0]\n elif boxes is not None:\n return boxes.shape[0]\n elif masks is not None:\n return masks.shape[0]\n else:\n return 1\n\n def _get_device(self) -> torch.device:\n return self.point_embeddings[0].weight.device\n\n def forward(\n self,\n points: Optional[Tuple[torch.Tensor, torch.Tensor]],\n boxes: Optional[torch.Tensor],\n masks: Optional[torch.Tensor],\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Embeds different types of prompts, returning both sparse and dense\n embeddings.\n\n Arguments:\n points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates\n and labels to embed.\n boxes (torch.Tensor or none): boxes to embed\n masks (torch.Tensor or none): masks to embed\n\n Returns:\n torch.Tensor: sparse embeddings for the points and boxes, with shape\n BxNx(embed_dim), where N is determined by the number of input points\n and boxes.\n torch.Tensor: dense embeddings for the masks, in the shape\n Bx(embed_dim)x(embed_H)x(embed_W)\n \"\"\"\n bs = self._get_batch_size(points, boxes, masks)\n sparse_embeddings = torch.empty((bs, 0, self.embed_dim), device=self._get_device())\n if points is not None:\n coords, labels = points\n point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))\n sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)\n if boxes is not None:\n box_embeddings = self._embed_boxes(boxes)\n sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)\n\n if masks is not None:\n dense_embeddings = self._embed_masks(masks)\n else:\n dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(\n bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]\n )\n\n return sparse_embeddings, dense_embeddings"
}
] | import torch
from torch import nn
from torch.nn import functional as F
from typing import Any, Dict, List, Tuple, Union
from .tiny_vit_sam import TinyViT
from .image_encoder import ImageEncoderViT
from .mask_decoder import MaskDecoder
from .prompt_encoder import PromptEncoder | 5,529 | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
class Sam(nn.Module):
mask_threshold: float = 0.0
image_format: str = "RGB"
def __init__(
self,
image_encoder: Union[ImageEncoderViT, TinyViT],
prompt_encoder: PromptEncoder,
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
class Sam(nn.Module):
mask_threshold: float = 0.0
image_format: str = "RGB"
def __init__(
self,
image_encoder: Union[ImageEncoderViT, TinyViT],
prompt_encoder: PromptEncoder, | mask_decoder: MaskDecoder, | 2 | 2023-12-19 11:25:54+00:00 | 8k |
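A minimal sketch of how the three context modules in the record above (ImageEncoderViT, PromptEncoder, MaskDecoder) typically compose in a SAM-style forward pass. The wrapper class, argument names, and shapes here are illustrative assumptions; only the method calls follow the signatures shown in the record's snippets.

import torch
from torch import nn

class MinimalSam(nn.Module):
    """Hypothetical wrapper for illustration; not the record's actual `Sam` class."""
    def __init__(self, image_encoder, prompt_encoder, mask_decoder):
        super().__init__()
        self.image_encoder = image_encoder    # e.g. ImageEncoderViT or TinyViT
        self.prompt_encoder = prompt_encoder  # PromptEncoder
        self.mask_decoder = mask_decoder      # MaskDecoder

    @torch.no_grad()
    def forward(self, image, points=None, boxes=None, masks=None):
        # Embed the image once; prompts can then be decoded cheaply against it.
        image_embeddings = self.image_encoder(image)
        # `points` is an optional (coords, labels) tuple; sparse embeddings cover
        # points/boxes, dense embeddings cover mask inputs.
        sparse, dense = self.prompt_encoder(points=points, boxes=boxes, masks=masks)
        # Decode masks plus per-mask IoU predictions.
        masks_out, iou_pred = self.mask_decoder(
            image_embeddings=image_embeddings,
            image_pe=self.prompt_encoder.get_dense_pe(),
            sparse_prompt_embeddings=sparse,
            dense_prompt_embeddings=dense,
        )
        return masks_out, iou_pred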
dcharatan/pixelsplat | src/model/encoder/epipolar/epipolar_transformer.py | [
{
"identifier": "get_depth",
"path": "src/geometry/epipolar_lines.py",
"snippet": "def get_depth(\n origins: Float[Tensor, \"*#batch 3\"],\n directions: Float[Tensor, \"*#batch 3\"],\n xy: Float[Tensor, \"*#batch 2\"],\n extrinsics: Float[Tensor, \"*#batch 4 4\"],\n intrinsics: Float[Tensor, \"*#batch 3 3\"],\n) -> Float[Tensor, \" *batch\"]:\n \"\"\"Calculate the depths that correspond to the specified 2D points on the epipolar\n lines defined by the origins and directions. The extrinsics and intrinsics are for\n the images the 2D points lie on.\n \"\"\"\n xyz = lift_to_3d(origins, directions, xy, extrinsics, intrinsics)\n return (xyz - origins).norm(dim=-1)"
},
{
"identifier": "get_cfg",
"path": "src/global_cfg.py",
"snippet": "def get_cfg() -> DictConfig:\n global cfg\n return cfg"
},
{
"identifier": "PositionalEncoding",
"path": "src/model/encodings/positional_encoding.py",
"snippet": "class PositionalEncoding(nn.Module):\n \"\"\"For the sake of simplicity, this encodes values in the range [0, 1].\"\"\"\n\n frequencies: Float[Tensor, \"frequency phase\"]\n phases: Float[Tensor, \"frequency phase\"]\n\n def __init__(self, num_octaves: int):\n super().__init__()\n octaves = torch.arange(num_octaves).float()\n\n # The lowest frequency has a period of 1.\n frequencies = 2 * torch.pi * 2**octaves\n frequencies = repeat(frequencies, \"f -> f p\", p=2)\n self.register_buffer(\"frequencies\", frequencies, persistent=False)\n\n # Choose the phases to match sine and cosine.\n phases = torch.tensor([0, 0.5 * torch.pi], dtype=torch.float32)\n phases = repeat(phases, \"p -> f p\", f=num_octaves)\n self.register_buffer(\"phases\", phases, persistent=False)\n\n def forward(\n self,\n samples: Float[Tensor, \"*batch dim\"],\n ) -> Float[Tensor, \"*batch embedded_dim\"]:\n samples = einsum(samples, self.frequencies, \"... d, f p -> ... d f p\")\n return rearrange(torch.sin(samples + self.phases), \"... d f p -> ... (d f p)\")\n\n def d_out(self, dimensionality: int):\n return self.frequencies.numel() * dimensionality"
},
{
"identifier": "Transformer",
"path": "src/model/transformer/transformer.py",
"snippet": "class Transformer(nn.Module):\n def __init__(\n self,\n dim,\n depth,\n heads,\n dim_head,\n mlp_dim,\n dropout=0.0,\n selfatt=True,\n kv_dim=None,\n feed_forward_layer=FeedForward,\n ):\n super().__init__()\n self.layers = nn.ModuleList([])\n for _ in range(depth):\n self.layers.append(\n nn.ModuleList(\n [\n PreNorm(\n dim,\n Attention(\n dim,\n heads=heads,\n dim_head=dim_head,\n dropout=dropout,\n selfatt=selfatt,\n kv_dim=kv_dim,\n ),\n ),\n PreNorm(dim, feed_forward_layer(dim, mlp_dim, dropout=dropout)),\n ]\n )\n )\n\n def forward(self, x, z=None, **kwargs):\n for attn, ff in self.layers:\n x = attn(x, z=z) + x\n x = ff(x, **kwargs) + x\n return x"
},
{
"identifier": "depth_to_relative_disparity",
"path": "src/model/encoder/epipolar/conversions.py",
"snippet": "def depth_to_relative_disparity(\n depth: Float[Tensor, \"*#batch\"],\n near: Float[Tensor, \"*#batch\"],\n far: Float[Tensor, \"*#batch\"],\n eps: float = 1e-10,\n) -> Float[Tensor, \" *batch\"]:\n \"\"\"Convert depth to relative disparity, where 0 is near and 1 is far\"\"\"\n disp_near = 1 / (near + eps)\n disp_far = 1 / (far + eps)\n disp = 1 / (depth + eps)\n return 1 - (disp - disp_far) / (disp_near - disp_far + eps)"
},
{
"identifier": "EpipolarSampler",
"path": "src/model/encoder/epipolar/epipolar_sampler.py",
"snippet": "class EpipolarSampler(nn.Module):\n num_samples: int\n index_v: Index\n transpose_v: Index\n transpose_ov: Index\n\n def __init__(\n self,\n num_views: int,\n num_samples: int,\n ) -> None:\n super().__init__()\n self.num_samples = num_samples\n\n # Generate indices needed to sample only other views.\n _, index_v = generate_heterogeneous_index(num_views)\n t_v, t_ov = generate_heterogeneous_index_transpose(num_views)\n self.register_buffer(\"index_v\", index_v, persistent=False)\n self.register_buffer(\"transpose_v\", t_v, persistent=False)\n self.register_buffer(\"transpose_ov\", t_ov, persistent=False)\n\n def forward(\n self,\n images: Float[Tensor, \"batch view channel height width\"],\n extrinsics: Float[Tensor, \"batch view 4 4\"],\n intrinsics: Float[Tensor, \"batch view 3 3\"],\n near: Float[Tensor, \"batch view\"],\n far: Float[Tensor, \"batch view\"],\n ) -> EpipolarSampling:\n device = images.device\n b, v, _, _, _ = images.shape\n\n # Generate the rays that are projected onto other views.\n xy_ray, origins, directions = self.generate_image_rays(\n images, extrinsics, intrinsics\n )\n\n # Select the camera extrinsics and intrinsics to project onto. For each context\n # view, this means all other context views in the batch.\n projection = project_rays(\n rearrange(origins, \"b v r xyz -> b v () r xyz\"),\n rearrange(directions, \"b v r xyz -> b v () r xyz\"),\n rearrange(self.collect(extrinsics), \"b v ov i j -> b v ov () i j\"),\n rearrange(self.collect(intrinsics), \"b v ov i j -> b v ov () i j\"),\n rearrange(near, \"b v -> b v () ()\"),\n rearrange(far, \"b v -> b v () ()\"),\n )\n\n\n # Generate sample points.\n s = self.num_samples\n sample_depth = (torch.arange(s, device=device) + 0.5) / s\n sample_depth = rearrange(sample_depth, \"s -> s ()\")\n xy_min = projection[\"xy_min\"].nan_to_num(posinf=0, neginf=0) \n xy_min = xy_min * projection[\"overlaps_image\"][..., None]\n xy_min = rearrange(xy_min, \"b v ov r xy -> b v ov r () xy\")\n xy_max = projection[\"xy_max\"].nan_to_num(posinf=0, neginf=0) \n xy_max = xy_max * projection[\"overlaps_image\"][..., None]\n xy_max = rearrange(xy_max, \"b v ov r xy -> b v ov r () xy\")\n xy_sample = xy_min + sample_depth * (xy_max - xy_min)\n\n # The samples' shape is (batch, view, other_view, ...). However, before the\n # transpose, the view dimension refers to the view from which the ray is cast,\n # not the view from which samples are drawn. Thus, we need to transpose the\n # samples so that the view dimension refers to the view from which samples are\n # drawn. If the diagonal weren't removed for efficiency, this would be a literal\n # transpose. 
In our case, it's as if the diagonal were re-added, the transpose\n # were taken, and the diagonal were then removed again.\n samples = self.transpose(xy_sample)\n samples = F.grid_sample(\n rearrange(images, \"b v c h w -> (b v) c h w\"),\n rearrange(2 * samples - 1, \"b v ov r s xy -> (b v) (ov r s) () xy\"),\n mode=\"bilinear\",\n padding_mode=\"zeros\",\n align_corners=False,\n )\n samples = rearrange(\n samples, \"(b v) c (ov r s) () -> b v ov r s c\", b=b, v=v, ov=v - 1, s=s\n )\n samples = self.transpose(samples)\n\n # Zero out invalid samples.\n samples = samples * projection[\"overlaps_image\"][..., None, None]\n\n half_span = 0.5 / s\n return EpipolarSampling(\n features=samples,\n valid=projection[\"overlaps_image\"],\n xy_ray=xy_ray,\n xy_sample=xy_sample,\n xy_sample_near=xy_min + (sample_depth - half_span) * (xy_max - xy_min),\n xy_sample_far=xy_min + (sample_depth + half_span) * (xy_max - xy_min),\n origins=origins,\n directions=directions,\n )\n\n def generate_image_rays(\n self,\n images: Float[Tensor, \"batch view channel height width\"],\n extrinsics: Float[Tensor, \"batch view 4 4\"],\n intrinsics: Float[Tensor, \"batch view 3 3\"],\n ) -> tuple[\n Float[Tensor, \"batch view ray 2\"], # xy\n Float[Tensor, \"batch view ray 3\"], # origins\n Float[Tensor, \"batch view ray 3\"], # directions\n ]:\n \"\"\"Generate the rays along which Gaussians are defined. For now, these rays are\n simply arranged in a grid.\n \"\"\"\n b, v, _, h, w = images.shape\n xy, _ = sample_image_grid((h, w), device=images.device)\n origins, directions = get_world_rays(\n rearrange(xy, \"h w xy -> (h w) xy\"),\n rearrange(extrinsics, \"b v i j -> b v () i j\"),\n rearrange(intrinsics, \"b v i j -> b v () i j\"),\n )\n return repeat(xy, \"h w xy -> b v (h w) xy\", b=b, v=v), origins, directions\n\n def transpose(\n self,\n x: Shaped[Tensor, \"batch view other_view *rest\"],\n ) -> Shaped[Tensor, \"batch view other_view *rest\"]:\n b, v, ov, *_ = x.shape\n t_b = torch.arange(b, device=x.device)\n t_b = repeat(t_b, \"b -> b v ov\", v=v, ov=ov)\n t_v = repeat(self.transpose_v, \"v ov -> b v ov\", b=b)\n t_ov = repeat(self.transpose_ov, \"v ov -> b v ov\", b=b)\n return x[t_b, t_v, t_ov]\n\n def collect(\n self,\n target: Shaped[Tensor, \"batch view ...\"],\n ) -> Shaped[Tensor, \"batch view view-1 ...\"]:\n b, v, *_ = target.shape\n index_b = torch.arange(b, device=target.device)\n index_b = repeat(index_b, \"b -> b v ov\", v=v, ov=v - 1)\n index_v = repeat(self.index_v, \"v ov -> b v ov\", b=b)\n return target[index_b, index_v]"
},
{
"identifier": "EpipolarSampling",
"path": "src/model/encoder/epipolar/epipolar_sampler.py",
"snippet": "class EpipolarSampling:\n features: Float[Tensor, \"batch view other_view ray sample channel\"]\n valid: Bool[Tensor, \"batch view other_view ray\"]\n xy_ray: Float[Tensor, \"batch view ray 2\"]\n xy_sample: Float[Tensor, \"batch view other_view ray sample 2\"]\n xy_sample_near: Float[Tensor, \"batch view other_view ray sample 2\"]\n xy_sample_far: Float[Tensor, \"batch view other_view ray sample 2\"]\n origins: Float[Tensor, \"batch view ray 3\"]\n directions: Float[Tensor, \"batch view ray 3\"]"
},
{
"identifier": "ImageSelfAttention",
"path": "src/model/encoder/epipolar/image_self_attention.py",
"snippet": "class ImageSelfAttention(nn.Module):\n positional_encoding: nn.Sequential\n patch_embedder: nn.Sequential\n transformer: Transformer\n\n def __init__(\n self,\n cfg: ImageSelfAttentionCfg,\n d_in: int,\n d_out: int,\n ):\n super().__init__()\n self.positional_encoding = nn.Sequential(\n (pe := PositionalEncoding(cfg.num_octaves)),\n nn.Linear(pe.d_out(2), cfg.d_token),\n )\n self.patch_embedder = nn.Sequential(\n nn.Conv2d(d_in, cfg.d_token, cfg.patch_size, cfg.patch_size),\n nn.ReLU(),\n )\n self.transformer = Transformer(\n cfg.d_token,\n cfg.num_layers,\n cfg.num_heads,\n cfg.d_dot,\n cfg.d_mlp,\n )\n self.resampler = nn.ConvTranspose2d(\n cfg.d_token,\n d_out,\n cfg.patch_size,\n cfg.patch_size,\n )\n\n def forward(\n self,\n image: Float[Tensor, \"batch d_in height width\"],\n ) -> Float[Tensor, \"batch d_out height width\"]:\n # Embed patches so they become tokens.\n tokens = self.patch_embedder.forward(image)\n\n # Append positional information to the tokens.\n _, _, nh, nw = tokens.shape\n xy, _ = sample_image_grid((nh, nw), device=image.device)\n xy = self.positional_encoding.forward(xy)\n tokens = tokens + rearrange(xy, \"nh nw c -> c nh nw\")\n\n # Put the tokens through a transformer.\n _, _, nh, nw = tokens.shape\n tokens = rearrange(tokens, \"b c nh nw -> b (nh nw) c\")\n tokens = self.transformer.forward(tokens)\n\n # Resample the tokens back to the original resolution.\n tokens = rearrange(tokens, \"b (nh nw) c -> b c nh nw\", nh=nh, nw=nw)\n tokens = self.resampler.forward(tokens)\n\n return tokens"
},
{
"identifier": "ImageSelfAttentionCfg",
"path": "src/model/encoder/epipolar/image_self_attention.py",
"snippet": "class ImageSelfAttentionCfg:\n patch_size: int\n num_octaves: int\n num_layers: int\n num_heads: int\n d_token: int\n d_dot: int\n d_mlp: int"
}
] | from dataclasses import dataclass
from functools import partial
from typing import Optional
from einops import rearrange
from jaxtyping import Float
from torch import Tensor, nn
from ....geometry.epipolar_lines import get_depth
from ....global_cfg import get_cfg
from ...encodings.positional_encoding import PositionalEncoding
from ...transformer.transformer import Transformer
from .conversions import depth_to_relative_disparity
from .epipolar_sampler import EpipolarSampler, EpipolarSampling
from .image_self_attention import ImageSelfAttention, ImageSelfAttentionCfg | 4,280 |
@dataclass
class EpipolarTransformerCfg:
self_attention: ImageSelfAttentionCfg
num_octaves: int
num_layers: int
num_heads: int
num_samples: int
d_dot: int
d_mlp: int
downscale: int
class EpipolarTransformer(nn.Module):
cfg: EpipolarTransformerCfg
epipolar_sampler: EpipolarSampler
depth_encoding: nn.Sequential
transformer: Transformer
downscaler: Optional[nn.Conv2d]
upscaler: Optional[nn.ConvTranspose2d]
upscale_refinement: Optional[nn.Sequential]
def __init__(
self,
cfg: EpipolarTransformerCfg,
d_in: int,
) -> None:
super().__init__()
self.cfg = cfg
self.epipolar_sampler = EpipolarSampler(
get_cfg().dataset.view_sampler.num_context_views,
cfg.num_samples,
)
if self.cfg.num_octaves > 0:
self.depth_encoding = nn.Sequential(
(pe := PositionalEncoding(cfg.num_octaves)),
nn.Linear(pe.d_out(1), d_in),
)
feed_forward_layer = partial(ConvFeedForward, cfg.self_attention)
self.transformer = Transformer(
d_in,
cfg.num_layers,
cfg.num_heads,
cfg.d_dot,
cfg.d_mlp,
selfatt=False,
kv_dim=d_in,
feed_forward_layer=feed_forward_layer,
)
if cfg.downscale:
self.downscaler = nn.Conv2d(d_in, d_in, cfg.downscale, cfg.downscale)
self.upscaler = nn.ConvTranspose2d(d_in, d_in, cfg.downscale, cfg.downscale)
self.upscale_refinement = nn.Sequential(
nn.Conv2d(d_in, d_in * 2, 7, 1, 3),
nn.GELU(),
nn.Conv2d(d_in * 2, d_in, 7, 1, 3),
)
def forward(
self,
features: Float[Tensor, "batch view channel height width"],
extrinsics: Float[Tensor, "batch view 4 4"],
intrinsics: Float[Tensor, "batch view 3 3"],
near: Float[Tensor, "batch view"],
far: Float[Tensor, "batch view"],
) -> tuple[Float[Tensor, "batch view channel height width"], EpipolarSampling,]:
b, v, _, h, w = features.shape
# If needed, apply downscaling.
if self.downscaler is not None:
features = rearrange(features, "b v c h w -> (b v) c h w")
features = self.downscaler(features)
features = rearrange(features, "(b v) c h w -> b v c h w", b=b, v=v)
# Get the samples used for epipolar attention.
sampling = self.epipolar_sampler.forward(
features, extrinsics, intrinsics, near, far
)
if self.cfg.num_octaves > 0:
# Compute positionally encoded depths for the features.
collect = self.epipolar_sampler.collect
|
@dataclass
class EpipolarTransformerCfg:
self_attention: ImageSelfAttentionCfg
num_octaves: int
num_layers: int
num_heads: int
num_samples: int
d_dot: int
d_mlp: int
downscale: int
class EpipolarTransformer(nn.Module):
cfg: EpipolarTransformerCfg
epipolar_sampler: EpipolarSampler
depth_encoding: nn.Sequential
transformer: Transformer
downscaler: Optional[nn.Conv2d]
upscaler: Optional[nn.ConvTranspose2d]
upscale_refinement: Optional[nn.Sequential]
def __init__(
self,
cfg: EpipolarTransformerCfg,
d_in: int,
) -> None:
super().__init__()
self.cfg = cfg
self.epipolar_sampler = EpipolarSampler(
get_cfg().dataset.view_sampler.num_context_views,
cfg.num_samples,
)
if self.cfg.num_octaves > 0:
self.depth_encoding = nn.Sequential(
(pe := PositionalEncoding(cfg.num_octaves)),
nn.Linear(pe.d_out(1), d_in),
)
feed_forward_layer = partial(ConvFeedForward, cfg.self_attention)
self.transformer = Transformer(
d_in,
cfg.num_layers,
cfg.num_heads,
cfg.d_dot,
cfg.d_mlp,
selfatt=False,
kv_dim=d_in,
feed_forward_layer=feed_forward_layer,
)
if cfg.downscale:
self.downscaler = nn.Conv2d(d_in, d_in, cfg.downscale, cfg.downscale)
self.upscaler = nn.ConvTranspose2d(d_in, d_in, cfg.downscale, cfg.downscale)
self.upscale_refinement = nn.Sequential(
nn.Conv2d(d_in, d_in * 2, 7, 1, 3),
nn.GELU(),
nn.Conv2d(d_in * 2, d_in, 7, 1, 3),
)
def forward(
self,
features: Float[Tensor, "batch view channel height width"],
extrinsics: Float[Tensor, "batch view 4 4"],
intrinsics: Float[Tensor, "batch view 3 3"],
near: Float[Tensor, "batch view"],
far: Float[Tensor, "batch view"],
) -> tuple[Float[Tensor, "batch view channel height width"], EpipolarSampling,]:
b, v, _, h, w = features.shape
# If needed, apply downscaling.
if self.downscaler is not None:
features = rearrange(features, "b v c h w -> (b v) c h w")
features = self.downscaler(features)
features = rearrange(features, "(b v) c h w -> b v c h w", b=b, v=v)
# Get the samples used for epipolar attention.
sampling = self.epipolar_sampler.forward(
features, extrinsics, intrinsics, near, far
)
if self.cfg.num_octaves > 0:
# Compute positionally encoded depths for the features.
collect = self.epipolar_sampler.collect | depths = get_depth( | 0 | 2023-12-20 19:45:59+00:00 | 8k |
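A short, hypothetical sketch of how a row like the one above can be scored for next-line prediction. The field names follow the table schema, while `generate` stands in for whatever completion model is being evaluated; the exact prompt layout is an assumption.

def score_record(record, generate):
    # Rebuild the model input: retrieved context snippets, then imports, then the cropped file body.
    context_block = "\n".join(item["snippet"] for item in record["context"])
    prompt = "\n".join([context_block, record["import_statement"], record["cropped_code"]])
    completion = generate(prompt)
    predicted = completion.splitlines()[0].strip() if completion.strip() else ""
    # Exact match against the gold continuation, e.g. "depths = get_depth(" for the record above.
    return predicted == record["next_line"].strip()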
hutaiHang/Faster-Diffusion | controlnet_demo.py | [
{
"identifier": "register_controlnet_pipeline",
"path": "utils_sd.py",
"snippet": "def register_controlnet_pipeline(pipe):\r\n def new_call(self):\r\n @torch.no_grad()\r\n def call(\r\n prompt: Union[str, List[str]] = None,\r\n image: Union[\r\n torch.FloatTensor,\r\n PIL.Image.Image,\r\n np.ndarray,\r\n List[torch.FloatTensor],\r\n List[PIL.Image.Image],\r\n List[np.ndarray],\r\n ] = None,\r\n height: Optional[int] = None,\r\n width: Optional[int] = None,\r\n num_inference_steps: int = 50,\r\n guidance_scale: float = 7.5,\r\n negative_prompt: Optional[Union[str, List[str]]] = None,\r\n num_images_per_prompt: Optional[int] = 1,\r\n eta: float = 0.0,\r\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\r\n latents: Optional[torch.FloatTensor] = None,\r\n prompt_embeds: Optional[torch.FloatTensor] = None,\r\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\r\n output_type: Optional[str] = \"pil\",\r\n return_dict: bool = True,\r\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\r\n callback_steps: int = 1,\r\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\r\n controlnet_conditioning_scale: Union[float, List[float]] = 1.0,\r\n guess_mode: bool = False,\r\n ):\r\n # 1. Check inputs. Raise error if not correct\r\n self.check_inputs(\r\n prompt,\r\n image,\r\n callback_steps,\r\n negative_prompt,\r\n prompt_embeds,\r\n negative_prompt_embeds,\r\n controlnet_conditioning_scale,\r\n )\r\n\r\n # 2. Define call parameters\r\n if prompt is not None and isinstance(prompt, str):\r\n batch_size = 1\r\n elif prompt is not None and isinstance(prompt, list):\r\n batch_size = len(prompt)\r\n else:\r\n batch_size = prompt_embeds.shape[0]\r\n\r\n device = self._execution_device\r\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\r\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\r\n # corresponds to doing no classifier free guidance.\r\n do_classifier_free_guidance = guidance_scale > 1.0\r\n\r\n controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet\r\n\r\n if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):\r\n controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)\r\n\r\n global_pool_conditions = (\r\n controlnet.config.global_pool_conditions\r\n if isinstance(controlnet, ControlNetModel)\r\n else controlnet.nets[0].config.global_pool_conditions\r\n )\r\n guess_mode = guess_mode or global_pool_conditions\r\n\r\n # 3. Encode input prompt\r\n text_encoder_lora_scale = (\r\n cross_attention_kwargs.get(\"scale\", None) if cross_attention_kwargs is not None else None\r\n )\r\n prompt_embeds = self._encode_prompt(\r\n prompt,\r\n device,\r\n num_images_per_prompt,\r\n do_classifier_free_guidance,\r\n negative_prompt,\r\n prompt_embeds=prompt_embeds,\r\n negative_prompt_embeds=negative_prompt_embeds,\r\n lora_scale=text_encoder_lora_scale,\r\n )\r\n\r\n # 4. 
Prepare image\r\n if isinstance(controlnet, ControlNetModel):\r\n image = self.prepare_image(\r\n image=image,\r\n width=width,\r\n height=height,\r\n batch_size=batch_size * num_images_per_prompt,\r\n num_images_per_prompt=num_images_per_prompt,\r\n device=device,\r\n dtype=controlnet.dtype,\r\n do_classifier_free_guidance=do_classifier_free_guidance,\r\n guess_mode=guess_mode,\r\n )\r\n height, width = image.shape[-2:]\r\n elif isinstance(controlnet, MultiControlNetModel):\r\n images = []\r\n\r\n for image_ in image:\r\n image_ = self.prepare_image(\r\n image=image_,\r\n width=width,\r\n height=height,\r\n batch_size=batch_size * num_images_per_prompt,\r\n num_images_per_prompt=num_images_per_prompt,\r\n device=device,\r\n dtype=controlnet.dtype,\r\n do_classifier_free_guidance=do_classifier_free_guidance,\r\n guess_mode=guess_mode,\r\n )\r\n\r\n images.append(image_)\r\n\r\n image = images\r\n height, width = image[0].shape[-2:]\r\n else:\r\n assert False\r\n\r\n # 5. Prepare timesteps\r\n self.scheduler.set_timesteps(num_inference_steps, device=device)\r\n timesteps = self.scheduler.timesteps\r\n\r\n # 6. Prepare latent variables\r\n num_channels_latents = self.unet.config.in_channels\r\n latents = self.prepare_latents(\r\n batch_size * num_images_per_prompt,\r\n num_channels_latents,\r\n height,\r\n width,\r\n prompt_embeds.dtype,\r\n device,\r\n generator,\r\n latents,\r\n )\r\n self.init_latent = latents.detach().clone()\r\n # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline\r\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\r\n\r\n # 8. Denoising loop\r\n #-------------------------------------------------------------\r\n all_steps = len(self.scheduler.timesteps)\r\n curr_span = 1\r\n curr_step = 0\r\n\r\n idx = 1\r\n keytime = [0,1,2,3,5,10,15,25,35]\r\n keytime.append(all_steps)\r\n while curr_step<all_steps:\r\n register_time(self.unet, curr_step)\r\n\r\n if curr_span>0:\r\n time_ls = []\r\n for i in range(curr_step, curr_step+curr_span):\r\n if i<all_steps:\r\n time_ls.append(self.scheduler.timesteps[i])\r\n else:\r\n break\r\n\r\n ##--------------------------------\r\n latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents\r\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, time_ls[0])\r\n \r\n if curr_step in [0,1,2,3,5,10,15,25,35]:\r\n # controlnet(s) inference\r\n control_model_input = latent_model_input\r\n controlnet_prompt_embeds = prompt_embeds\r\n\r\n down_block_res_samples, mid_block_res_sample = self.controlnet(\r\n control_model_input,\r\n time_ls[0],\r\n encoder_hidden_states=controlnet_prompt_embeds,\r\n controlnet_cond=image,\r\n conditioning_scale=controlnet_conditioning_scale,\r\n guess_mode=guess_mode,\r\n return_dict=False,\r\n )\r\n\r\n\r\n #----------------------save controlnet feature-------------------------\r\n #useless, should delete\r\n # setattr(self, 'downres_samples', deepcopy(down_block_res_samples))\r\n # setattr(self, 'midres_sample', mid_block_res_sample.detach().clone())\r\n #-----------------------save controlnet feature------------------------\r\n else:\r\n down_block_res_samples = None #self.downres_samples\r\n mid_block_res_sample = None #self.midres_sample\r\n # predict the noise residual\r\n noise_pred = self.unet(\r\n latent_model_input,\r\n time_ls,\r\n encoder_hidden_states=prompt_embeds,\r\n cross_attention_kwargs=cross_attention_kwargs,\r\n down_block_additional_residuals=down_block_res_samples,\r\n 
mid_block_additional_residual=mid_block_res_sample,\r\n return_dict=False,\r\n )[0]\r\n\r\n # perform guidance\r\n if do_classifier_free_guidance:\r\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\r\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\r\n\r\n # compute the previous noisy sample x_t -> x_t-1\r\n\r\n if isinstance(time_ls, list):\r\n step_span = len(time_ls)\r\n bs = noise_pred.shape[0]\r\n bs_perstep = bs//step_span\r\n\r\n denoised_latent = latents\r\n for i, timestep in enumerate(time_ls):\r\n if timestep/1000 < 0.5:\r\n denoised_latent = denoised_latent + 0.003*self.init_latent \r\n curr_noise = noise_pred[i*bs_perstep:(i+1)*bs_perstep]\r\n denoised_latent = self.scheduler.step(curr_noise, timestep, denoised_latent, **extra_step_kwargs, return_dict=False)[0]\r\n \r\n latents = denoised_latent\r\n ##----------------------------------------\r\n curr_step += curr_span\r\n idx += 1\r\n if curr_step<all_steps:\r\n curr_span = keytime[idx] - keytime[idx-1]\r\n\r\n #-------------------------------------------------------------\r\n # If we do sequential model offloading, let's offload unet and controlnet\r\n # manually for max memory savings\r\n if hasattr(self, \"final_offload_hook\") and self.final_offload_hook is not None:\r\n self.unet.to(\"cpu\")\r\n self.controlnet.to(\"cpu\")\r\n torch.cuda.empty_cache()\r\n\r\n if not output_type == \"latent\":\r\n image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]\r\n image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)\r\n else:\r\n image = latents\r\n has_nsfw_concept = None\r\n\r\n if has_nsfw_concept is None:\r\n do_denormalize = [True] * image.shape[0]\r\n else:\r\n do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]\r\n\r\n image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)\r\n\r\n # Offload last model to CPU\r\n if hasattr(self, \"final_offload_hook\") and self.final_offload_hook is not None:\r\n self.final_offload_hook.offload()\r\n\r\n if not return_dict:\r\n return (image, has_nsfw_concept)\r\n\r\n return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)\r\n return call\r\n pipe.call = new_call(pipe)\r"
},
{
"identifier": "register_faster_forward",
"path": "utils_sd.py",
"snippet": "def register_faster_forward(model, mod = '50ls'):\r\n def faster_forward(self):\r\n def forward(\r\n sample: torch.FloatTensor,\r\n timestep: Union[torch.Tensor, float, int],\r\n encoder_hidden_states: torch.Tensor,\r\n class_labels: Optional[torch.Tensor] = None,\r\n timestep_cond: Optional[torch.Tensor] = None,\r\n attention_mask: Optional[torch.Tensor] = None,\r\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\r\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\r\n mid_block_additional_residual: Optional[torch.Tensor] = None,\r\n return_dict: bool = True,\r\n ) -> Union[UNet2DConditionOutput, Tuple]:\r\n r\"\"\"\r\n Args:\r\n sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor\r\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\r\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\r\n return_dict (`bool`, *optional*, defaults to `True`):\r\n Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.\r\n cross_attention_kwargs (`dict`, *optional*):\r\n A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under\r\n `self.processor` in\r\n [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).\r\n\r\n Returns:\r\n [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:\r\n [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. When\r\n returning a tuple, the first element is the sample tensor.\r\n \"\"\"\r\n # By default samples have to be AT least a multiple of the overall upsampling factor.\r\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layers).\r\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\r\n # on the fly if necessary.\r\n default_overall_up_factor = 2**self.num_upsamplers\r\n\r\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\r\n forward_upsample_size = False\r\n upsample_size = None\r\n\r\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\r\n logger.info(\"Forward upsample size to force interpolation output size.\")\r\n forward_upsample_size = True\r\n\r\n # prepare attention_mask\r\n if attention_mask is not None:\r\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\r\n attention_mask = attention_mask.unsqueeze(1)\r\n\r\n # 0. center input if necessary\r\n if self.config.center_input_sample:\r\n sample = 2 * sample - 1.0\r\n\r\n # 1. time\r\n if isinstance(timestep, list):\r\n timesteps = timestep[0]\r\n step = len(timestep)\r\n else:\r\n timesteps = timestep\r\n step = 1\r\n if not torch.is_tensor(timesteps) and (not isinstance(timesteps,list)):\r\n # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can\r\n # This would be a good case for the `match` statement (Python 3.10+)\r\n is_mps = sample.device.type == \"mps\"\r\n if isinstance(timestep, float):\r\n dtype = torch.float32 if is_mps else torch.float64\r\n else:\r\n dtype = torch.int32 if is_mps else torch.int64\r\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\r\n elif (not isinstance(timesteps,list)) and len(timesteps.shape) == 0:\r\n timesteps = timesteps[None].to(sample.device)\r\n \r\n if (not isinstance(timesteps,list)) and len(timesteps.shape) == 1:\r\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\r\n timesteps = timesteps.expand(sample.shape[0])\r\n elif isinstance(timesteps, list):\r\n #timesteps list, such as [981,961,941]\r\n timesteps = warpped_timestep(timesteps, sample.shape[0]).to(sample.device)\r\n t_emb = self.time_proj(timesteps)\r\n\r\n # `Timesteps` does not contain any weights and will always return f32 tensors\r\n # but time_embedding might actually be running in fp16. so we need to cast here.\r\n # there might be better ways to encapsulate this.\r\n t_emb = t_emb.to(dtype=self.dtype)\r\n\r\n emb = self.time_embedding(t_emb, timestep_cond)\r\n\r\n if self.class_embedding is not None:\r\n if class_labels is None:\r\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\r\n\r\n if self.config.class_embed_type == \"timestep\":\r\n class_labels = self.time_proj(class_labels)\r\n\r\n # `Timesteps` does not contain any weights and will always return f32 tensors\r\n # there might be better ways to encapsulate this.\r\n class_labels = class_labels.to(dtype=sample.dtype)\r\n\r\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\r\n\r\n if self.config.class_embeddings_concat:\r\n emb = torch.cat([emb, class_emb], dim=-1)\r\n else:\r\n emb = emb + class_emb\r\n\r\n if self.config.addition_embed_type == \"text\":\r\n aug_emb = self.add_embedding(encoder_hidden_states)\r\n emb = emb + aug_emb\r\n\r\n if self.time_embed_act is not None:\r\n emb = self.time_embed_act(emb)\r\n\r\n if self.encoder_hid_proj is not None:\r\n encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states)\r\n\r\n #===============\r\n order = self.order #timestep, start by 0\r\n #===============\r\n ipow = int(np.sqrt(9 + 8*order))\r\n cond = order in [0, 1, 2, 3, 5, 10, 15, 25, 35]\r\n if isinstance(mod, int):\r\n cond = order % mod == 0\r\n elif mod == \"pro\":\r\n cond = ipow * ipow == (9 + 8 * order)\r\n elif mod == \"50ls\":\r\n cond = order in [0, 1, 2, 3, 5, 10, 15, 25, 35] #40 #[0,1,2,3, 5, 10, 15] #[0, 1, 2, 3, 5, 10, 15, 25, 35, 40]\r\n elif mod == \"50ls2\":\r\n cond = order in [0, 10, 11, 12, 15, 20, 25, 30,35,45] #40 #[0,1,2,3, 5, 10, 15] #[0, 1, 2, 3, 5, 10, 15, 25, 35, 40]\r\n elif mod == \"50ls3\":\r\n cond = order in [0, 20, 25, 30,35,45,46,47,48,49] #40 #[0,1,2,3, 5, 10, 15] #[0, 1, 2, 3, 5, 10, 15, 25, 35, 40]\r\n elif mod == \"50ls4\":\r\n cond = order in [0, 9, 13, 14, 15, 28, 29, 32, 36,45] #40 #[0,1,2,3, 5, 10, 15] #[0, 1, 2, 3, 5, 10, 15, 25, 35, 40]\r\n elif mod == \"100ls\":\r\n cond = order > 85 or order < 10 or order % 5 == 0\r\n elif mod == \"75ls\":\r\n cond = order > 65 or order < 10 or order % 5 == 0\r\n elif mod == \"s2\":\r\n cond = order < 20 or order > 40 or order % 2 == 0\r\n\r\n if cond:\r\n # print('current timestep:', order)\r\n # 2. pre-process\r\n sample = self.conv_in(sample)\r\n\r\n # 3. 
down\r\n down_block_res_samples = (sample,)\r\n for downsample_block in self.down_blocks:\r\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\r\n sample, res_samples = downsample_block(\r\n hidden_states=sample,\r\n temb=emb,\r\n encoder_hidden_states=encoder_hidden_states,\r\n attention_mask=attention_mask,\r\n cross_attention_kwargs=cross_attention_kwargs,\r\n )\r\n else:\r\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb)\r\n\r\n down_block_res_samples += res_samples\r\n\r\n if down_block_additional_residuals is not None:\r\n new_down_block_res_samples = ()\r\n\r\n for down_block_res_sample, down_block_additional_residual in zip(\r\n down_block_res_samples, down_block_additional_residuals\r\n ):\r\n down_block_res_sample = down_block_res_sample + down_block_additional_residual\r\n new_down_block_res_samples += (down_block_res_sample,)\r\n\r\n down_block_res_samples = new_down_block_res_samples\r\n\r\n # 4. mid\r\n if self.mid_block is not None:\r\n sample = self.mid_block(\r\n sample,\r\n emb,\r\n encoder_hidden_states=encoder_hidden_states,\r\n attention_mask=attention_mask,\r\n cross_attention_kwargs=cross_attention_kwargs,\r\n )\r\n\r\n if mid_block_additional_residual is not None:\r\n sample = sample + mid_block_additional_residual\r\n\r\n #----------------------save feature-------------------------\r\n # setattr(self, 'skip_feature', (tmp_sample.clone() for tmp_sample in down_block_res_samples))\r\n setattr(self, 'skip_feature', deepcopy(down_block_res_samples))\r\n setattr(self, 'toup_feature', sample.detach().clone())\r\n #-----------------------save feature------------------------\r\n\r\n\r\n\r\n #-------------------expand feature for parallel---------------\r\n if isinstance(timestep, list):\r\n #timesteps list, such as [981,961,941]\r\n timesteps = warpped_timestep(timestep, sample.shape[0]).to(sample.device)\r\n t_emb = self.time_proj(timesteps)\r\n\r\n # `Timesteps` does not contain any weights and will always return f32 tensors\r\n # but time_embedding might actually be running in fp16. so we need to cast here.\r\n # there might be better ways to encapsulate this.\r\n t_emb = t_emb.to(dtype=self.dtype)\r\n\r\n emb = self.time_embedding(t_emb, timestep_cond)\r\n\r\n down_block_res_samples = warpped_skip_feature(down_block_res_samples, step)\r\n sample = warpped_feature(sample, step)\r\n encoder_hidden_states = warpped_text_emb(encoder_hidden_states, step)\r\n #-------------------expand feature for parallel---------------\r\n \r\n else:\r\n down_block_res_samples = self.skip_feature\r\n sample = self.toup_feature\r\n\r\n #-------------------expand feature for parallel---------------\r\n down_block_res_samples = warpped_skip_feature(down_block_res_samples, step)\r\n sample = warpped_feature(sample, step)\r\n encoder_hidden_states = warpped_text_emb(encoder_hidden_states, step)\r\n #-------------------expand feature for parallel---------------\r\n\r\n # 5. 
up\r\n for i, upsample_block in enumerate(self.up_blocks):\r\n is_final_block = i == len(self.up_blocks) - 1\r\n\r\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\r\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\r\n\r\n # if we have not reached the final block and need to forward the\r\n # upsample size, we do it here\r\n if not is_final_block and forward_upsample_size:\r\n upsample_size = down_block_res_samples[-1].shape[2:]\r\n\r\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\r\n sample = upsample_block(\r\n hidden_states=sample,\r\n temb=emb,\r\n res_hidden_states_tuple=res_samples,\r\n encoder_hidden_states=encoder_hidden_states,\r\n cross_attention_kwargs=cross_attention_kwargs,\r\n upsample_size=upsample_size,\r\n attention_mask=attention_mask,\r\n )\r\n else:\r\n sample = upsample_block(\r\n hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size\r\n )\r\n\r\n # 6. post-process\r\n if self.conv_norm_out:\r\n sample = self.conv_norm_out(sample)\r\n sample = self.conv_act(sample)\r\n sample = self.conv_out(sample)\r\n\r\n if not return_dict:\r\n return (sample,)\r\n\r\n return UNet2DConditionOutput(sample=sample)\r\n return forward\r\n if model.__class__.__name__ == 'UNet2DConditionModel':\r\n model.forward = faster_forward(model)\r"
},
{
"identifier": "seed_everything",
"path": "utils_sd.py",
"snippet": "def seed_everything(seed):\r\n torch.manual_seed(seed)\r\n torch.cuda.manual_seed(seed)\r\n random.seed(seed)\r\n np.random.seed(seed)\r"
}
] | import numpy as np
import cv2
import time
import torch
from PIL import Image
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler, DDIMScheduler
from controlnet_aux import HEDdetector, OpenposeDetector
from diffusers.utils import load_image
from utils_sd import register_controlnet_pipeline, register_faster_forward, seed_everything | 6,000 |
image = load_image("images/condition.jpeg")
image = np.array(image)
low_threshold = 100
high_threshold = 200
image = cv2.Canny(image, low_threshold, high_threshold)
image = image[:, :, None]
image = np.concatenate([image, image, image], axis=2)
image_condition = Image.fromarray(image)
controlnet = ControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16
).to('cuda')
pipe = StableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
).to('cuda')
print('Warm up of the gpu')
for i in range(2):
image = pipe("Mona Lisa", image_condition).images[0]
#-------------------
print("Start Generating")
seed_everything(8888)
start_time = time.time()
image = pipe("Mona Lisa", image_condition).images[0]
end_time = time.time()
print("Origin Pipeline: {:.3f} seconds".format(end_time-start_time))
image.save('images/canny_out_origin.png')
register_controlnet_pipeline(pipe)
|
image = load_image("images/condition.jpeg")
image = np.array(image)
low_threshold = 100
high_threshold = 200
image = cv2.Canny(image, low_threshold, high_threshold)
image = image[:, :, None]
image = np.concatenate([image, image, image], axis=2)
image_condition = Image.fromarray(image)
controlnet = ControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16
).to('cuda')
pipe = StableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
).to('cuda')
print('Warm up of the gpu')
for i in range(2):
image = pipe("Mona Lisa", image_condition).images[0]
#-------------------
print("Start Generating")
seed_everything(8888)
start_time = time.time()
image = pipe("Mona Lisa", image_condition).images[0]
end_time = time.time()
print("Origin Pipeline: {:.3f} seconds".format(end_time-start_time))
image.save('images/canny_out_origin.png')
register_controlnet_pipeline(pipe) | register_faster_forward(pipe.unet) | 1 | 2023-12-15 05:03:37+00:00 | 8k |
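A sketch of the timed run that plausibly follows this record's gold next line (`register_faster_forward(pipe.unet)`). The prints and output path are assumptions that mirror the baseline block earlier in the same file; only the registered helpers come from the record's snippets.

register_faster_forward(pipe.unet)  # default '50ls' schedule reuses encoder features on non-key timesteps

seed_everything(8888)               # same seed as the baseline run, so the outputs are comparable
start_time = time.time()
image = pipe.call("Mona Lisa", image_condition).images[0]  # register_controlnet_pipeline exposes the patched call()
end_time = time.time()
print("Faster Pipeline: {:.3f} seconds".format(end_time - start_time))
image.save('images/canny_out_faster.png')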
FoundationVision/GLEE | app/GLEE/glee/backbone/eva02-dino.py | [
{
"identifier": "PatchEmbed",
"path": "app/GLEE/glee/backbone/eva_02_utils.py",
"snippet": "class PatchEmbed(nn.Module):\n \"\"\"\n Image to Patch Embedding.\n \"\"\"\n\n def __init__(\n self, kernel_size=(16, 16), stride=(16, 16), padding=(0, 0), in_chans=3, embed_dim=768\n ):\n \"\"\"\n Args:\n kernel_size (Tuple): kernel size of the projection layer.\n stride (Tuple): stride of the projection layer.\n padding (Tuple): padding size of the projection layer.\n in_chans (int): Number of input image channels.\n embed_dim (int): embed_dim (int): Patch embedding dimension.\n \"\"\"\n super().__init__()\n\n self.proj = nn.Conv2d(\n in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding\n )\n\n def forward(self, x):\n x = self.proj(x)\n # B C H W -> B H W C\n x = x.permute(0, 2, 3, 1)\n return x"
},
{
"identifier": "add_decomposed_rel_pos",
"path": "app/GLEE/glee/backbone/eva_02_utils.py",
"snippet": "def add_decomposed_rel_pos(attn, q, rel_pos_h, rel_pos_w, q_size, k_size):\n \"\"\"\n Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.\n https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950\n Args:\n attn (Tensor): attention map.\n q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C).\n rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis.\n rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis.\n q_size (Tuple): spatial sequence size of query q with (q_h, q_w).\n k_size (Tuple): spatial sequence size of key k with (k_h, k_w).\n\n Returns:\n attn (Tensor): attention map with added relative positional embeddings.\n \"\"\"\n q_h, q_w = q_size\n k_h, k_w = k_size\n Rh = get_rel_pos(q_h, k_h, rel_pos_h)\n Rw = get_rel_pos(q_w, k_w, rel_pos_w)\n\n B, _, dim = q.shape\n r_q = q.reshape(B, q_h, q_w, dim)\n rel_h = torch.einsum(\"bhwc,hkc->bhwk\", r_q, Rh)\n rel_w = torch.einsum(\"bhwc,wkc->bhwk\", r_q, Rw)\n\n attn = (\n attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :]\n ).view(B, q_h * q_w, k_h * k_w)\n\n return attn"
},
{
"identifier": "get_abs_pos",
"path": "app/GLEE/glee/backbone/eva_02_utils.py",
"snippet": "def get_abs_pos(abs_pos, has_cls_token, hw):\n \"\"\"\n Calculate absolute positional embeddings. If needed, resize embeddings and remove cls_token\n dimension for the original embeddings.\n Args:\n abs_pos (Tensor): absolute positional embeddings with (1, num_position, C).\n has_cls_token (bool): If true, has 1 embedding in abs_pos for cls token.\n hw (Tuple): size of input image tokens.\n\n Returns:\n Absolute positional embeddings after processing with shape (1, H, W, C)\n \"\"\"\n h, w = hw\n if has_cls_token:\n abs_pos = abs_pos[:, 1:]\n xy_num = abs_pos.shape[1]\n size = int(math.sqrt(xy_num))\n assert size * size == xy_num\n\n if size != h or size != w:\n new_abs_pos = F.interpolate(\n abs_pos.reshape(1, size, size, -1).permute(0, 3, 1, 2),\n size=(h, w),\n mode=\"bicubic\",\n align_corners=False,\n )\n\n return new_abs_pos.permute(0, 2, 3, 1)\n else:\n return abs_pos.reshape(1, h, w, -1)"
},
{
"identifier": "window_partition",
"path": "app/GLEE/glee/backbone/eva_02_utils.py",
"snippet": "def window_partition(x, window_size):\n \"\"\"\n Partition into non-overlapping windows with padding if needed.\n Args:\n x (tensor): input tokens with [B, H, W, C].\n window_size (int): window size.\n\n Returns:\n windows: windows after partition with [B * num_windows, window_size, window_size, C].\n (Hp, Wp): padded height and width before partition\n \"\"\"\n B, H, W, C = x.shape\n\n pad_h = (window_size - H % window_size) % window_size\n pad_w = (window_size - W % window_size) % window_size\n if pad_h > 0 or pad_w > 0:\n x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))\n Hp, Wp = H + pad_h, W + pad_w\n\n x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)\n windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)\n return windows, (Hp, Wp)"
},
{
"identifier": "window_unpartition",
"path": "app/GLEE/glee/backbone/eva_02_utils.py",
"snippet": "def window_unpartition(windows, window_size, pad_hw, hw):\n \"\"\"\n Window unpartition into original sequences and removing padding.\n Args:\n x (tensor): input tokens with [B * num_windows, window_size, window_size, C].\n window_size (int): window size.\n pad_hw (Tuple): padded height and width (Hp, Wp).\n hw (Tuple): original height and width (H, W) before padding.\n\n Returns:\n x: unpartitioned sequences with [B, H, W, C].\n \"\"\"\n Hp, Wp = pad_hw\n H, W = hw\n B = windows.shape[0] // (Hp * Wp // window_size // window_size)\n x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1)\n x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)\n\n if Hp > H or Wp > W:\n x = x[:, :H, :W, :].contiguous()\n return x"
},
{
"identifier": "VisionRotaryEmbeddingFast",
"path": "app/GLEE/glee/backbone/eva_02_utils.py",
"snippet": "class VisionRotaryEmbeddingFast(nn.Module):\n def __init__(\n self,\n dim,\n pt_seq_len=16,\n ft_seq_len=None,\n custom_freqs = None,\n freqs_for = 'lang',\n theta = 10000,\n max_freq = 10,\n num_freqs = 1,\n ):\n super().__init__()\n if custom_freqs:\n freqs = custom_freqs\n elif freqs_for == 'lang':\n freqs = 1. / (theta ** (torch.arange(0, dim, 2)[:(dim // 2)].float() / dim))\n elif freqs_for == 'pixel':\n freqs = torch.linspace(1., max_freq / 2, dim // 2) * pi\n elif freqs_for == 'constant':\n freqs = torch.ones(num_freqs).float()\n else:\n raise ValueError(f'unknown modality {freqs_for}')\n\n if ft_seq_len is None: ft_seq_len = pt_seq_len\n t = torch.arange(ft_seq_len) / ft_seq_len * pt_seq_len\n\n freqs = torch.einsum('..., f -> ... f', t, freqs)\n freqs = repeat(freqs, '... n -> ... (n r)', r = 2)\n freqs = broadcat((freqs[:, None, :], freqs[None, :, :]), dim = -1)\n\n freqs_cos = freqs.cos().view(-1, freqs.shape[-1])\n freqs_sin = freqs.sin().view(-1, freqs.shape[-1])\n\n self.register_buffer(\"freqs_cos\", freqs_cos)\n self.register_buffer(\"freqs_sin\", freqs_sin)\n\n print('======== shape of rope freq', self.freqs_cos.shape, '========')\n\n # def forward(self, t): return t * self.freqs_cos + rotate_half(t) * self.freqs_sin\n def forward(self, t): \n if t.shape[2] != self.freqs_cos.shape[0]:\n t_len = t.shape[2]\n output = t * self.freqs_cos[:t_len] + rotate_half(t) * self.freqs_sin[:t_len]\n else:\n output = t * self.freqs_cos + rotate_half(t) * self.freqs_sin\n return output"
}
] | import logging
import math
import fvcore.nn.weight_init as weight_init
import torch
import torch.nn as nn
import torch.nn.functional as F
import xformers.ops as xops
from functools import partial
from detectron2.layers import CNNBlockBase, Conv2d, get_norm
from detectron2.modeling.backbone.fpn import _assert_strides_are_log2_contiguous
from detectron2.modeling.backbone import Backbone
from .eva_02_utils import (
PatchEmbed,
add_decomposed_rel_pos,
get_abs_pos,
window_partition,
window_unpartition,
VisionRotaryEmbeddingFast,
)
from timm.models.layers import DropPath
from fairscale.nn.checkpoint import checkpoint_wrapper | 4,384 | "bottleneck" conv layers.
norm (str or callable): normalization for all conv layers.
See :func:`layers.get_norm` for supported format.
act_layer (callable): activation for all conv layers.
"""
super().__init__(in_channels, out_channels, 1)
self.conv1 = Conv2d(in_channels, bottleneck_channels, 1, bias=False)
self.norm1 = get_norm(norm, bottleneck_channels)
self.act1 = act_layer()
self.conv2 = Conv2d(
bottleneck_channels,
bottleneck_channels,
3,
padding=1,
bias=False,
)
self.norm2 = get_norm(norm, bottleneck_channels)
self.act2 = act_layer()
self.conv3 = Conv2d(bottleneck_channels, out_channels, 1, bias=False)
self.norm3 = get_norm(norm, out_channels)
for layer in [self.conv1, self.conv2, self.conv3]:
weight_init.c2_msra_fill(layer)
for layer in [self.norm1, self.norm2]:
layer.weight.data.fill_(1.0)
layer.bias.data.zero_()
# zero init last norm layer.
self.norm3.weight.data.zero_()
self.norm3.bias.data.zero_()
def forward(self, x):
out = x
for layer in self.children():
out = layer(out)
out = x + out
return out
class Block(nn.Module):
"""Transformer blocks with support of window attention and residual propagation blocks"""
def __init__(
self,
dim,
num_heads,
mlp_ratio=4*2/3,
qkv_bias=True,
drop_path=0.0,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
window_size=0,
use_residual_block=False,
rope=None,
xattn=True,
):
"""
Args:
dim (int): Number of input channels.
num_heads (int): Number of attention heads in each ViT block.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool): If True, add a learnable bias to query, key, value.
drop_path (float): Stochastic depth rate.
norm_layer (nn.Module): Normalization layer.
act_layer (nn.Module): Activation layer.
use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
window_size (int): Window size for window attention blocks. If it equals 0, then not
use window attention.
use_residual_block (bool): If True, use a residual block after the MLP block.
input_size (int or None): Input resolution for calculating the relative positional
parameter size.
"""
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
rope=rope,
xattn=xattn,
)
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = SwiGLU(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
subln=True,
norm_layer=norm_layer,
)
self.window_size = window_size
self.use_residual_block = use_residual_block
if use_residual_block:
# Use a residual block with bottleneck channel as dim // 2
self.residual = ResBottleneckBlock(
in_channels=dim,
out_channels=dim,
bottleneck_channels=dim // 2,
norm="LN",
)
def forward(self, x):
shortcut = x
x = self.norm1(x)
# Window partition
if self.window_size > 0:
H, W = x.shape[1], x.shape[2]
x, pad_hw = window_partition(x, self.window_size)
x = self.attn(x)
# Reverse window partition
if self.window_size > 0:
|
try:
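    # NOTE: in the original source this try block wraps `import xformers.ops as xops`
    # (hoisted into the import list above); that import is what can fail and flip
    # HAS_XFORMER to False.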
HAS_XFORMER=True
except:
HAS_XFORMER=False
pass
logger = logging.getLogger(__name__)
__all__ = ["EVA02_ViT", "SimpleFeaturePyramid", "get_vit_lr_decay_rate"]
class SwiGLU(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.SiLU, drop=0.,
norm_layer=nn.LayerNorm, subln=False
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.w1 = nn.Linear(in_features, hidden_features)
self.w2 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.ffn_ln = norm_layer(hidden_features) if subln else nn.Identity()
self.w3 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x1 = self.w1(x)
x2 = self.w2(x)
hidden = self.act(x1) * x2
x = self.ffn_ln(hidden)
x = self.w3(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(
self,
dim,
num_heads=8,
qkv_bias=True,
qk_scale=None,
attn_head_dim=None,
rope=None,
xattn=True,
):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
if attn_head_dim is not None:
head_dim = attn_head_dim
all_head_dim = head_dim * self.num_heads
self.scale = qk_scale or head_dim ** -0.5
self.q_proj = nn.Linear(dim, all_head_dim, bias=False)
self.k_proj = nn.Linear(dim, all_head_dim, bias=False)
self.v_proj = nn.Linear(dim, all_head_dim, bias=False)
if qkv_bias:
self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
else:
self.q_bias = None
self.v_bias = None
self.rope = rope
self.xattn = xattn
self.proj = nn.Linear(all_head_dim, dim)
if not HAS_XFORMER:
self.xattn = False
def forward(self, x):
B, H, W, C = x.shape
x = x.view(B, -1, C)
N = H * W
q = F.linear(input=x, weight=self.q_proj.weight, bias=self.q_bias)
k = F.linear(input=x, weight=self.k_proj.weight, bias=None)
v = F.linear(input=x, weight=self.v_proj.weight, bias=self.v_bias)
q = q.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3) # B, num_heads, N, C
k = k.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3)
v = v.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3)
## rope
q = self.rope(q).type_as(v)
k = self.rope(k).type_as(v)
if self.xattn:
q = q.permute(0, 2, 1, 3) # B, num_heads, N, C -> B, N, num_heads, C
k = k.permute(0, 2, 1, 3)
v = v.permute(0, 2, 1, 3)
x = xops.memory_efficient_attention(q, k, v)
x = x.reshape(B, N, -1)
else:
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
attn = attn.softmax(dim=-1).type_as(x)
x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
x = self.proj(x)
x = x.view(B, H, W, C)
return x
class ResBottleneckBlock(CNNBlockBase):
"""
The standard bottleneck residual block without the last activation layer.
It contains 3 conv layers with kernels 1x1, 3x3, 1x1.
"""
def __init__(
self,
in_channels,
out_channels,
bottleneck_channels,
norm="LN",
act_layer=nn.GELU,
):
"""
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
bottleneck_channels (int): number of output channels for the 3x3
"bottleneck" conv layers.
norm (str or callable): normalization for all conv layers.
See :func:`layers.get_norm` for supported format.
act_layer (callable): activation for all conv layers.
"""
super().__init__(in_channels, out_channels, 1)
self.conv1 = Conv2d(in_channels, bottleneck_channels, 1, bias=False)
self.norm1 = get_norm(norm, bottleneck_channels)
self.act1 = act_layer()
self.conv2 = Conv2d(
bottleneck_channels,
bottleneck_channels,
3,
padding=1,
bias=False,
)
self.norm2 = get_norm(norm, bottleneck_channels)
self.act2 = act_layer()
self.conv3 = Conv2d(bottleneck_channels, out_channels, 1, bias=False)
self.norm3 = get_norm(norm, out_channels)
for layer in [self.conv1, self.conv2, self.conv3]:
weight_init.c2_msra_fill(layer)
for layer in [self.norm1, self.norm2]:
layer.weight.data.fill_(1.0)
layer.bias.data.zero_()
# zero init last norm layer.
self.norm3.weight.data.zero_()
self.norm3.bias.data.zero_()
def forward(self, x):
out = x
for layer in self.children():
out = layer(out)
out = x + out
return out
class Block(nn.Module):
"""Transformer blocks with support of window attention and residual propagation blocks"""
def __init__(
self,
dim,
num_heads,
mlp_ratio=4*2/3,
qkv_bias=True,
drop_path=0.0,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
window_size=0,
use_residual_block=False,
rope=None,
xattn=True,
):
"""
Args:
dim (int): Number of input channels.
num_heads (int): Number of attention heads in each ViT block.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool): If True, add a learnable bias to query, key, value.
drop_path (float): Stochastic depth rate.
norm_layer (nn.Module): Normalization layer.
act_layer (nn.Module): Activation layer.
use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
window_size (int): Window size for window attention blocks. If it equals 0, then not
use window attention.
use_residual_block (bool): If True, use a residual block after the MLP block.
input_size (int or None): Input resolution for calculating the relative positional
parameter size.
"""
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
rope=rope,
xattn=xattn,
)
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = SwiGLU(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
subln=True,
norm_layer=norm_layer,
)
self.window_size = window_size
self.use_residual_block = use_residual_block
if use_residual_block:
# Use a residual block with bottleneck channel as dim // 2
self.residual = ResBottleneckBlock(
in_channels=dim,
out_channels=dim,
bottleneck_channels=dim // 2,
norm="LN",
)
def forward(self, x):
shortcut = x
x = self.norm1(x)
# Window partition
if self.window_size > 0:
H, W = x.shape[1], x.shape[2]
x, pad_hw = window_partition(x, self.window_size)
x = self.attn(x)
# Reverse window partition
if self.window_size > 0: | x = window_unpartition(x, self.window_size, pad_hw, (H, W)) | 4 | 2023-12-15 01:12:36+00:00 | 8k |
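The `Attention.forward` above applies `self.rope` to q and k before computing attention, and `VisionRotaryEmbeddingFast.forward` (see the context snippet) implements that as `t * freqs_cos + rotate_half(t) * freqs_sin` over precomputed 2-D frequency tables. Below is a minimal PyTorch sketch of that application step; the `rotate_half` definition is an assumption (the standard pairwise rotation, since the helper itself is not included above) and the shapes are illustrative only.

import torch

def rotate_half(x: torch.Tensor) -> torch.Tensor:
    # Pairwise rotation used with interleaved cos/sin tables: (x1, x2) -> (-x2, x1)
    # for every adjacent channel pair (assumed; the repo's own helper is not shown).
    x1, x2 = x.reshape(*x.shape[:-1], -1, 2).unbind(-1)
    return torch.stack((-x2, x1), dim=-1).reshape(x.shape)

# Toy application mirroring VisionRotaryEmbeddingFast.forward: freqs_cos / freqs_sin
# are (seq_len, rot_dim) buffers, q is (batch, num_heads, seq_len, rot_dim).
seq_len, rot_dim = 16 * 16, 64
freqs = torch.randn(seq_len, rot_dim)       # stand-in for the precomputed table
freqs_cos, freqs_sin = freqs.cos(), freqs.sin()
q = torch.randn(2, 8, seq_len, rot_dim)
q_rot = q * freqs_cos + rotate_half(q) * freqs_sin
print(q_rot.shape)  # torch.Size([2, 8, 256, 64])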
SHI-Labs/VCoder | vcoder_llava/eval/model_seg_loader.py | [
{
"identifier": "IMAGE_TOKEN_INDEX",
"path": "vcoder_llava/constants.py",
"snippet": "IMAGE_TOKEN_INDEX = -200"
},
{
"identifier": "DEFAULT_IMAGE_TOKEN",
"path": "vcoder_llava/constants.py",
"snippet": "DEFAULT_IMAGE_TOKEN = \"<image>\""
},
{
"identifier": "SEG_TOKEN_INDEX",
"path": "vcoder_llava/constants.py",
"snippet": "SEG_TOKEN_INDEX = -300"
},
{
"identifier": "DEFAULT_SEG_TOKEN",
"path": "vcoder_llava/constants.py",
"snippet": "DEFAULT_SEG_TOKEN = \"<seg>\""
},
{
"identifier": "conv_templates",
"path": "vcoder_llava/vcoder_conversation.py",
"snippet": "class SeparatorStyle(Enum):\nclass VCoderConversation:\n SINGLE = auto()\n TWO = auto()\n MPT = auto()\n PLAIN = auto()\n LLAMA_2 = auto()\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n W, H = seg.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n W, H = depth.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n W, H = seg.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n W, H = depth.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n def get_prompt(self):\n def append_message(self, role, message):\n def get_images(self, return_pil=False):\n def expand2square(pil_img, background_color=(122, 116, 104)):\n def get_segs(self, return_pil=False):\n def expand2square(pil_img, background_color=(122, 116, 104)):\n def get_depths(self, return_pil=False):\n def expand2square(pil_img, background_color=(122, 116, 104)):\n def to_gradio_chatbot(self):\n def copy(self):\n def dict(self):"
},
{
"identifier": "load_pretrained_model",
"path": "vcoder_llava/model/builder.py",
"snippet": "def load_pretrained_model(model_path, model_base, model_name, load_8bit=False, load_4bit=False, device_map=\"auto\", device=\"cuda\"):\n kwargs = {\"device_map\": device_map}\n\n if load_8bit:\n kwargs['load_in_8bit'] = True\n elif load_4bit:\n kwargs['load_in_4bit'] = True\n kwargs['quantization_config'] = BitsAndBytesConfig(\n load_in_4bit=True,\n bnb_4bit_compute_dtype=torch.float16,\n bnb_4bit_use_double_quant=True,\n bnb_4bit_quant_type='nf4'\n )\n else:\n kwargs['torch_dtype'] = torch.float16\n if 'llava' in model_name.lower():\n # Load LLaVA model\n if 'lora' in model_name.lower() and model_base is None:\n warnings.warn('There is `lora` in model name but no `model_base` is provided. If you are loading a LoRA model, please provide the `model_base` argument. Detailed instruction: https://github.com/haotian-liu/LLaVA#launch-a-model-worker-lora-weights-unmerged.')\n if 'lora' in model_name.lower() and model_base is not None:\n lora_cfg_pretrained = AutoConfig.from_pretrained(model_path)\n tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)\n if 'vcoder_it' in model_name.lower():\n print('Loading VCoder LLaVA from base model...')\n model = VCoderITLlavaLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=lora_cfg_pretrained, **kwargs)\n else:\n print('Loading LLaVA from base model...')\n model = LlavaLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=lora_cfg_pretrained, **kwargs)\n token_num, tokem_dim = model.lm_head.out_features, model.lm_head.in_features\n if model.lm_head.weight.shape[0] != token_num:\n model.lm_head.weight = torch.nn.Parameter(torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype))\n model.model.embed_tokens.weight = torch.nn.Parameter(torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype))\n\n print('Loading additional weights...')\n if os.path.exists(os.path.join(model_path, 'non_lora_trainables.bin')):\n non_lora_trainables = torch.load(os.path.join(model_path, 'non_lora_trainables.bin'), map_location='cpu')\n else:\n # this is probably from HF Hub\n from huggingface_hub import hf_hub_download\n def load_from_hf(repo_id, filename, subfolder=None):\n cache_file = hf_hub_download(\n repo_id=repo_id,\n filename=filename,\n subfolder=subfolder)\n return torch.load(cache_file, map_location='cpu')\n non_lora_trainables = load_from_hf(model_path, 'non_lora_trainables.bin')\n non_lora_trainables = {(k[11:] if k.startswith('base_model.') else k): v for k, v in non_lora_trainables.items()}\n if any(k.startswith('model.model.') for k in non_lora_trainables):\n non_lora_trainables = {(k[6:] if k.startswith('model.') else k): v for k, v in non_lora_trainables.items()}\n model.load_state_dict(non_lora_trainables, strict=False)\n\n from peft import PeftModel\n print('Loading LoRA weights...')\n model = PeftModel.from_pretrained(model, model_path)\n print('Merging LoRA weights...')\n model = model.merge_and_unload()\n print('Model is loaded...')\n elif model_base is not None:\n # this may be mm projector only\n print('Loading LLaVA from base model...')\n tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)\n cfg_pretrained = AutoConfig.from_pretrained(model_path)\n model = LlavaLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs)\n\n mm_projector_weights = torch.load(os.path.join(model_path, 'mm_projector.bin'), map_location='cpu')\n mm_projector_weights = {k: v.to(torch.float16) for k, v in 
mm_projector_weights.items()}\n model.load_state_dict(mm_projector_weights, strict=False)\n else:\n if 'vcoder_it_llava' in model_name.lower():\n print('Loading VCoder LLaVA from base model...')\n tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)\n model = VCoderITLlavaLlamaForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)\n elif 'vcoder_ds_llava' in model_name.lower():\n print('Loading VCoder LLaVA from base model...')\n tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)\n model = VCoderDSLlavaLlamaForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)\n elif 'vcoder_llava' in model_name.lower():\n print('Loading VCoder LLaVA from base model...')\n tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)\n model = VCoderLlavaLlamaForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs) \n else:\n print('Loading LLaVA from base model...')\n tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)\n model = LlavaLlamaForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)\n else:\n # Load language model\n if model_base is not None:\n # PEFT model\n from peft import PeftModel\n tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)\n model = AutoModelForCausalLM.from_pretrained(model_base, torch_dtype=torch.float16, low_cpu_mem_usage=True, device_map=\"auto\")\n print(f\"Loading LoRA weights from {model_path}\")\n model = PeftModel.from_pretrained(model, model_path)\n print(f\"Merging weights\")\n model = model.merge_and_unload()\n print('Convert to FP16...')\n model.to(torch.float16)\n else:\n use_fast = False\n if 'mpt' in model_name.lower():\n tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True)\n model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, trust_remote_code=True, **kwargs)\n else:\n tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)\n model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)\n\n image_processor = None\n\n if hasattr(model.config, \"max_sequence_length\"):\n context_len = model.config.max_sequence_length\n else:\n context_len = 2048\n\n if 'llava' in model_name.lower():\n vision_tower = model.get_vision_tower()\n if not vision_tower.is_loaded:\n vision_tower.load_model()\n vision_tower.to(device=device, dtype=torch.float16)\n image_processor = vision_tower.image_processor\n \n seg_image_processor = None\n if 'vcoder' in model_name.lower():\n seg_image_processor = image_processor\n \n depth_image_processor = None\n if \"ds\" in model_name.lower():\n depth_image_processor = image_processor\n\n model.requires_grad_(False)\n return tokenizer, model, image_processor, seg_image_processor, depth_image_processor, context_len"
},
{
"identifier": "disable_torch_init",
"path": "vcoder_llava/utils.py",
"snippet": "def disable_torch_init():\n \"\"\"\n Disable the redundant torch default initialization to accelerate model creation.\n \"\"\"\n import torch\n setattr(torch.nn.Linear, \"reset_parameters\", lambda self: None)\n setattr(torch.nn.LayerNorm, \"reset_parameters\", lambda self: None)"
},
{
"identifier": "process_images",
"path": "vcoder_llava/mm_utils.py",
"snippet": "def process_images(images, image_processor, model_cfg):\n image_aspect_ratio = getattr(model_cfg, \"image_aspect_ratio\", None)\n new_images = []\n if image_aspect_ratio == 'pad':\n for image in images:\n image = expand2square(image, tuple(int(x*255) for x in image_processor.image_mean))\n image = image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0]\n new_images.append(image)\n else:\n return image_processor(images, return_tensors='pt')['pixel_values']\n if all(x.shape == new_images[0].shape for x in new_images):\n new_images = torch.stack(new_images, dim=0)\n return new_images"
},
{
"identifier": "tokenizer_seg_token",
"path": "vcoder_llava/mm_utils.py",
"snippet": "def tokenizer_seg_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, seg_token_index=SEG_TOKEN_INDEX, return_tensors=None): \n prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split('<seg>\\n<image>')]\n\n def insert_separator(X, sep):\n return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1]\n\n input_ids = []\n offset = 0\n if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:\n offset = 1\n input_ids.append(prompt_chunks[0][0])\n \n for x in insert_separator(prompt_chunks, [seg_token_index, image_token_index] * (offset + 1)):\n if seg_token_index in x:\n input_ids.extend(x[offset:-1])\n else:\n input_ids.extend(x[offset:])\n \n if return_tensors is not None:\n if return_tensors == 'pt':\n return torch.tensor(input_ids, dtype=torch.long)\n raise ValueError(f'Unsupported tensor type: {return_tensors}')\n return input_ids"
},
{
"identifier": "tokenizer_image_token",
"path": "vcoder_llava/mm_utils.py",
"snippet": "def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None):\n prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split('<image>')]\n\n def insert_separator(X, sep):\n return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1]\n\n input_ids = []\n offset = 0\n if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:\n offset = 1\n input_ids.append(prompt_chunks[0][0])\n\n for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):\n input_ids.extend(x[offset:])\n\n if return_tensors is not None:\n if return_tensors == 'pt':\n return torch.tensor(input_ids, dtype=torch.long)\n raise ValueError(f'Unsupported tensor type: {return_tensors}')\n return input_ids"
},
{
"identifier": "get_model_name_from_path",
"path": "vcoder_llava/mm_utils.py",
"snippet": "def get_model_name_from_path(model_path):\n model_path = model_path.strip(\"/\")\n model_paths = model_path.split(\"/\")\n if model_paths[-1].startswith('checkpoint-'):\n return model_paths[-2] + \"_\" + model_paths[-1]\n else:\n return model_paths[-1]"
},
{
"identifier": "KeywordsStoppingCriteria",
"path": "vcoder_llava/mm_utils.py",
"snippet": "class KeywordsStoppingCriteria(StoppingCriteria):\n def __init__(self, keywords, tokenizer, input_ids):\n self.keywords = keywords\n self.keyword_ids = []\n for keyword in keywords:\n cur_keyword_ids = tokenizer(keyword).input_ids\n if len(cur_keyword_ids) > 1 and cur_keyword_ids[0] == tokenizer.bos_token_id:\n cur_keyword_ids = cur_keyword_ids[1:]\n self.keyword_ids.append(torch.tensor(cur_keyword_ids))\n self.tokenizer = tokenizer\n self.start_len = input_ids.shape[1]\n\n def __call__(self, output_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:\n assert output_ids.shape[0] == 1, \"Only support batch size 1 (yet)\" # TODO\n offset = min(output_ids.shape[1] - self.start_len, 3)\n self.keyword_ids = [keyword_id.to(output_ids.device) for keyword_id in self.keyword_ids]\n for keyword_id in self.keyword_ids:\n if output_ids[0, -keyword_id.shape[0]:] == keyword_id:\n return True\n outputs = self.tokenizer.batch_decode(output_ids[:, -offset:], skip_special_tokens=True)[0]\n for keyword in self.keywords:\n if keyword in outputs:\n return True\n return False"
},
{
"identifier": "QUESTIONS",
"path": "vcoder_llava/questions.py",
"snippet": "QUESTIONS = {\n 'semantic': SEMANTIC_QUESTIONS,\n 'instance': INSTANCE_QUESTIONS,\n 'panoptic': PANOPTIC_QUESTIONS,\n 'depth': DEPTH_QUESTIONS,\n}"
}
] | import argparse
import torch
import os
import json
import shortuuid
import random
import glob
import math
from tqdm import tqdm
from vcoder_llava.constants import (
IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN,
SEG_TOKEN_INDEX, DEFAULT_SEG_TOKEN,
)
from vcoder_llava.vcoder_conversation import conv_templates, SeparatorStyle
from vcoder_llava.model.builder import load_pretrained_model
from vcoder_llava.utils import disable_torch_init
from vcoder_llava.mm_utils import process_images, tokenizer_seg_token, tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria
from torch.utils.data import Dataset, DataLoader
from vcoder_llava.questions import QUESTIONS
from PIL import Image | 4,770 |
def split_list(lst, n):
"""Split a list into n (roughly) equal-sized chunks"""
chunk_size = math.ceil(len(lst) / n) # integer division
return [lst[i:i+chunk_size] for i in range(0, len(lst), chunk_size)]
def get_chunk(lst, n, k):
chunks = split_list(lst, n)
return chunks[k]
# Custom dataset class
class CustomDataset(Dataset):
def __init__(self, questions, args, seg_image_folder, tokenizer, image_processor, seg_image_processor, model_config):
self.questions = questions
self.image_folder = args.image_folder
self.seg_image_folder = seg_image_folder
self.images = glob.glob(os.path.join(args.image_folder, '*.jpg'))
self.images = get_chunk(self.images, args.num_chunks, args.chunk_idx)
if seg_image_folder is not None:
self.seg_images = glob.glob(os.path.join(seg_image_folder, '*.jpg'))
self.seg_images = get_chunk(self.seg_images, args.num_chunks, args.chunk_idx)
assert len(self.images) == len(self.seg_images), f"Number of images ({len(self.images)}) and seg images ({len(self.seg_images)}) must be the same"
else:
self.seg_images = None
self.tokenizer = tokenizer
self.image_processor = image_processor
self.seg_image_processor = seg_image_processor
self.model_config = model_config
def __getitem__(self, index):
image_file = self.images[index]
if self.seg_images is not None:
seg_image_file = self.seg_images[index]
else:
seg_image_file = None
ques = random.choice(self.questions)
qs = DEFAULT_IMAGE_TOKEN + '\n' + ques
image = Image.open(os.path.join(image_file)).convert('RGB')
image_tensor = process_images([image], self.image_processor, self.model_config)[0]
if seg_image_file is not None:
seg_image = Image.open(os.path.join(seg_image_file)).convert('RGB')
seg_image_tensor = process_images([seg_image], self.seg_image_processor, self.model_config)[0]
qs = DEFAULT_SEG_TOKEN + '\n' + qs
else:
seg_image_tensor = image_tensor
qs = qs + " Return the answer in the paragraph format: 'The objects present in the image are: ...' and then list the objects with their count in word format (if greater than 1) in front of them, like 'two people'."
conv = conv_templates[args.conv_mode].copy()
conv.append_message(conv.roles[0], qs)
conv.append_message(conv.roles[1], None)
prompt = conv.get_prompt()
if seg_image_file is None:
input_ids = tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt')
else:
input_ids = tokenizer_seg_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, SEG_TOKEN_INDEX, return_tensors='pt')
return input_ids, image_tensor, seg_image_tensor, image_file.split("/")[-1], ques
def __len__(self):
return len(self.images)
# DataLoader
def create_data_loader(questions, args, seg_image_folder, tokenizer, image_processor, seg_image_processor, model_config, batch_size=1, num_workers=4):
assert batch_size == 1, "batch_size must be 1"
dataset = CustomDataset(questions, args, seg_image_folder, tokenizer, image_processor, seg_image_processor, model_config)
data_loader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, shuffle=False)
return data_loader
def eval_model(args, task):
# Model
disable_torch_init()
model_path = os.path.expanduser(args.model_path)
model_name = get_model_name_from_path(model_path)
tokenizer, model, image_processor, seg_image_processor, _, context_len = load_pretrained_model(model_path, args.model_base, model_name)
questions = QUESTIONS[task]
answers_file = os.path.expanduser(args.output_file)
os.makedirs(os.path.dirname(answers_file), exist_ok=True)
answers_file = answers_file + f'_{task}_{args.num_chunks}_{args.chunk_idx}.txt'
if 'plain' in model_name and 'finetune' not in model_name.lower() and 'mmtag' not in args.conv_mode:
args.conv_mode = args.conv_mode + '_mmtag'
print(f'It seems that this is a plain model, but it is not using a mmtag prompt, auto switching to {args.conv_mode}.')
if not args.use_seg:
seg_image_folder = None
else:
seg_image_folder = os.path.join(args.seg_image_folder, f'{task}_inference')
data_loader = create_data_loader(questions, args, seg_image_folder, tokenizer, image_processor, seg_image_processor, model.config)
for input_ids, image_tensor, seg_image_tensor, image_file, ques in tqdm(data_loader, total=len(data_loader), desc=f'Generating {task} answers...'):
|
def split_list(lst, n):
"""Split a list into n (roughly) equal-sized chunks"""
chunk_size = math.ceil(len(lst) / n) # integer division
return [lst[i:i+chunk_size] for i in range(0, len(lst), chunk_size)]
def get_chunk(lst, n, k):
chunks = split_list(lst, n)
return chunks[k]
# Custom dataset class
class CustomDataset(Dataset):
def __init__(self, questions, args, seg_image_folder, tokenizer, image_processor, seg_image_processor, model_config):
self.questions = questions
self.image_folder = args.image_folder
self.seg_image_folder = seg_image_folder
self.images = glob.glob(os.path.join(args.image_folder, '*.jpg'))
self.images = get_chunk(self.images, args.num_chunks, args.chunk_idx)
if seg_image_folder is not None:
self.seg_images = glob.glob(os.path.join(seg_image_folder, '*.jpg'))
self.seg_images = get_chunk(self.seg_images, args.num_chunks, args.chunk_idx)
assert len(self.images) == len(self.seg_images), f"Number of images ({len(self.images)}) and seg images ({len(self.seg_images)}) must be the same"
else:
self.seg_images = None
self.tokenizer = tokenizer
self.image_processor = image_processor
self.seg_image_processor = seg_image_processor
self.model_config = model_config
def __getitem__(self, index):
image_file = self.images[index]
if self.seg_images is not None:
seg_image_file = self.seg_images[index]
else:
seg_image_file = None
ques = random.choice(self.questions)
qs = DEFAULT_IMAGE_TOKEN + '\n' + ques
image = Image.open(os.path.join(image_file)).convert('RGB')
image_tensor = process_images([image], self.image_processor, self.model_config)[0]
if seg_image_file is not None:
seg_image = Image.open(os.path.join(seg_image_file)).convert('RGB')
seg_image_tensor = process_images([seg_image], self.seg_image_processor, self.model_config)[0]
qs = DEFAULT_SEG_TOKEN + '\n' + qs
else:
seg_image_tensor = image_tensor
qs = qs + " Return the answer in the paragraph format: 'The objects present in the image are: ...' and then list the objects with their count in word format (if greater than 1) in front of them, like 'two people'."
conv = conv_templates[args.conv_mode].copy()
conv.append_message(conv.roles[0], qs)
conv.append_message(conv.roles[1], None)
prompt = conv.get_prompt()
if seg_image_file is None:
input_ids = tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt')
else:
input_ids = tokenizer_seg_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, SEG_TOKEN_INDEX, return_tensors='pt')
return input_ids, image_tensor, seg_image_tensor, image_file.split("/")[-1], ques
def __len__(self):
return len(self.images)
# DataLoader
def create_data_loader(questions, args, seg_image_folder, tokenizer, image_processor, seg_image_processor, model_config, batch_size=1, num_workers=4):
assert batch_size == 1, "batch_size must be 1"
dataset = CustomDataset(questions, args, seg_image_folder, tokenizer, image_processor, seg_image_processor, model_config)
data_loader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, shuffle=False)
return data_loader
def eval_model(args, task):
# Model
disable_torch_init()
model_path = os.path.expanduser(args.model_path)
model_name = get_model_name_from_path(model_path)
tokenizer, model, image_processor, seg_image_processor, _, context_len = load_pretrained_model(model_path, args.model_base, model_name)
questions = QUESTIONS[task]
answers_file = os.path.expanduser(args.output_file)
os.makedirs(os.path.dirname(answers_file), exist_ok=True)
answers_file = answers_file + f'_{task}_{args.num_chunks}_{args.chunk_idx}.txt'
if 'plain' in model_name and 'finetune' not in model_name.lower() and 'mmtag' not in args.conv_mode:
args.conv_mode = args.conv_mode + '_mmtag'
print(f'It seems that this is a plain model, but it is not using a mmtag prompt, auto switching to {args.conv_mode}.')
if not args.use_seg:
seg_image_folder = None
else:
seg_image_folder = os.path.join(args.seg_image_folder, f'{task}_inference')
data_loader = create_data_loader(questions, args, seg_image_folder, tokenizer, image_processor, seg_image_processor, model.config)
for input_ids, image_tensor, seg_image_tensor, image_file, ques in tqdm(data_loader, total=len(data_loader), desc=f'Generating {task} answers...'):
| stop_str = conv_templates[args.conv_mode].sep if conv_templates[args.conv_mode].sep_style != SeparatorStyle.TWO else conv_templates[args.conv_mode].sep2 | 4 | 2023-12-17 07:46:27+00:00 | 8k |
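The `tokenizer_image_token` / `tokenizer_seg_token` helpers in the context above tokenize the prompt in chunks around the `<image>` and `<seg>` markers and splice sentinel ids (IMAGE_TOKEN_INDEX = -200, SEG_TOKEN_INDEX = -300) between the chunks, so the model can later substitute visual features at those positions. The following is a simplified, self-contained sketch of that idea; the toy tokenizer and prompt are invented, and the BOS-offset bookkeeping done by the real helpers is omitted.

IMAGE_TOKEN_INDEX = -200

def toy_tokenize(text):
    # stand-in for a HuggingFace tokenizer: one fake id per whitespace token
    return [100 + i for i, _ in enumerate(text.split())]

def splice_image_token(prompt, image_token_index=IMAGE_TOKEN_INDEX):
    ids = []
    for i, chunk in enumerate(prompt.split("<image>")):
        if i > 0:
            ids.append(image_token_index)  # placeholder later replaced by image features
        ids.extend(toy_tokenize(chunk))
    return ids

print(splice_image_token("<image>\nWhat objects can be seen in the image?"))
# [-200, 100, 101, 102, 103, 104, 105, 106, 107]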
OSU-NLP-Group/SeeAct | src/data_utils/prompts.py | [
{
"identifier": "data_format_input_multichoice",
"path": "src/data_utils/format_prompt_utils.py",
"snippet": "def data_format_input_multichoice(\n sample, candidate_ids, gt=-1, previous_k=5, keep_html_brackets=False\n):\n # Parse html into a dom tree\n dom_tree = lxml.etree.fromstring(sample[\"cleaned_html\"])\n dom_tree, node_to_keep = data_prune_tree(dom_tree, candidate_ids)\n tree_repr, id_mapping = get_tree_repr(\n dom_tree, id_mapping={}, keep_html_brackets=keep_html_brackets\n )\n candidate_nodes = dom_tree.xpath(\"//*[@backend_node_id]\")\n choices = []\n for idx, node in enumerate(candidate_nodes):\n temp = get_tree_repr(\n node,\n id_mapping=id_mapping,\n keep_html_brackets=keep_html_brackets,\n )\n choices.append(\n [\n node.attrib[\"backend_node_id\"],\n \" \".join(\n get_tree_repr(\n node,\n id_mapping=id_mapping,\n keep_html_brackets=keep_html_brackets,\n )[0].split()[:10]\n ),\n ]\n )\n gt = id_mapping.get(gt, -1)\n seq_input = (\n \"Based on the HTML webpage above, try to complete the following task:\\n\"\n f\"Task: {sample['confirmed_task']}\\n\"\n f\"Previous actions:\\n\"\n )\n if len(sample[\"previous_actions\"]) > 0:\n for action in sample[\"previous_actions\"][-previous_k:]:\n seq_input += f\"{action}\\n\"\n else:\n seq_input += \"None\\n\"\n seq_input += (\n \"What should be the next action? Please select from the following choices \"\n \"(If the correct action is not in the page above, please select A. 'None of the above'):\\n\\n\"\n \"A. None of the above\\n\"\n )\n for idx, choice in enumerate(choices):\n # convert to ascii A, B, C, D, ...\n seq_input += f\"{chr(66 + idx)}. {choice[1]}\\n\"\n if gt == -1:\n seq_target = \"A.\"\n else:\n gt += 1\n current_action_op = sample[\"operation\"][\"op\"]\n current_action_value = sample[\"operation\"][\"value\"]\n seq_target = f\"{chr(65 + gt)}.\\n\" f\"Action: {current_action_op}\\n\"\n if current_action_op != \"CLICK\":\n seq_target += f\"Value: {current_action_value}\"\n return tree_repr, seq_input, seq_target, choices, node_to_keep"
},
{
"identifier": "format_options",
"path": "src/data_utils/format_prompt_utils.py",
"snippet": "def format_options(choices):\n option_text = \"\"\n abcd = ''\n non_abcd = ''\n\n multi_choice = ''\n for multichoice_idx, choice in enumerate(choices):\n multi_choice += f\"{generate_option_name(multichoice_idx)}. {choice[1]}\\n\"\n abcd += f\"{generate_option_name(multichoice_idx)}, \"\n\n non_abcd = generate_option_name(multichoice_idx + 1)\n\n multi_choice += f\"{non_abcd}. None of the other options match the correct element\"\n # option_text += abcd\n option_text += f\"If none of these elements match your target element, please select {non_abcd}. None of the other options match the correct element.\\n\"\n\n option_text += (multi_choice + '\\n\\n')\n return option_text"
},
{
"identifier": "generate_option_name",
"path": "src/data_utils/format_prompt_utils.py",
"snippet": "def generate_option_name(index):\n if index < 26:\n return string.ascii_uppercase[index]\n else:\n first_letter_index = (index - 26) // 26\n second_letter_index = (index - 26) % 26\n first_letter = string.ascii_uppercase[first_letter_index]\n second_letter = string.ascii_uppercase[second_letter_index]\n return f\"{first_letter}{second_letter}\""
},
{
"identifier": "generate_new_referring_prompt",
"path": "src/data_utils/format_prompt_utils.py",
"snippet": "def generate_new_referring_prompt(referring_description=\"\", element_format=\"\", action_format=\"\", value_format=\"\",\n choices=None,split=\"4\"):\n referring_prompt = \"\"\n\n # Add description about how to format output\n if referring_description != \"\":\n referring_prompt += referring_description\n referring_prompt += \"\\n\\n\"\n\n # Add element prediction format and choices\n\n\n # Prepare Option texts\n # For exp {1, 2, 4}, generate option\n # For element_atttribute, set options field at None\n if choices:\n choice_text = format_options(choices)\n referring_prompt += choice_text\n\n if element_format != \"\":\n referring_prompt += element_format\n referring_prompt += \"\\n\\n\"\n\n # Format Action Prediction\n if action_format != \"\":\n referring_prompt += action_format\n referring_prompt += \"\\n\\n\"\n\n # Format Value Prediction\n if value_format != \"\":\n referring_prompt += value_format\n referring_prompt += \"\"\n\n return referring_prompt"
},
{
"identifier": "generate_new_query_prompt",
"path": "src/data_utils/format_prompt_utils.py",
"snippet": "def generate_new_query_prompt(system_prompt=\"\", task=\"\", previous_actions=None, question_description=\"\"):\n \"\"\"\n Generate the first phase prompt to ask model to generate general descriptions about {environment, high-level plans, next step action}\n Each experiment will have a similar prompt in this phase\n This prompt is used to generate models' thoughts without disrupt of formatting/referring prompts\n \"\"\"\n sys_role=\"\"+system_prompt\n query_text = \"\"\n\n # System Prompt\n query_text += \"You are asked to complete the following task: \"\n\n # Task Description\n query_text += task\n query_text += \"\\n\\n\"\n\n # Previous Actions\n previous_action_text = \"Previous Actions:\\n\"\n if previous_actions is None:\n previous_actions = []\n for action_text in previous_actions:\n previous_action_text += action_text\n previous_action_text += \"\\n\"\n query_text += previous_action_text\n query_text += \"\\n\"\n\n # Question Description\n query_text += question_description\n return [sys_role,query_text]"
}
] | from .format_prompt_utils import data_format_input_multichoice, \
format_options, generate_option_name, generate_new_referring_prompt, generate_new_query_prompt | 4,876 |
exp3_prompt_dict = {
"system_prompt": sys_prompt,
"question_description": question_description_new_exp3,
"referring_description": f"""""",
"element_format": """(Final Answer)
Finally, conclude your answer using the format below. Ensure your answer is strictly adhering to the format provided below. Please do not leave any explanation in your answers of the final standardized format part, and this final part should be clear and certain. The element, element type, element text, action and value should be in five separate lines.
Format:
ELEMENT: Please describe which element you need to operate with. Describe it as detailed as possible, including what it is and where it is.
ELEMENT TYPE: Please specify its type from these options: BUTTON, TEXTBOX, SELECTBOX, or LINK.
ELEMENT TEXT: Please provide the exact text displayed on the element. Do not invent or modify the text; reproduce it as-is from the screenshot.""",
"action_format": f"{action_format}",
"value_format": f"{value_format}"
}
##### SeeAct Online Prompts
seeact_online_sys_prompt = '''Imagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click on an element with the mouse, select an option, type text or press Enter with the keyboard. (For your understanding, they are like the click(), select_option() type() and keyboard.press('Enter') functions in playwright respectively) One next step means one operation within the four. Unlike humans, for typing (e.g., in text areas, text boxes) and selecting (e.g., from dropdown menus or <select> elements), you should try directly typing the input or selecting the choice, bypassing the need for an initial click. You should not attempt to create accounts, log in or do the final submission. Terminate when you deem the task complete or if it requires potentially harmful actions.'''
seeact_online_question_description_new_exp4 = '''The screenshot below shows the webpage you see. Follow the following guidance to think step by step before outlining the next action step at the current stage:
(Current Webpage Identification)
Firstly, think about what the current webpage is.
(Previous Action Analysis)
Secondly, combined with the screenshot, analyze each step of the previous action history and their intention one by one. Particularly, pay more attention to the last step, which may be more related to what you should do now as the next step. Specifically, if the last action involved a TYPE, always evaluate whether it necessitates a confirmation step, because typically a single TYPE action does not make effect. (often, simply pressing 'Enter', assuming the default element involved in the last action, unless other clear elements are present for operation).
(Screenshot Details Analysis)
Closely examine the screenshot to check the status of every part of the webpage to understand what you can operate with and what has been set or completed. You should closely examine the screenshot details to see what steps have been completed by previous actions even though you are given the textual previous actions. Because the textual history may not clearly and sufficiently record some effects of previous actions, you should closely evaluate the status of every part of the webpage to understand what you have done.
(Next Action Based on Webpage and Analysis)
Then, based on your analysis, in conjunction with human web browsing habits and the logic of web design, decide on the following action. And clearly outline which element in the webpage users will operate with as the first next target element, its detailed location, and the corresponding operation.
To be successful, it is important to follow the following rules:
1. You should only issue a valid action given the current observation.
2. You should only issue one action at a time
3. For handling the select dropdown elements on the webpage, it's not necessary for you to provide completely accurate options right now. The full list of options for these elements will be supplied later.'''
seeact_online_action_format = "ACTION: Choose an action from {CLICK, SELECT, TYPE, PRESS ENTER, TERMINATE, NONE}."
seeact_online_value_format = "VALUE: Provide additional input based on ACTION.\n\nThe VALUE means:\nIf ACTION == TYPE, specify the " \
"text to be typed.\nIf ACTION == SELECT, indicate the option to be chosen. Revise the selection value to align with the available options within the element.\nIf ACTION == CLICK, PRESS ENTER, TERMINATE or NONE, " \
"write \"None\"."
seeact_choice_prompt_dict = {
"system_prompt": seeact_online_sys_prompt,
"question_description": seeact_online_question_description_new_exp4,
"referring_description": f"""(Reiteration)
First, reiterate your next target element, its detailed location, and the corresponding operation.
(Multichoice Question)
Below is a multi-choice question, where the choices are elements in the webpage. All elements are arranged in the order based on their height on the webpage, from top to bottom (and from left to right). This arrangement can be used to locate them. From the screenshot, find out where and what each one is on the webpage, taking into account both their text content and HTML details. Then, determine whether one matches your target element. Please examine the choices one by one. Choose the matching one. If multiple options match your answer, choose the most likely one by re-examining the screenshot, the choices, and your further reasoning.""",
"element_format": """(Final Answer)
Finally, conclude your answer using the format below. Ensure your answer is strictly adhering to the format provided below. Please do not leave any explanation in your answers of the final standardized format part, and this final part should be clear and certain. The element choice, action, and value should be in three separate lines.
Format:
ELEMENT: The uppercase letter of your choice. (No need for PRESS ENTER)""",
"action_format": f"{seeact_online_action_format}",
"value_format": f"{seeact_online_value_format}"
}
def generate_prompt(experiment_split, task=None, previous=None, choices=None):
assert experiment_split != None, "Please specify the experiment split."
assert task != None, "Please input the task."
assert previous != None, "Please input the previous actions."
prompt_list = []
system_prompt_input = None
question_description_input = None
referring_input = None
element_format_input = None
action_format_input = None
value_format_input = None
if experiment_split in ["text","text_choice","4api"]:
system_prompt_input = exp4_prompt_dict["system_prompt"]
question_description_input = exp4_prompt_dict["question_description"]
referring_input = exp4_prompt_dict["referring_description"]
element_format_input = exp4_prompt_dict["element_format"]
action_format_input = exp4_prompt_dict["action_format"]
value_format_input = exp4_prompt_dict["value_format"]
prompt_list.extend(
generate_new_query_prompt(system_prompt=system_prompt_input, task=task, previous_actions=previous,
question_description=question_description_input))
prompt_list.append(
| # -*- coding: utf-8 -*-
# Copyright (c) 2024 OSU Natural Language Processing Group
#
# Licensed under the OpenRAIL-S License;
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.licenses.ai/ai-pubs-open-rails-vz1
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
sys_prompt = '''Imagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click an element with the mouse, select an option, or type text with the keyboard. (For your understanding, they are like the click(), select_option() and type() functions in playwright respectively) One next step means one operation within the three.'''
action_format = "ACTION: Choose an action from {CLICK, TYPE, SELECT}."
value_format = "VALUE: Provide additional input based on ACTION.\n\nThe VALUE means:\nIf ACTION == TYPE, specify the " \
"text to be typed.\nIf ACTION == SELECT, specify the option to be chosen.\nIf ACTION == CLICK, " \
"write \"None\"."
question_description_new_exp4 = '''The screenshot below shows the webpage you see. Follow the following guidance to think step by step before outlining the next action step at the current stage:
(Current Webpage Identification)
Firstly, think about what the current webpage is.
(Previous Action Analysis)
Secondly, combined with the screenshot, analyze each step of the previous action history and their intention one by one. Particularly, pay more attention to the last step, which may be more related to what you should do now as the next step.
(Screenshot Details Analysis)
Closely examine the screenshot to check the status of every part of the webpage to understand what you can operate with and what has been set or completed. You should closely examine the screenshot details to see what steps have been completed by previous actions even though you are given the textual previous actions. Because the textual history may not clearly and sufficiently record some effects of previous actions, you should closely evaluate the status of every part of the webpage to understand what you have done.
(Next Action Based on Webpage and Analysis)
Then, based on your analysis, in conjunction with human web browsing habits and the logic of web design, decide on the following action. And clearly outline which element in the webpage users will operate with as the first next target element, its detailed location, and the corresponding operation.
To be successful, it is important to follow the following rules:
1. You should only issue a valid action given the current observation.
2. You should only issue one action at a time'''
question_description_new_exp2 = '''The screenshot below shows the webpage you see. In the screenshot, some red bounding boxes and white-on-black uppercase letters at the bottom left corner of the bounding boxes have been manually added. You should ignore them for now. Follow the following guidance to think step by step before outlining the next action step at the current stage:
(Current Webpage Identification)
Firstly, think about what the current webpage is.
(Previous Action Analysis)
Secondly, combined with the screenshot, analyze each step of the previous action history and their intention one by one. Particularly, pay more attention to the last step, which may be more related to what you should do now as the next step.
(Screenshot Details Analysis)
Closely examine the screenshot to check the status of every part of the webpage to understand what you can operate with and what has been set or completed. You should closely examine the screenshot details to see what steps have been completed by previous actions even though you are given the textual previous actions. Because the textual history may not clearly and sufficiently record some effects of previous actions, you should closely evaluate the status of every part of the webpage to understand what you have done.
(Next Action Based on Webpage and Analysis)
Then, based on your analysis, in conjunction with human web browsing habits and the logic of web design, decide on the following action. And clearly outline which element in the webpage users will operate with as the first next target element, its detailed location, and the corresponding operation.
To be successful, it is important to follow the following rules:
1. You should only issue a valid action given the current observation.
2. You should only issue one action at a time.'''
question_description_new_exp3 = '''The screenshot below shows the webpage you see. Follow the following guidance to think step by step before outlining the next action step at the current stage:
(Current Webpage Identification)
Firstly, think about what the current webpage is.
(Previous Action Analysis)
Secondly, combined with the screenshot, analyze each step of the previous action history and their intention one by one. Particularly, pay more attention to the last step, which may be more related to what you should do now as the next step.
(Screenshot Details Analysis)
Closely examine the screenshot to check the status of every part of the webpage to understand what you can operate with and what has been set or completed. You should closely examine the screenshot details to see what steps have been completed by previous actions even though you are given the textual previous actions. Because the textual history may not clearly and sufficiently record some effects of previous actions, you should closely evaluate the status of every part of the webpage to understand what you have done.
(Next Action Based on Webpage and Analysis)
Then, based on your analysis, in conjunction with human web browsing habits and the logic of web design, decide on the following action. And clearly outline which element in the webpage users will operate with as the first next target element, its detailed location, and the corresponding operation. Please also closely examine the screenshot to adequately describe its position relative to nearby elements and its textual or visual content (if it has). If you find multiple elements similar to your target element, use a more precise description to ensure people can distinguish your target element from them through your answer.
To be successful, it is important to follow the following rules:
1. You should only issue a valid action given the current observation.
2. You should only issue one action at a time.'''
exp4_prompt_dict = {
"system_prompt": sys_prompt,
"question_description": question_description_new_exp4,
"referring_description": f"""(Reiteration)
First, reiterate your next target element, its detailed location, and the corresponding operation.
(Multichoice Question)
Below is a multi-choice question, where the choices are elements in the webpage. From the screenshot, find out where and what each one is on the webpage. Then, determine whether one matches your target element. Please examine the choices one by one. Choose the matching one. If multiple options match your answer, choose the most likely one by re-examining the screenshot, the choices, and your further reasoning.""",
"element_format": """(Final Answer)
Finally, conclude your answer using the format below. Ensure your answer is strictly adhering to the format provided below. Please do not leave any explanation in your answers of the final standardized format part, and this final part should be clear and certain. The element choice, action, and value should be in three separate lines.
Format:
ELEMENT: The uppercase letter of your choice.""",
"action_format": f"{action_format}",
"value_format": f"{value_format}"
}
exp2_prompt_dict = {
"system_prompt": sys_prompt,
"question_description": question_description_new_exp2,
"referring_description": f"""(Reiteration)
First, reiterate your next target element, its detailed location, and the corresponding operation.
(Verification with the Screenshot)
Then, please closely re-examine the screenshot to find whether your target element is marked by a red bounding box and has a white uppercase letter on a black background at the bottom left corner of the bounding box, which is positioned closely next to the bounding box. If yes, use that letter for your final answer. If not, please do not make them up. If it is not marked, please output "NA" as your target element in the following final answer part.""",
"element_format": """(Final Answer)
Finally, conclude your answer using the format below. Ensure your answer is strictly adhering to the format provided below. Please do not leave any explanation in your answers of the final standardized format part, and this final part should be clear and certain. The element choice, action, and value should be in three separate lines.
Format:
ELEMENT: The uppercase letter of your choice.""",
"action_format": f"{action_format}",
"value_format": f"{value_format}"
}
exp3_prompt_dict = {
"system_prompt": sys_prompt,
"question_description": question_description_new_exp3,
"referring_description": f"""""",
"element_format": """(Final Answer)
Finally, conclude your answer using the format below. Ensure your answer is strictly adhering to the format provided below. Please do not leave any explanation in your answers of the final standardized format part, and this final part should be clear and certain. The element, element type, element text, action and value should be in five separate lines.
Format:
ELEMENT: Please describe which element you need to operate with. Describe it as detailed as possible, including what it is and where it is.
ELEMENT TYPE: Please specify its type from these options: BUTTON, TEXTBOX, SELECTBOX, or LINK.
ELEMENT TEXT: Please provide the exact text displayed on the element. Do not invent or modify the text; reproduce it as-is from the screenshot.""",
"action_format": f"{action_format}",
"value_format": f"{value_format}"
}
##### SeeAct Online Prompts
seeact_online_sys_prompt = '''Imagine that you are imitating humans doing web navigation for a task step by step. At each stage, you can see the webpage like humans by a screenshot and know the previous actions before the current step decided by yourself through recorded history. You need to decide on the first following action to take. You can click on an element with the mouse, select an option, type text or press Enter with the keyboard. (For your understanding, they are like the click(), select_option() type() and keyboard.press('Enter') functions in playwright respectively) One next step means one operation within the four. Unlike humans, for typing (e.g., in text areas, text boxes) and selecting (e.g., from dropdown menus or <select> elements), you should try directly typing the input or selecting the choice, bypassing the need for an initial click. You should not attempt to create accounts, log in or do the final submission. Terminate when you deem the task complete or if it requires potentially harmful actions.'''
seeact_online_question_description_new_exp4 = '''The screenshot below shows the webpage you see. Follow the following guidance to think step by step before outlining the next action step at the current stage:
(Current Webpage Identification)
Firstly, think about what the current webpage is.
(Previous Action Analysis)
Secondly, combined with the screenshot, analyze each step of the previous action history and their intention one by one. Particularly, pay more attention to the last step, which may be more related to what you should do now as the next step. Specifically, if the last action involved a TYPE, always evaluate whether it necessitates a confirmation step, because typically a single TYPE action does not make effect. (often, simply pressing 'Enter', assuming the default element involved in the last action, unless other clear elements are present for operation).
(Screenshot Details Analysis)
Closely examine the screenshot to check the status of every part of the webpage to understand what you can operate with and what has been set or completed. You should closely examine the screenshot details to see what steps have been completed by previous actions even though you are given the textual previous actions. Because the textual history may not clearly and sufficiently record some effects of previous actions, you should closely evaluate the status of every part of the webpage to understand what you have done.
(Next Action Based on Webpage and Analysis)
Then, based on your analysis, in conjunction with human web browsing habits and the logic of web design, decide on the following action. And clearly outline which element in the webpage users will operate with as the first next target element, its detailed location, and the corresponding operation.
To be successful, it is important to follow the following rules:
1. You should only issue a valid action given the current observation.
2. You should only issue one action at a time
3. For handling the select dropdown elements on the webpage, it's not necessary for you to provide completely accurate options right now. The full list of options for these elements will be supplied later.'''
seeact_online_action_format = "ACTION: Choose an action from {CLICK, SELECT, TYPE, PRESS ENTER, TERMINATE, NONE}."
seeact_online_value_format = "VALUE: Provide additional input based on ACTION.\n\nThe VALUE means:\nIf ACTION == TYPE, specify the " \
"text to be typed.\nIf ACTION == SELECT, indicate the option to be chosen. Revise the selection value to align with the available options within the element.\nIf ACTION == CLICK, PRESS ENTER, TERMINATE or NONE, " \
"write \"None\"."
seeact_choice_prompt_dict = {
"system_prompt": seeact_online_sys_prompt,
"question_description": seeact_online_question_description_new_exp4,
"referring_description": f"""(Reiteration)
First, reiterate your next target element, its detailed location, and the corresponding operation.
(Multichoice Question)
Below is a multi-choice question, where the choices are elements in the webpage. All elements are arranged in the order based on their height on the webpage, from top to bottom (and from left to right). This arrangement can be used to locate them. From the screenshot, find out where and what each one is on the webpage, taking into account both their text content and HTML details. Then, determine whether one matches your target element. Please examine the choices one by one. Choose the matching one. If multiple options match your answer, choose the most likely one by re-examining the screenshot, the choices, and your further reasoning.""",
"element_format": """(Final Answer)
Finally, conclude your answer using the format below. Ensure your answer is strictly adhering to the format provided below. Please do not leave any explanation in your answers of the final standardized format part, and this final part should be clear and certain. The element choice, action, and value should be in three separate lines.
Format:
ELEMENT: The uppercase letter of your choice. (No need for PRESS ENTER)""",
"action_format": f"{seeact_online_action_format}",
"value_format": f"{seeact_online_value_format}"
}
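# Editorial illustration (not part of the original prompts): a reply that follows the
# standardized format requested above would end with three lines such as the hypothetical
# example below, with the element letter, the action, and the value each on its own line.
#
#   ELEMENT: B
#   ACTION: TYPE
#   VALUE: wireless headphones
#
# Only TYPE and SELECT carry text in VALUE; for CLICK, PRESS ENTER, TERMINATE or NONE the
# prompts above ask for VALUE to be written as "None".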
def generate_prompt(experiment_split, task=None, previous=None, choices=None):
assert experiment_split != None, "Please specify the experiment split."
assert task != None, "Please input the task."
assert previous != None, "Please input the previous actions."
prompt_list = []
system_prompt_input = None
question_description_input = None
referring_input = None
element_format_input = None
action_format_input = None
value_format_input = None
if experiment_split in ["text","text_choice","4api"]:
system_prompt_input = exp4_prompt_dict["system_prompt"]
question_description_input = exp4_prompt_dict["question_description"]
referring_input = exp4_prompt_dict["referring_description"]
element_format_input = exp4_prompt_dict["element_format"]
action_format_input = exp4_prompt_dict["action_format"]
value_format_input = exp4_prompt_dict["value_format"]
prompt_list.extend(
generate_new_query_prompt(system_prompt=system_prompt_input, task=task, previous_actions=previous,
question_description=question_description_input))
prompt_list.append( | generate_new_referring_prompt(referring_description=referring_input, element_format=element_format_input, | 3 | 2023-12-21 18:22:11+00:00 | 8k |
DeepWok/mase | machop/chop/models/manual/opt_quantized/modeling_opt.py | [
{
"identifier": "get_quantized_cls",
"path": "machop/chop/models/manual/quant_utils.py",
"snippet": "def get_quantized_cls(mase_op: str, quant_config: dict) -> type:\n quant_arith = quant_config[\"name\"]\n return quantized_module_map[f\"{mase_op}_{quant_arith}\"]"
},
{
"identifier": "get_quantized_func",
"path": "machop/chop/models/manual/quant_utils.py",
"snippet": "def get_quantized_func(mase_op: str, quant_config: dict) -> Callable:\n quant_arith = quant_config[\"name\"]\n return quantized_func_map[f\"{mase_op}_{quant_arith}\"]"
},
{
"identifier": "OPTQuantizedConfig",
"path": "machop/chop/models/manual/opt_quantized/configuration_opt.py",
"snippet": "class OPTQuantizedConfig(PretrainedConfig):\n r\"\"\"\n This is the configuration class to store the configuration of a [`OPTModel`]. It is used to instantiate a OPT model\n according to the specified arguments, defining the model architecture. Instantiating a configuration with the\n defaults will yield a similar configuration to that of the OPT\n [facebook/opt-350m](https://huggingface.co/facebook/opt-350m) architecture.\n\n Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\n documentation from [`PretrainedConfig`] for more information.\n\n\n Args:\n vocab_size (`int`, *optional*, defaults to 50272):\n Vocabulary size of the OPT model. Defines the number of different tokens that can be represented by the\n `inputs_ids` passed when calling [`OPTModel`]\n hidden_size (`int`, *optional*, defaults to 768):\n Dimensionality of the layers and the pooler layer.\n num_hidden_layers (`int`, *optional*, defaults to 12):\n Number of decoder layers.\n ffn_dim (`int`, *optional*, defaults to 3072):\n Dimensionality of the \"intermediate\" (often named feed-forward) layer in decoder.\n num_attention_heads (`int`, *optional*, defaults to 12):\n Number of attention heads for each attention layer in the Transformer decoder.\n activation_function (`str` or `function`, *optional*, defaults to `\"relu\"`):\n The non-linear activation function (function or string) in the encoder and pooler. If string, `\"gelu\"`,\n `\"relu\"`, `\"silu\"` and `\"gelu_new\"` are supported.\n max_position_embeddings (`int`, *optional*, defaults to 2048):\n The maximum sequence length that this model might ever be used with. Typically set this to something large\n just in case (e.g., 512 or 1024 or 2048).\n do_layer_norm_before (`bool`, *optional*, defaults to `True`):\n Whether to perform layer normalization before the attention block.\n word_embed_proj_dim (`int`, *optional*):\n `word_embed_proj_dim` can be set to down-project word embeddings, *e.g.* `opt-350m`. Defaults to\n `hidden_size`.\n dropout (`float`, *optional*, defaults to 0.1):\n The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.\n attention_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n layerdrop (`float`, *optional*, defaults to 0.0):\n The LayerDrop probability. 
See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more\n details.\n init_std (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n enable_bias (`bool`, *optional*, defaults to `True`):\n Whether or not if the linear layers in the attention blocks should use the bias term.\n layer_norm_elementwise_affine (`bool`, *optional*, defaults to `True`):\n Whether or not if the layer norms should have learnable parameters.\n\n Example:\n\n ```python\n >>> from transformers import OPTConfig, OPTModel\n\n >>> # Initializing a OPT facebook/opt-large style configuration\n >>> configuration = OPTConfig()\n\n >>> # Initializing a model (with random weights) from the facebook/opt-large style configuration\n >>> model = OPTModel(configuration)\n\n >>> # Accessing the model configuration\n >>> configuration = model.config\n ```\"\"\"\n model_type = \"opt\"\n keys_to_ignore_at_inference = [\"past_key_values\"]\n\n def __init__(\n self,\n vocab_size=50272,\n hidden_size=768,\n num_hidden_layers=12,\n ffn_dim=3072,\n max_position_embeddings=2048,\n do_layer_norm_before=True,\n _remove_final_layer_norm=False,\n word_embed_proj_dim=None,\n dropout=0.1,\n attention_dropout=0.0,\n num_attention_heads=12,\n activation_function=\"relu\",\n layerdrop=0.0,\n init_std=0.02,\n use_cache=True,\n pad_token_id=1,\n bos_token_id=2,\n eos_token_id=2,\n enable_bias=True,\n layer_norm_elementwise_affine=True,\n quant_config: dict | str = None,\n **kwargs,\n ):\n self.vocab_size = vocab_size\n self.max_position_embeddings = max_position_embeddings\n self.num_attention_heads = num_attention_heads\n self.word_embed_proj_dim = (\n word_embed_proj_dim if word_embed_proj_dim is not None else hidden_size\n )\n self.ffn_dim = ffn_dim\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.dropout = dropout\n self.attention_dropout = attention_dropout\n self.activation_function = activation_function\n self.init_std = init_std\n self.layerdrop = layerdrop\n self.use_cache = use_cache\n self.do_layer_norm_before = do_layer_norm_before\n # We keep these variables at `True` for backward compatibility.\n self.enable_bias = enable_bias\n self.layer_norm_elementwise_affine = layer_norm_elementwise_affine\n\n # Note that the only purpose of `_remove_final_layer_norm` is to keep backward compatibility\n # with checkpoints that have been fine-tuned before transformers v4.20.1\n # see https://github.com/facebookresearch/metaseq/pull/164\n self._remove_final_layer_norm = _remove_final_layer_norm\n # if quant_config is not None:\n # quant_config = parse_opt_quantized_config(quant_config, num_hidden_layers)\n\n self.quant_config = quant_config\n super().__init__(\n pad_token_id=pad_token_id,\n bos_token_id=bos_token_id,\n eos_token_id=eos_token_id,\n **kwargs,\n )\n\n def __setattr__(self, key, value):\n if key == \"quant_config\" and value is not None:\n value = parse_opt_quantized_config(\n config=value, num_hidden_layers=self.num_hidden_layers\n )\n return super().__setattr__(key, value)"
}
] | import random
import torch
import torch.utils.checkpoint
from typing import List, Optional, Tuple, Union
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (
BaseModelOutputWithPast,
CausalLMOutputWithPast,
QuestionAnsweringModelOutput,
SequenceClassifierOutputWithPast,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ..quant_utils import get_quantized_cls, get_quantized_func
from .configuration_opt import OPTQuantizedConfig | 3,926 | # and adjust num_embeddings appropriately. Other models don't have this hack
self.offset = 2
super().__init__(num_embeddings + self.offset, embedding_dim)
def forward(
self, attention_mask: torch.LongTensor, past_key_values_length: int = 0
):
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
attention_mask = attention_mask.long()
# create positions depending on attention_mask
positions = (
torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask
).long() - 1
# cut positions if `past_key_values_length` is > 0
positions = positions[:, past_key_values_length:]
return super().forward(positions + self.offset)
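# Editorial sketch (not in the original file): for a left-padded attention_mask of
# [[0, 0, 1, 1, 1]] and past_key_values_length = 0, the cumsum above gives
# positions = [[-1, -1, 0, 1, 2]], so the embedding lookup happens at
# positions + self.offset = [[1, 1, 2, 3, 4]]; all padded slots collapse onto index 1.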
class OPTQauntizedAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
quant_config: dict = None,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
if (self.head_dim * num_heads) != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.is_decoder = is_decoder
# fmt:off
self.k_proj = get_quantized_cls("linear", quant_config["k_proj"])(embed_dim, embed_dim, bias=bias, config=quant_config["k_proj"])
self.q_proj = get_quantized_cls("linear", quant_config["q_proj"])(embed_dim, embed_dim, bias=bias, config=quant_config["q_proj"])
self.v_proj = get_quantized_cls("linear", quant_config["v_proj"])(embed_dim, embed_dim, bias=bias, config=quant_config["v_proj"])
self.out_proj = get_quantized_cls("linear", quant_config["out_proj"])(embed_dim, embed_dim, bias=bias, config=quant_config["out_proj"])
self.quant_config = quant_config
# fmt:on
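# Editorial sketch (assumption, not taken from the repository): each per-projection entry
# of quant_config is expected to be a dict whose "name" field selects the quantization
# arithmetic, e.g. a hypothetical
#   quant_config = {"q_proj": {"name": "integer", ...}, "k_proj": {"name": "integer", ...}, ...}
# get_quantized_cls("linear", quant_config["q_proj"]) then resolves to the class registered
# as f"linear_{name}" in quantized_module_map, and the same entry is passed on as `config=`
# to that class's constructor.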
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return (
tensor.view(bsz, seq_len, self.num_heads, self.head_dim)
.transpose(1, 2)
.contiguous()
)
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, _ = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = torch.cat([past_key_value[0], key_states], dim=2)
value_states = torch.cat([past_key_value[1], value_states], dim=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
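# Editorial note: at this point key_states / value_states have shape
# [bsz, num_heads, past_len + tgt_len, head_dim]; caching them as past_key_value lets later
# decoding steps pass only the newest token (tgt_len == 1) while still attending over the
# full prefix.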
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
src_len = key_states.size(1)
# *: bmm
# attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
# fmt:off
| # coding=utf-8
# Copyright 2022 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch OPT model."""
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "facebook/opt-350m"
_CONFIG_FOR_DOC = "OPTConfig"
# Base model docstring
_EXPECTED_OUTPUT_SHAPE = [1, 8, 1024]
# SequenceClassification docstring
_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = "ArthurZ/opt-350m-dummy-sc"
_SEQ_CLASS_EXPECTED_LOSS = 1.71
_SEQ_CLASS_EXPECTED_OUTPUT = "'LABEL_0'"
OPT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"facebook/opt-125m",
"facebook/opt-350m",
"facebook/opt-1.3b",
"facebook/opt-2.7b",
"facebook/opt-6.7b",
"facebook/opt-13b",
"facebook/opt-30b",
# See all OPT models at https://huggingface.co/models?filter=opt
]
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(
input_ids_shape: torch.Size,
dtype: torch.dtype,
device: torch.device,
past_key_values_length: int = 0,
):
"""
Make causal mask used for bi-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full(
(tgt_len, tgt_len),
torch.tensor(torch.finfo(dtype).min, device=device),
device=device,
)
mask_cond = torch.arange(mask.size(-1), device=device)
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat(
[
torch.zeros(
tgt_len, past_key_values_length, dtype=dtype, device=device
),
mask,
],
dim=-1,
)
return mask[None, None, :, :].expand(
bsz, 1, tgt_len, tgt_len + past_key_values_length
)
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(
inverted_mask.to(torch.bool), torch.finfo(dtype).min
)
class OPTLearnedPositionalEmbedding(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
"""
def __init__(self, num_embeddings: int, embedding_dim: int):
# OPT is set up so that if padding_idx is specified then offset the embedding ids by 2
# and adjust num_embeddings appropriately. Other models don't have this hack
self.offset = 2
super().__init__(num_embeddings + self.offset, embedding_dim)
def forward(
self, attention_mask: torch.LongTensor, past_key_values_length: int = 0
):
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
attention_mask = attention_mask.long()
# create positions depending on attention_mask
positions = (
torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask
).long() - 1
# cut positions if `past_key_values_length` is > 0
positions = positions[:, past_key_values_length:]
return super().forward(positions + self.offset)
class OPTQauntizedAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
quant_config: dict = None,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
if (self.head_dim * num_heads) != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.is_decoder = is_decoder
# fmt:off
self.k_proj = get_quantized_cls("linear", quant_config["k_proj"])(embed_dim, embed_dim, bias=bias, config=quant_config["k_proj"])
self.q_proj = get_quantized_cls("linear", quant_config["q_proj"])(embed_dim, embed_dim, bias=bias, config=quant_config["q_proj"])
self.v_proj = get_quantized_cls("linear", quant_config["v_proj"])(embed_dim, embed_dim, bias=bias, config=quant_config["v_proj"])
self.out_proj = get_quantized_cls("linear", quant_config["out_proj"])(embed_dim, embed_dim, bias=bias, config=quant_config["out_proj"])
self.quant_config = quant_config
# fmt:on
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return (
tensor.view(bsz, seq_len, self.num_heads, self.head_dim)
.transpose(1, 2)
.contiguous()
)
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, _ = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = torch.cat([past_key_value[0], key_states], dim=2)
value_states = torch.cat([past_key_value[1], value_states], dim=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
src_len = key_states.size(1)
# *: bmm
# attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
# fmt:off | attn_weights = get_quantized_func("bmm", self.quant_config["bmm_0"])(query_states, key_states.transpose(1, 2), config=self.quant_config["bmm_0"]) | 1 | 2023-12-18 12:50:53+00:00 | 8k |
byeongjun-park/HarmonyView | ldm/models/diffusion/sync_dreamer_attention.py | [
{
"identifier": "default",
"path": "ldm/modules/attention.py",
"snippet": "def exists(val):\ndef uniq(arr):\ndef default(val, d):\ndef max_neg_value(t):\ndef init_(tensor):\n def __init__(self, dim_in, dim_out):\n def forward(self, x):\n def __init__(self, dim_in, dim_out):\n def forward(self, x):\n def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):\n def forward(self, x):\ndef zero_module(module):\ndef Normalize(in_channels):\n def __init__(self, dim, heads=4, dim_head=32):\n def forward(self, x):\n def __init__(self, in_channels):\n def forward(self, x):\n def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.):\n def forward(self, x, context=None, mask=None):\n def __init__(self, dim, n_heads, d_head, context_dim=None, checkpoint=True):\n def forward(self, x, context=None):\n def _forward(self, x, context):\n def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True, disable_self_attn=False):\n def forward(self, x, context=None):\n def _forward(self, x, context=None):\n def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):\n def forward(self, x):\n def __init__(self, in_channels, n_heads, d_head,\n depth=1, dropout=0., context_dim=None,\n disable_self_attn=False):\n def forward(self, x, context=None):\nclass GEGLU(nn.Module):\nclass ConvGEGLU(nn.Module):\nclass FeedForward(nn.Module):\nclass LinearAttention(nn.Module):\nclass SpatialSelfAttention(nn.Module):\nclass CrossAttention(nn.Module):\nclass BasicSpatialTransformer(nn.Module):\nclass BasicTransformerBlock(nn.Module):\nclass ConvFeedForward(nn.Module):\nclass SpatialTransformer(nn.Module):"
},
{
"identifier": "UNetModel",
"path": "ldm/modules/diffusionmodules/openaimodel.py",
"snippet": "class UNetModel(nn.Module):\n \"\"\"\n The full UNet model with attention and timestep embedding.\n :param in_channels: channels in the input Tensor.\n :param model_channels: base channel count for the model.\n :param out_channels: channels in the output Tensor.\n :param num_res_blocks: number of residual blocks per downsample.\n :param attention_resolutions: a collection of downsample rates at which\n attention will take place. May be a set, list, or tuple.\n For example, if this contains 4, then at 4x downsampling, attention\n will be used.\n :param dropout: the dropout probability.\n :param channel_mult: channel multiplier for each level of the UNet.\n :param conv_resample: if True, use learned convolutions for upsampling and\n downsampling.\n :param dims: determines if the signal is 1D, 2D, or 3D.\n :param num_classes: if specified (as an int), then this model will be\n class-conditional with `num_classes` classes.\n :param use_checkpoint: use gradient checkpointing to reduce memory usage.\n :param num_heads: the number of attention heads in each attention layer.\n :param num_heads_channels: if specified, ignore num_heads and instead use\n a fixed channel width per attention head.\n :param num_heads_upsample: works with num_heads to set a different number\n of heads for upsampling. Deprecated.\n :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.\n :param resblock_updown: use residual blocks for up/downsampling.\n :param use_new_attention_order: use a different attention pattern for potentially\n increased efficiency.\n \"\"\"\n\n def __init__(\n self,\n image_size,\n in_channels,\n model_channels,\n out_channels,\n num_res_blocks,\n attention_resolutions,\n dropout=0,\n channel_mult=(1, 2, 4, 8),\n conv_resample=True,\n dims=2,\n num_classes=None,\n use_checkpoint=False,\n use_fp16=False,\n num_heads=-1,\n num_head_channels=-1,\n num_heads_upsample=-1,\n use_scale_shift_norm=False,\n resblock_updown=False,\n use_new_attention_order=False,\n use_spatial_transformer=False, # custom transformer support\n transformer_depth=1, # custom transformer support\n context_dim=None, # custom transformer support\n n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model\n legacy=True,\n disable_self_attentions=None,\n num_attention_blocks=None\n ):\n super().__init__()\n if use_spatial_transformer:\n assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'\n\n if context_dim is not None:\n assert use_spatial_transformer, 'Fool!! 
You forgot to use the spatial transformer for your cross-attention conditioning...'\n from omegaconf.listconfig import ListConfig\n if type(context_dim) == ListConfig:\n context_dim = list(context_dim)\n\n if num_heads_upsample == -1:\n num_heads_upsample = num_heads\n\n if num_heads == -1:\n assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'\n\n if num_head_channels == -1:\n assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'\n\n self.image_size = image_size\n self.in_channels = in_channels\n self.model_channels = model_channels\n self.out_channels = out_channels\n if isinstance(num_res_blocks, int):\n self.num_res_blocks = len(channel_mult) * [num_res_blocks]\n else:\n if len(num_res_blocks) != len(channel_mult):\n raise ValueError(\"provide num_res_blocks either as an int (globally constant) or \"\n \"as a list/tuple (per-level) with the same length as channel_mult\")\n self.num_res_blocks = num_res_blocks\n #self.num_res_blocks = num_res_blocks\n if disable_self_attentions is not None:\n # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not\n assert len(disable_self_attentions) == len(channel_mult)\n if num_attention_blocks is not None:\n assert len(num_attention_blocks) == len(self.num_res_blocks)\n assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks))))\n print(f\"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. \"\n f\"This option has LESS priority than attention_resolutions {attention_resolutions}, \"\n f\"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, \"\n f\"attention will still not be set.\") # todo: convert to warning\n\n self.attention_resolutions = attention_resolutions\n self.dropout = dropout\n self.channel_mult = channel_mult\n self.conv_resample = conv_resample\n self.num_classes = num_classes\n self.use_checkpoint = use_checkpoint\n self.dtype = th.float16 if use_fp16 else th.float32\n self.num_heads = num_heads\n self.num_head_channels = num_head_channels\n self.num_heads_upsample = num_heads_upsample\n self.predict_codebook_ids = n_embed is not None\n\n time_embed_dim = model_channels * 4\n self.time_embed = nn.Sequential(\n linear(model_channels, time_embed_dim),\n nn.SiLU(),\n linear(time_embed_dim, time_embed_dim),\n )\n\n if self.num_classes is not None:\n self.label_emb = nn.Embedding(num_classes, time_embed_dim)\n\n self.input_blocks = nn.ModuleList(\n [\n TimestepEmbedSequential(\n conv_nd(dims, in_channels, model_channels, 3, padding=1)\n )\n ]\n ) # 0\n self._feature_size = model_channels\n input_block_chans = [model_channels]\n ch = model_channels\n ds = 1\n for level, mult in enumerate(channel_mult):\n for nr in range(self.num_res_blocks[level]):\n layers = [\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n out_channels=mult * model_channels,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n )\n ]\n ch = mult * model_channels\n if ds in attention_resolutions: # always True\n if num_head_channels == -1:\n dim_head = ch // num_heads\n else:\n num_heads = ch // num_head_channels\n dim_head = num_head_channels\n if legacy:\n #num_heads = 1\n dim_head = ch // num_heads if use_spatial_transformer else num_head_channels\n if exists(disable_self_attentions):\n disabled_sa = disable_self_attentions[level]\n else:\n disabled_sa = False\n\n if not exists(num_attention_blocks) or nr < 
num_attention_blocks[level]:\n layers.append(\n AttentionBlock(\n ch,\n use_checkpoint=use_checkpoint,\n num_heads=num_heads,\n num_head_channels=dim_head,\n use_new_attention_order=use_new_attention_order,\n ) if not use_spatial_transformer else SpatialTransformer(\n ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,\n disable_self_attn=disabled_sa\n )\n )\n self.input_blocks.append(TimestepEmbedSequential(*layers))\n self._feature_size += ch\n input_block_chans.append(ch)\n if level != len(channel_mult) - 1:\n out_ch = ch\n self.input_blocks.append(\n TimestepEmbedSequential(\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n out_channels=out_ch,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n down=True,\n )\n if resblock_updown\n else Downsample(\n ch, conv_resample, dims=dims, out_channels=out_ch\n )\n )\n )\n ch = out_ch\n input_block_chans.append(ch)\n ds *= 2\n self._feature_size += ch\n\n if num_head_channels == -1:\n dim_head = ch // num_heads\n else:\n num_heads = ch // num_head_channels\n dim_head = num_head_channels\n if legacy:\n #num_heads = 1\n dim_head = ch // num_heads if use_spatial_transformer else num_head_channels\n self.middle_block = TimestepEmbedSequential(\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n ),\n AttentionBlock(\n ch,\n use_checkpoint=use_checkpoint,\n num_heads=num_heads,\n num_head_channels=dim_head,\n use_new_attention_order=use_new_attention_order,\n ) if not use_spatial_transformer else SpatialTransformer( # always uses a self-attn\n ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim\n ),\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n ),\n )\n self._feature_size += ch\n\n self.output_blocks = nn.ModuleList([])\n for level, mult in list(enumerate(channel_mult))[::-1]:\n for i in range(self.num_res_blocks[level] + 1):\n ich = input_block_chans.pop()\n layers = [\n ResBlock(\n ch + ich,\n time_embed_dim,\n dropout,\n out_channels=model_channels * mult,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n )\n ]\n ch = model_channels * mult\n if ds in attention_resolutions:\n if num_head_channels == -1:\n dim_head = ch // num_heads\n else:\n num_heads = ch // num_head_channels\n dim_head = num_head_channels\n if legacy:\n #num_heads = 1\n dim_head = ch // num_heads if use_spatial_transformer else num_head_channels\n if exists(disable_self_attentions):\n disabled_sa = disable_self_attentions[level]\n else:\n disabled_sa = False\n\n if not exists(num_attention_blocks) or i < num_attention_blocks[level]:\n layers.append(\n AttentionBlock(\n ch,\n use_checkpoint=use_checkpoint,\n num_heads=num_heads_upsample,\n num_head_channels=dim_head,\n use_new_attention_order=use_new_attention_order,\n ) if not use_spatial_transformer else SpatialTransformer(\n ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,\n disable_self_attn=disabled_sa\n )\n )\n if level and i == self.num_res_blocks[level]:\n out_ch = ch\n layers.append(\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n out_channels=out_ch,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n up=True,\n )\n if resblock_updown\n else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)\n )\n ds //= 2\n 
self.output_blocks.append(TimestepEmbedSequential(*layers))\n self._feature_size += ch\n\n self.out = nn.Sequential(\n normalization(ch),\n nn.SiLU(),\n zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),\n )\n if self.predict_codebook_ids:\n self.id_predictor = nn.Sequential(\n normalization(ch),\n conv_nd(dims, model_channels, n_embed, 1),\n #nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits\n )\n\n def convert_to_fp16(self):\n \"\"\"\n Convert the torso of the model to float16.\n \"\"\"\n self.input_blocks.apply(convert_module_to_f16)\n self.middle_block.apply(convert_module_to_f16)\n self.output_blocks.apply(convert_module_to_f16)\n\n def convert_to_fp32(self):\n \"\"\"\n Convert the torso of the model to float32.\n \"\"\"\n self.input_blocks.apply(convert_module_to_f32)\n self.middle_block.apply(convert_module_to_f32)\n self.output_blocks.apply(convert_module_to_f32)\n\n def forward(self, x, timesteps=None, context=None, y=None,**kwargs):\n \"\"\"\n Apply the model to an input batch.\n :param x: an [N x C x ...] Tensor of inputs.\n :param timesteps: a 1-D batch of timesteps.\n :param context: conditioning plugged in via crossattn\n :param y: an [N] Tensor of labels, if class-conditional.\n :return: an [N x C x ...] Tensor of outputs.\n \"\"\"\n assert (y is not None) == (\n self.num_classes is not None\n ), \"must specify y if and only if the model is class-conditional\"\n hs = []\n t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) # N\n emb = self.time_embed(t_emb) #\n\n if self.num_classes is not None:\n assert y.shape == (x.shape[0],)\n emb = emb + self.label_emb(y)\n\n h = x.type(self.dtype)\n for module in self.input_blocks:\n h = module(h, emb, context) # conv\n hs.append(h)\n h = self.middle_block(h, emb, context)\n for module in self.output_blocks:\n h = th.cat([h, hs.pop()], dim=1)\n h = module(h, emb, context)\n h = h.type(x.dtype)\n if self.predict_codebook_ids:\n return self.id_predictor(h)\n else:\n return self.out(h)"
},
{
"identifier": "timestep_embedding",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):\n \"\"\"\n Create sinusoidal timestep embeddings.\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an [N x dim] Tensor of positional embeddings.\n \"\"\"\n if not repeat_only:\n half = dim // 2\n freqs = torch.exp(\n -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half\n ).to(device=timesteps.device)\n args = timesteps[:, None].float() * freqs[None]\n embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)\n if dim % 2:\n embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)\n else:\n embedding = repeat(timesteps, 'b -> b d', d=dim)\n return embedding"
}
] | import torch
import torch.nn as nn
from ldm.modules.attention import default, zero_module, checkpoint
from ldm.modules.diffusionmodules.openaimodel import UNetModel
from ldm.modules.diffusionmodules.util import timestep_embedding | 5,595 |
class DepthAttention(nn.Module):
def __init__(self, query_dim, context_dim, heads, dim_head, output_bias=True):
super().__init__()
inner_dim = dim_head * heads
context_dim = default(context_dim, query_dim)
self.scale = dim_head ** -0.5
self.heads = heads
self.dim_head = dim_head
self.to_q = nn.Conv2d(query_dim, inner_dim, 1, 1, bias=False)
self.to_k = nn.Conv3d(context_dim, inner_dim, 1, 1, bias=False)
self.to_v = nn.Conv3d(context_dim, inner_dim, 1, 1, bias=False)
if output_bias:
self.to_out = nn.Conv2d(inner_dim, query_dim, 1, 1)
else:
self.to_out = nn.Conv2d(inner_dim, query_dim, 1, 1, bias=False)
def forward(self, x, context):
"""
@param x: b,f0,h,w
@param context: b,f1,d,h,w
@return:
"""
hn, hd = self.heads, self.dim_head
b, _, h, w = x.shape
b, _, d, h, w = context.shape
q = self.to_q(x).reshape(b,hn,hd,h,w) # b,t,h,w
k = self.to_k(context).reshape(b,hn,hd,d,h,w) # b,t,d,h,w
v = self.to_v(context).reshape(b,hn,hd,d,h,w) # b,t,d,h,w
sim = torch.sum(q.unsqueeze(3) * k, 2) * self.scale # b,hn,d,h,w
attn = sim.softmax(dim=2)
# b,hn,hd,d,h,w * b,hn,1,d,h,w
out = torch.sum(v * attn.unsqueeze(2), 3) # b,hn,hd,h,w
out = out.reshape(b,hn*hd,h,w)
return self.to_out(out)
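# Editorial shape trace (illustrative; the concrete sizes are assumptions): with heads=4,
# dim_head=16, a 32x32 feature map and depth d=48,
#   x:       [b, query_dim, 32, 32]        -> q:    [b, 4, 16, 32, 32]
#   context: [b, context_dim, 48, 32, 32]  -> k, v: [b, 4, 16, 48, 32, 32]
#   sim  = sum(q.unsqueeze(3) * k, dim=2) * scale   -> [b, 4, 48, 32, 32]
#   attn = sim.softmax(dim=2)                        # softmax over the depth axis d
#   out  = sum(v * attn.unsqueeze(2), dim=3)         -> [b, 4, 16, 32, 32] -> [b, 64, 32, 32]
# i.e. each spatial location attends over the d depth samples at that same pixel.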
class DepthTransformer(nn.Module):
def __init__(self, dim, n_heads, d_head, context_dim=None, checkpoint=True):
super().__init__()
inner_dim = n_heads * d_head
self.proj_in = nn.Sequential(
nn.Conv2d(dim, inner_dim, 1, 1),
nn.GroupNorm(8, inner_dim),
nn.SiLU(True),
)
self.proj_context = nn.Sequential(
nn.Conv3d(context_dim, context_dim, 1, 1, bias=False), # no bias
nn.GroupNorm(8, context_dim),
nn.ReLU(True), # ReLU only, so that a zero input yields a zero output
)
self.depth_attn = DepthAttention(query_dim=inner_dim, heads=n_heads, dim_head=d_head, context_dim=context_dim, output_bias=False) # is a self-attention if not self.disable_self_attn
self.proj_out = nn.Sequential(
nn.GroupNorm(8, inner_dim),
nn.ReLU(True),
nn.Conv2d(inner_dim, inner_dim, 3, 1, 1, bias=False),
nn.GroupNorm(8, inner_dim),
nn.ReLU(True),
zero_module(nn.Conv2d(inner_dim, dim, 3, 1, 1, bias=False)),
)
self.checkpoint = checkpoint
def forward(self, x, context=None):
return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint)
def _forward(self, x, context):
x_in = x
x = self.proj_in(x)
context = self.proj_context(context)
x = self.depth_attn(x, context)
x = self.proj_out(x) + x_in
return x
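# Editorial note: proj_out ends with zero_module(...), so the depth-attention branch starts
# out producing zeros and _forward initially reduces to the identity mapping (x_in); the
# conditioning signal is blended in only as those weights are trained. The same zero-init
# trick appears in the UNet's zero_module output convolutions.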
class DepthWiseAttention(UNetModel):
def __init__(self, volume_dims=(5,16,32,64), *args, **kwargs):
super().__init__(*args, **kwargs)
# num_heads = 4
model_channels = kwargs['model_channels']
channel_mult = kwargs['channel_mult']
d0,d1,d2,d3 = volume_dims
# 4
ch = model_channels*channel_mult[2]
self.middle_conditions = DepthTransformer(ch, 4, d3 // 2, context_dim=d3)
self.output_conditions=nn.ModuleList()
self.output_b2c = {3:0,4:1,5:2,6:3,7:4,8:5,9:6,10:7,11:8}
# 8
ch = model_channels*channel_mult[2]
self.output_conditions.append(DepthTransformer(ch, 4, d2 // 2, context_dim=d2)) # 0
self.output_conditions.append(DepthTransformer(ch, 4, d2 // 2, context_dim=d2)) # 1
# 16
self.output_conditions.append(DepthTransformer(ch, 4, d1 // 2, context_dim=d1)) # 2
ch = model_channels*channel_mult[1]
self.output_conditions.append(DepthTransformer(ch, 4, d1 // 2, context_dim=d1)) # 3
self.output_conditions.append(DepthTransformer(ch, 4, d1 // 2, context_dim=d1)) # 4
# 32
self.output_conditions.append(DepthTransformer(ch, 4, d0 // 2, context_dim=d0)) # 5
ch = model_channels*channel_mult[0]
self.output_conditions.append(DepthTransformer(ch, 4, d0 // 2, context_dim=d0)) # 6
self.output_conditions.append(DepthTransformer(ch, 4, d0 // 2, context_dim=d0)) # 7
self.output_conditions.append(DepthTransformer(ch, 4, d0 // 2, context_dim=d0)) # 8
def forward(self, x, timesteps=None, context=None, source_dict=None, **kwargs):
hs = []
|
class DepthAttention(nn.Module):
def __init__(self, query_dim, context_dim, heads, dim_head, output_bias=True):
super().__init__()
inner_dim = dim_head * heads
context_dim = default(context_dim, query_dim)
self.scale = dim_head ** -0.5
self.heads = heads
self.dim_head = dim_head
self.to_q = nn.Conv2d(query_dim, inner_dim, 1, 1, bias=False)
self.to_k = nn.Conv3d(context_dim, inner_dim, 1, 1, bias=False)
self.to_v = nn.Conv3d(context_dim, inner_dim, 1, 1, bias=False)
if output_bias:
self.to_out = nn.Conv2d(inner_dim, query_dim, 1, 1)
else:
self.to_out = nn.Conv2d(inner_dim, query_dim, 1, 1, bias=False)
def forward(self, x, context):
"""
@param x: b,f0,h,w
@param context: b,f1,d,h,w
@return:
"""
hn, hd = self.heads, self.dim_head
b, _, h, w = x.shape
b, _, d, h, w = context.shape
q = self.to_q(x).reshape(b,hn,hd,h,w) # b,t,h,w
k = self.to_k(context).reshape(b,hn,hd,d,h,w) # b,t,d,h,w
v = self.to_v(context).reshape(b,hn,hd,d,h,w) # b,t,d,h,w
sim = torch.sum(q.unsqueeze(3) * k, 2) * self.scale # b,hn,d,h,w
attn = sim.softmax(dim=2)
# b,hn,hd,d,h,w * b,hn,1,d,h,w
out = torch.sum(v * attn.unsqueeze(2), 3) # b,hn,hd,h,w
out = out.reshape(b,hn*hd,h,w)
return self.to_out(out)
class DepthTransformer(nn.Module):
def __init__(self, dim, n_heads, d_head, context_dim=None, checkpoint=True):
super().__init__()
inner_dim = n_heads * d_head
self.proj_in = nn.Sequential(
nn.Conv2d(dim, inner_dim, 1, 1),
nn.GroupNorm(8, inner_dim),
nn.SiLU(True),
)
self.proj_context = nn.Sequential(
nn.Conv3d(context_dim, context_dim, 1, 1, bias=False), # no bias
nn.GroupNorm(8, context_dim),
nn.ReLU(True), # ReLU only, so that a zero input yields a zero output
)
self.depth_attn = DepthAttention(query_dim=inner_dim, heads=n_heads, dim_head=d_head, context_dim=context_dim, output_bias=False) # is a self-attention if not self.disable_self_attn
self.proj_out = nn.Sequential(
nn.GroupNorm(8, inner_dim),
nn.ReLU(True),
nn.Conv2d(inner_dim, inner_dim, 3, 1, 1, bias=False),
nn.GroupNorm(8, inner_dim),
nn.ReLU(True),
zero_module(nn.Conv2d(inner_dim, dim, 3, 1, 1, bias=False)),
)
self.checkpoint = checkpoint
def forward(self, x, context=None):
return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint)
def _forward(self, x, context):
x_in = x
x = self.proj_in(x)
context = self.proj_context(context)
x = self.depth_attn(x, context)
x = self.proj_out(x) + x_in
return x
class DepthWiseAttention(UNetModel):
def __init__(self, volume_dims=(5,16,32,64), *args, **kwargs):
super().__init__(*args, **kwargs)
# num_heads = 4
model_channels = kwargs['model_channels']
channel_mult = kwargs['channel_mult']
d0,d1,d2,d3 = volume_dims
# 4
ch = model_channels*channel_mult[2]
self.middle_conditions = DepthTransformer(ch, 4, d3 // 2, context_dim=d3)
self.output_conditions=nn.ModuleList()
self.output_b2c = {3:0,4:1,5:2,6:3,7:4,8:5,9:6,10:7,11:8}
# 8
ch = model_channels*channel_mult[2]
self.output_conditions.append(DepthTransformer(ch, 4, d2 // 2, context_dim=d2)) # 0
self.output_conditions.append(DepthTransformer(ch, 4, d2 // 2, context_dim=d2)) # 1
# 16
self.output_conditions.append(DepthTransformer(ch, 4, d1 // 2, context_dim=d1)) # 2
ch = model_channels*channel_mult[1]
self.output_conditions.append(DepthTransformer(ch, 4, d1 // 2, context_dim=d1)) # 3
self.output_conditions.append(DepthTransformer(ch, 4, d1 // 2, context_dim=d1)) # 4
# 32
self.output_conditions.append(DepthTransformer(ch, 4, d0 // 2, context_dim=d0)) # 5
ch = model_channels*channel_mult[0]
self.output_conditions.append(DepthTransformer(ch, 4, d0 // 2, context_dim=d0)) # 6
self.output_conditions.append(DepthTransformer(ch, 4, d0 // 2, context_dim=d0)) # 7
self.output_conditions.append(DepthTransformer(ch, 4, d0 // 2, context_dim=d0)) # 8
def forward(self, x, timesteps=None, context=None, source_dict=None, **kwargs):
hs = [] | t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) | 2 | 2023-12-21 04:44:00+00:00 | 8k |
OPPOMKLab/u-LLaVA | models/ullava.py | [
{
"identifier": "registry",
"path": "utils/registry.py",
"snippet": "class Registry:\n def register_builder(cls, name):\n def wrap(builder_cls):\n def register_model(cls, name):\n def wrap(model_cls):\n def register_processor(cls, name):\n def wrap(processor_cls):\n def register_collator(cls, name):\n def wrap(collator_cls):\n def register_task(cls, name):\n def wrap(task_cls):\n def register(cls, name, obj):\n def get_builder_class(cls, name):\n def get_model_class(cls, name):\n def get_processor_class(cls, name):\n def get_collator_class(cls, name):\n def get_task_class(cls, name):\n def list_models(cls):\n def list_processors(cls):\n def list_collators(cls):\n def list_builders(cls):\n def list_tasks(cls):\n def get_path(cls, name):\n def get(cls, name, default=None, no_warning=False):\n def unregister(cls, name):"
},
{
"identifier": "build_sam_vit_h",
"path": "models/segment_anything/build_sam.py",
"snippet": "def build_sam_vit_h(checkpoint=None):\n return _build_sam(\n encoder_embed_dim=1280,\n encoder_depth=32,\n encoder_num_heads=16,\n encoder_global_attn_indexes=[7, 15, 23, 31],\n checkpoint=checkpoint,\n )"
},
{
"identifier": "UllavaCoreConfig",
"path": "models/ullava_core.py",
"snippet": "class UllavaCoreConfig(LlamaConfig):\n model_type = \"ullava_core\"\n is_composition = True\n\n def __init__(self,\n vision_config=None,\n vision_hidden_layer=-1,\n projector_from_scratch=True,\n mm_token_ids=None,\n **kwargs\n ):\n super(UllavaCoreConfig, self).__init__(**kwargs)\n\n self.vision_hidden_layer = vision_hidden_layer\n self.mm_token_ids = mm_token_ids\n self.projector_from_scratch = projector_from_scratch\n\n self.vision_config = CLIPVisionConfig(**vision_config) if vision_config else {}\n\n def to_dict(self):\n \"\"\"\n Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].\n\n Returns:\n `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,\n \"\"\"\n output = copy.deepcopy(self.__dict__)\n output[\"vision_config\"] = self.vision_config.to_dict() if self.vision_config else {}\n output[\"vision_hidden_layer\"] = self.vision_hidden_layer\n output[\"mm_token_ids\"] = self.mm_token_ids\n output[\"projector_from_scratch\"] = self.projector_from_scratch\n output[\"model_type\"] = self.__class__.model_type\n return output"
},
{
"identifier": "UllavaCoreForCausalLM",
"path": "models/ullava_core.py",
"snippet": "class UllavaCoreForCausalLM(LlamaForCausalLM):\n config_class = UllavaCoreConfig\n\n def __init__(self, config: UllavaCoreConfig):\n \"\"\"\n keep the same structure with LlamaForCausalLM: model + lm_head\n :param config:\n Evallama2Config:\n -llm_config\n -vision_config\n -vision_hidden_layer\n\n EvaLLaMA^2:\n causal_llm:\n model (LlamaModel)\n lm_head (MLP)\n vision_encoder: CLIP\n vision_projector: MLP\n \"\"\"\n super(LlamaForCausalLM, self).__init__(config)\n\n self.config = config\n\n self.model = LlamaModel(config)\n self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.vision_encoder = CLIPVisionModel(config.vision_config)\n # projector from vision to LLM space: [1024, 4096]\n self.vision_projector = nn.Linear(config.vision_config.hidden_size, config.hidden_size)\n\n self.vision_hidden_layer = config.vision_hidden_layer\n self.projector_from_scratch = config.projector_from_scratch\n self.mm_token_ids = config.mm_token_ids\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_input_embeddings(self) -> nn.Module:\n return self.model.embed_tokens\n\n def get_output_embeddings(self) -> nn.Module:\n return self.lm_head\n\n def init_mm_tokens(self, tokenizer, mm_tokens):\n \"\"\"\n fetch mm_token_id from tokenizer\n :return:\n \"\"\"\n mm_token_ids = {k: tokenizer.convert_tokens_to_ids(v) for k, v in mm_tokens.items()}\n self.config.mm_token_ids = mm_token_ids\n self.mm_token_ids = self.config.mm_token_ids\n\n def encode_image(self, image_tensors):\n \"\"\"\n :param image_tensors:\n :return:\n \"\"\"\n with torch.no_grad():\n # For image: [bs, 3, 224, 224]\n image_forward_outs = self.vision_encoder(image_tensors, output_hidden_states=True)\n select_hidden_state = image_forward_outs.hidden_states[self.vision_hidden_layer]\n # remove CLS embedding [:, 1:]\n image_features = select_hidden_state[:, 1:] # [bs, num_patches=16*16, 1024]\n\n return image_features\n\n def encode_video(self, video_clip_tensors):\n \"\"\"\n :param video_clip_tensors: [bs, C, T, H, W]\n :return: temporal-spatial features\n \"\"\"\n bs = video_clip_tensors.size(0)\n # For video: [bs, n_frm, 3, 224, 224] -> [bs * n_frm, 3, 224, 224]\n video_clip_tensors = rearrange(video_clip_tensors, 'b c t h w -> (b t) c h w')\n with torch.no_grad():\n video_forward_outs = self.vision_encoder(video_clip_tensors, output_hidden_states=True)\n select_hidden_state = video_forward_outs.hidden_states[self.vision_hidden_layer]\n # Remove CLS embedding [:, 1:]\n video_features = select_hidden_state[:, 1:] # [bs * n_frm, num_patches=16*16, 1024]\n video_features = rearrange(video_features, '(b t) n d -> b t n d', b=bs) # [bs, n_frm, num_patches, 1024]\n\n spatial_features = video_features.mean(dim=1) # [bs, num_patches, 1024]\n temporal_features = video_features.mean(dim=2) # [bs, n_frm, 1024]\n\n st_features = torch.concat([temporal_features, spatial_features], dim=1) # [bs, (n_frm+num_patches), 1024]\n\n return st_features\n\n def embed_images_videos(self,\n input_ids: torch.LongTensor = None,\n images: Optional[torch.FloatTensor] = None,\n videos: Optional[torch.FloatTensor] = None\n ):\n\n if input_ids.shape[1] == 1:\n return input_ids, None\n\n inputs_embeds = self.model.embed_tokens(input_ids)\n\n # [bs, C, H, W] -> [bs, num_patches, 1024]\n image_features = self.encode_image(images) if images is not None else None\n # [bs, C, T, H, W] -> [bs, n_frm + num_patches, 1024]\n video_features = self.encode_video(videos) if videos is not None else None\n\n 
batch_input_embeds = []\n cur_image_idx, cur_video_idx = 0, 0\n\n img_start_id, img_end_id, vid_start_id, vid_end_id = \\\n self.mm_token_ids[\"IMG_START\"], self.mm_token_ids[\"IMG_END\"], \\\n self.mm_token_ids[\"VID_START\"], self.mm_token_ids[\"VID_END\"]\n\n for cur_input_ids, cur_input_embeds in zip(input_ids, inputs_embeds):\n num_img_start, num_img_end = (cur_input_ids == img_start_id).sum(), (cur_input_ids == img_end_id).sum()\n num_vid_start, num_vid_end = (cur_input_ids == vid_start_id).sum(), (cur_input_ids == vid_end_id).sum()\n\n assert num_img_start == num_img_end and num_vid_start == num_vid_end, \\\n print(\"Number of image start and end tokens should be the same. {0} vs {1}, {2} vs {3}\".\n format(num_img_start, num_img_end, num_vid_start, num_vid_end))\n\n if num_img_start == 0 and num_vid_start == 0:\n # if there are only texts in batch, for example SQA and Alpaca\n # if do not set this, will cause NCCL timeout (CUDA error)\n dummy_image_features = torch.zeros(256, 1024, device=inputs_embeds.device,\n dtype=inputs_embeds.dtype)\n dummy_image_features = self.vision_projector(dummy_image_features) # [bs, num_patch=16*16, 4096]\n cur_input_embeds = cur_input_embeds + (0. * dummy_image_features).sum()\n cur_new_input_embeds = cur_input_embeds\n\n elif num_img_start > 0:\n # if there are images in batch\n im_start_tokens = torch.where(cur_input_ids == img_start_id)[0]\n # im_start_tokens is a list and only one image in it\n img_start_pos = im_start_tokens[0]\n # [num_patch, 1024] -> [num_patch, 4096] .to(device=cur_input_embeds.device)\n cur_image_features = self.vision_projector(image_features[cur_image_idx])\n num_patch = cur_image_features.shape[0]\n\n if self.projector_from_scratch:\n # FIXME: do this at pre-training according to LLaVA, failure to do so may result in performance drop\n # FIXME: if you have a better solution, please concat us, thank you\n cur_new_input_embeds = torch.cat((cur_input_embeds[:img_start_pos].detach(),\n cur_input_embeds[img_start_pos:img_start_pos + 1], # IM_START\n cur_image_features,\n cur_input_embeds[img_start_pos + num_patch + 1:\n img_start_pos + num_patch + 2], # IM_END\n cur_input_embeds[img_start_pos + num_patch + 2:].detach()),\n dim=0)\n else:\n # Fintuning stage, train LLM and visual projector, all embeddings shoule be trained\n cur_new_input_embeds = torch.cat((cur_input_embeds[:img_start_pos + 1],\n cur_image_features,\n cur_input_embeds[img_start_pos + num_patch + 1:]), dim=0)\n cur_image_idx += 1\n\n elif num_vid_start > 0:\n # if there are videos in batch\n vid_start_tokens = torch.where(cur_input_ids == vid_start_id)[0]\n vid_start_pos = vid_start_tokens[0]\n # [n_frm+num_patch, 4096]\n cur_video_features = self.vision_projector(video_features[cur_video_idx])\n num_frm_patch = cur_video_features.shape[0] # n_frm + num_patches\n\n if self.projector_from_scratch:\n cur_new_input_embeds = torch.cat((cur_input_embeds[:vid_start_pos].detach(),\n cur_input_embeds[vid_start_pos:vid_start_pos + 1], # V_START\n cur_video_features,\n cur_input_embeds[vid_start_pos + num_frm_patch + 1:\n vid_start_pos + num_frm_patch + 2], # V_END\n cur_input_embeds[\n vid_start_pos + num_frm_patch + 2:].detach()),\n dim=0)\n else:\n cur_new_input_embeds = torch.cat((cur_input_embeds[:vid_start_pos + 1],\n cur_video_features,\n cur_input_embeds[vid_start_pos + num_frm_patch + 1:]), dim=0)\n cur_video_idx += 1\n else:\n raise NotImplementedError\n batch_input_embeds.append(cur_new_input_embeds)\n\n # list -> tensor stack\n new_inputs_embeds = 
torch.stack(batch_input_embeds, dim=0)\n\n return None, new_inputs_embeds\n\n def forward(self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n images: Optional[torch.FloatTensor] = None,\n videos: Optional[torch.FloatTensor] = None,\n return_dict: Optional[bool] = None,\n ):\n \"\"\"\n Args:\n input_ids: [bs, length, dim]\n attention_mask: [bs, length, dim]\n labels: [bs, length, dim]\n images: [bs, C, H, W]\n videos: [bs, C, T, H, W]\n :return: loss when training else None\n \"\"\"\n\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n input_ids, inputs_embeds = self.embed_images_videos(input_ids, images, videos)\n\n # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)\n outputs = self.model(\n input_ids=input_ids,\n inputs_embeds=inputs_embeds,\n attention_mask=attention_mask,\n position_ids=position_ids,\n use_cache=use_cache,\n past_key_values=past_key_values,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict\n )\n\n hidden_states = outputs[0]\n logits = self.lm_head(hidden_states)\n\n loss = None\n if labels is not None:\n # Shift so that tokens < n predict n\n shift_logits = logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n # Flatten the tokens\n loss_fct = CrossEntropyLoss()\n shift_logits = shift_logits.view(-1, self.config.vocab_size)\n shift_labels = shift_labels.view(-1)\n # Enable model/pipeline parallelism\n shift_labels = shift_labels.to(shift_logits.device)\n loss = loss_fct(shift_logits, shift_labels)\n\n if not return_dict:\n output = (logits,) + outputs[1:]\n return (loss,) + output if loss is not None else output\n\n # if self.training:\n # output_hidden_states = outputs.hidden_states\n # else:\n # output_hidden_states = hidden_states\n\n return CausalLMOutputWithPast(\n loss=loss,\n logits=logits,\n past_key_values=outputs.past_key_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def prepare_inputs_for_generation(\n self,\n input_ids: torch.LongTensor = None,\n inputs_embeds: torch.Tensor = None,\n attention_mask: torch.Tensor = None,\n images: torch.Tensor = None,\n videos: torch.Tensor = None,\n labels: torch.LongTensor = None,\n past_key_values=None,\n **kwargs\n ):\n if past_key_values:\n input_ids = input_ids[:, -1:]\n\n position_ids = kwargs.get(\"position_ids\", None)\n if attention_mask is not None and position_ids is None:\n # create position_ids on the fly for batch generation\n position_ids = attention_mask.long().cumsum(-1) - 1\n position_ids.masked_fill_(attention_mask == 0, 1)\n if past_key_values:\n position_ids = position_ids[:, -1].unsqueeze(-1)\n\n # if `inputs_embeds` are passed, we only want to use them in the 1st generation step\n if inputs_embeds is not None and past_key_values is None:\n model_inputs = {\"inputs_embeds\": inputs_embeds}\n else:\n 
model_inputs = {\"input_ids\": input_ids}\n\n model_inputs.update(\n {\n \"position_ids\": position_ids,\n \"past_key_values\": past_key_values,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"attention_mask\": attention_mask,\n \"images\": images,\n \"videos\": videos,\n }\n )\n return model_inputs"
}
] | import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List
from utils.registry import registry
from models.segment_anything import build_sam_vit_h
from models.ullava_core import UllavaCoreConfig, UllavaCoreForCausalLM
from transformers import AutoModelForCausalLM, PreTrainedModel, PretrainedConfig, AutoConfig | 5,003 | """
u-LLaVA with segmentation module
SAM patch is adapted from: https://github.com/dvlab-research/LISA/blob/main/model/LISA.py
"""
def dice_loss(
inputs: torch.Tensor,
targets: torch.Tensor,
num_masks: float,
scale=1000,
eps=1e-6,
):
"""
Compute the DICE loss, similar to generalized IOU for masks
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
num_masks:
scale:
eps:
"""
inputs = inputs.sigmoid()
inputs = inputs.flatten(1, 2)
targets = targets.flatten(1, 2)
numerator = 2 * (inputs / scale * targets).sum(-1)
denominator = (inputs / scale).sum(-1) + (targets / scale).sum(-1)
loss = 1 - (numerator + eps) / (denominator + eps)
loss = loss.sum() / (num_masks + 1e-8)
return loss
def sigmoid_ce_loss(
inputs: torch.Tensor,
targets: torch.Tensor,
num_masks: float,
):
"""
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
num_masks:
Returns:
Loss tensor
"""
loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
loss = loss.flatten(1, 2).mean(1).sum() / (num_masks + 1e-8)
return loss
class UllavaConfig(PretrainedConfig):
model_type = "ullava"
is_composition = True
def __init__(self,
llm_config=None,
ce_weight=0.5,
bce_weight=0.5,
dice_weight=-1,
out_dim=256,
seg_token_idx=32007,
train_mask_decoder=True,
**kwargs
):
super(UllavaConfig, self).__init__(**kwargs)
self.llm_config = UllavaCoreConfig(**llm_config) if llm_config else {}
self.ce_weight = ce_weight
self.bce_weight = bce_weight
self.out_dim = out_dim
self.dice_weight = dice_weight
self.seg_token_idx = seg_token_idx
self.train_mask_decoder = train_mask_decoder
def to_dict(self):
"""
Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].
Returns:
            `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
"""
output = copy.deepcopy(self.__dict__)
output["llm_config"] = self.llm_config.to_dict() if self.llm_config else {}
output["ce_weight"] = self.ce_weight
output["bce_weight"] = self.bce_weight
output["dice_weight"] = self.dice_weight
output["out_dim"] = self.out_dim
output["seg_token_idx"] = self.seg_token_idx
output["train_mask_decoder"] = self.train_mask_decoder
output["model_type"] = self.__class__.model_type
return output
@registry.register_model('ullava')
class UllavaForCausalLM(PreTrainedModel):
config_class = UllavaConfig
def __init__(self, config):
super().__init__(config)
self.config = config
llm_config = config.llm_config
self.llm = UllavaCoreForCausalLM(llm_config)
# initialize sam and projector without checkpoint
self.visual_model, self.text_hidden_fcs = self.init_seg_modules(llm_config.hidden_size)
def init_seg_modules(self, hidden_size):
# SAM
| """
u-LLaVA with segmentation module
SAM patch is adapted from: https://github.com/dvlab-research/LISA/blob/main/model/LISA.py
"""
def dice_loss(
inputs: torch.Tensor,
targets: torch.Tensor,
num_masks: float,
scale=1000,
eps=1e-6,
):
"""
Compute the DICE loss, similar to generalized IOU for masks
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
num_masks:
scale:
eps:
"""
inputs = inputs.sigmoid()
inputs = inputs.flatten(1, 2)
targets = targets.flatten(1, 2)
numerator = 2 * (inputs / scale * targets).sum(-1)
denominator = (inputs / scale).sum(-1) + (targets / scale).sum(-1)
loss = 1 - (numerator + eps) / (denominator + eps)
loss = loss.sum() / (num_masks + 1e-8)
return loss
def sigmoid_ce_loss(
inputs: torch.Tensor,
targets: torch.Tensor,
num_masks: float,
):
"""
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
num_masks:
Returns:
Loss tensor
"""
loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
loss = loss.flatten(1, 2).mean(1).sum() / (num_masks + 1e-8)
return loss
class UllavaConfig(PretrainedConfig):
model_type = "ullava"
is_composition = True
def __init__(self,
llm_config=None,
ce_weight=0.5,
bce_weight=0.5,
dice_weight=-1,
out_dim=256,
seg_token_idx=32007,
train_mask_decoder=True,
**kwargs
):
super(UllavaConfig, self).__init__(**kwargs)
self.llm_config = UllavaCoreConfig(**llm_config) if llm_config else {}
self.ce_weight = ce_weight
self.bce_weight = bce_weight
self.out_dim = out_dim
self.dice_weight = dice_weight
self.seg_token_idx = seg_token_idx
self.train_mask_decoder = train_mask_decoder
def to_dict(self):
"""
Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].
Returns:
            `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance.
"""
output = copy.deepcopy(self.__dict__)
output["llm_config"] = self.llm_config.to_dict() if self.llm_config else {}
output["ce_weight"] = self.ce_weight
output["bce_weight"] = self.bce_weight
output["dice_weight"] = self.dice_weight
output["out_dim"] = self.out_dim
output["seg_token_idx"] = self.seg_token_idx
output["train_mask_decoder"] = self.train_mask_decoder
output["model_type"] = self.__class__.model_type
return output
@registry.register_model('ullava')
class UllavaForCausalLM(PreTrainedModel):
config_class = UllavaConfig
def __init__(self, config):
super().__init__(config)
self.config = config
llm_config = config.llm_config
self.llm = UllavaCoreForCausalLM(llm_config)
# initialize sam and projector without checkpoint
self.visual_model, self.text_hidden_fcs = self.init_seg_modules(llm_config.hidden_size)
def init_seg_modules(self, hidden_size):
# SAM | visual_model = build_sam_vit_h(checkpoint=None) | 1 | 2023-12-21 08:10:23+00:00 | 8k |
chinhsuanwu/ifusion | ifusion.py | [
{
"identifier": "FinetuneIterableDataset",
"path": "dataset/finetune.py",
"snippet": "class FinetuneIterableDataset(IterableDataset, FinetuneDataset):\n def __init__(self, transform_fp):\n super().__init__(transform_fp)\n\n def __iter__(self):\n while True:\n index = torch.randint(0, len(self.perm), size=(1,)).item()\n index_target, index_cond = (\n self.perm[index, 0].item(),\n self.perm[index, 1].item(),\n )\n yield {\n \"image_target\": self.all_images[index_target],\n \"image_cond\": self.all_images[index_cond],\n \"T\": self.get_trans(self.all_camtoworlds[index_target], self.all_camtoworlds[index_cond], in_T=True),\n }"
},
{
"identifier": "MultiImageInferenceDataset",
"path": "dataset/inference.py",
"snippet": "class MultiImageInferenceDataset(Dataset, BaseDataset):\n def __init__(\n self,\n transform_fp,\n n_views: int,\n theta: int,\n radius: float,\n ):\n self.setup(transform_fp)\n self.infer_camtoworlds = make_circular_poses(n_views, theta, radius)\n\n def __len__(self):\n return len(self.infer_camtoworlds)\n\n def __getitem__(self, index):\n target_camtoworld = self.infer_camtoworlds[index]\n latlon = torch.stack(\n [\n self.get_trans(target_camtoworld, self.all_camtoworlds[i], in_T=False)\n for i in range(len(self.all_camtoworlds))\n ]\n )\n return {\n \"image_cond\": self.all_images,\n \"theta\": latlon[:, 0],\n \"azimuth\": latlon[:, 1],\n \"distance\": latlon[:, 2],\n }\n\n def loader(self, batch_size=1, num_workers=8, **kwargs):\n return DataLoader(\n self,\n batch_size=batch_size,\n num_workers=num_workers,\n pin_memory=True,\n sampler=None,\n **kwargs,\n )"
},
{
"identifier": "SingleImageInferenceDataset",
"path": "dataset/inference.py",
"snippet": "class SingleImageInferenceDataset(Dataset, BaseDataset):\n def __init__(\n self,\n image_fp: str = None,\n transform_fp: str = None,\n n_views: int = 8,\n theta: int = -20,\n radius: float = 1.0,\n default_latlon: List[float] = [0, 0, 1],\n ):\n if image_fp:\n self.image = load_image(image_fp, device=\"cpu\").squeeze(0)\n self.camtoworld = latlon2mat(torch.tensor([default_latlon]))\n elif transform_fp:\n self.setup(transform_fp)\n self.image, self.camtoworld = self.all_images[0], self.all_camtoworlds[0]\n else:\n raise ValueError(\"Either image_fp or transform_fp must be provided.\")\n\n self.infer_camtoworlds = make_circular_poses(n_views, theta, radius)\n\n def __len__(self):\n return len(self.infer_camtoworlds)\n\n def __getitem__(self, index):\n target_camtoworld = self.infer_camtoworlds[index]\n latlon = self.get_trans(target_camtoworld, self.camtoworld, in_T=False)\n return {\n \"image_cond\": self.image,\n \"theta\": latlon[0],\n \"azimuth\": latlon[1],\n \"distance\": latlon[2],\n }\n\n def loader(self, batch_size=1, num_workers=8, **kwargs):\n return DataLoader(\n self,\n batch_size=batch_size,\n num_workers=num_workers,\n pin_memory=True,\n sampler=None,\n **kwargs,\n )"
},
{
"identifier": "latlon2mat",
"path": "util/pose.py",
"snippet": "def latlon2mat(latlon, in_deg=True, default_radius=1.0):\n if latlon.shape[-1] == 2:\n radius = torch.ones_like(latlon[:, 0]) * default_radius\n latlon = torch.cat((latlon, radius.unsqueeze(1)), dim=1)\n \n if in_deg:\n latlon[:, :2] = latlon[:, :2].deg2rad()\n mv = [\n translate(0, 0, -radius) @ rotate_x(theta) @ rotate_y(-azimuth)\n for theta, azimuth, radius in latlon\n ]\n c2w = torch.linalg.inv(torch.stack(mv))\n return c2w"
},
{
"identifier": "make_T",
"path": "util/pose.py",
"snippet": "def make_T(theta, azimuth, distance, in_deg=False):\n if in_deg:\n theta, azimuth = theta.deg2rad(), azimuth.deg2rad()\n return torch.stack(\n (\n theta,\n torch.sin(azimuth),\n torch.cos(azimuth),\n distance,\n )\n )"
},
{
"identifier": "mat2latlon",
"path": "util/pose.py",
"snippet": "def mat2latlon(T, in_deg=False, return_radius=False):\n if len(T.shape) == 2:\n T = T.unsqueeze(0)\n xyz = T[:, :3, 3]\n radius = torch.norm(xyz, dim=1, keepdim=True)\n xyz = xyz / radius\n theta = -torch.asin(xyz[:, 1])\n azimuth = torch.atan2(xyz[:, 0], xyz[:, 2])\n\n if in_deg:\n theta, azimuth = theta.rad2deg(), azimuth.rad2deg()\n if return_radius:\n return torch.stack((theta, azimuth, radius.squeeze(0))).T\n return torch.stack((theta, azimuth)).T"
},
{
"identifier": "load_image",
"path": "util/util.py",
"snippet": "def load_image(fp, to_clip=True, verbose=True, device=\"cuda\"):\n if verbose:\n print(f\"[INFO] Loading image {fp}\")\n\n image = np.array(Image.open(fp))\n if image.shape[-1] == 4:\n image[image[..., -1] < 128] = [255] * 4\n image = image[..., :3]\n\n image = cv2.resize(image, (256, 256), interpolation=cv2.INTER_AREA)\n image = image.astype(np.float32) / 255.0\n image = torch.from_numpy(image).contiguous().to(device)\n image = image.permute(2, 0, 1).unsqueeze(0)\n\n if to_clip:\n image = image * 2 - 1\n return image"
},
{
"identifier": "parse_optimizer",
"path": "util/util.py",
"snippet": "def parse_optimizer(config, params):\n optim = getattr(torch.optim, config.name)(params, **config.args)\n return optim"
},
{
"identifier": "parse_scheduler",
"path": "util/util.py",
"snippet": "def parse_scheduler(config, optim):\n scheduler = getattr(torch.optim.lr_scheduler, config.name)(optim, **config.args)\n return scheduler"
},
{
"identifier": "plot_image",
"path": "util/viz.py",
"snippet": "def plot_image(*xs, normalize=False, fp=\"out.png\"):\n # x: [B, 3, H, W], [3, H, W], [1, H, W] or [H, W] torch.Tensor\n # [B, H, W, 3], [H, W, 3], [H, W, 1] or [H, W] numpy.ndarray\n\n def _plot_image(image):\n if isinstance(image, torch.Tensor):\n if len(image.shape) == 3:\n if image.shape[0] == 1 or image.shape[0] == 3 or image.shape[0] == 4:\n image = image.permute(1, 2, 0).squeeze()\n image = image.detach().cpu().numpy()\n\n image = image.astype(np.float32)\n\n # normalize\n if normalize:\n image = (image - image.min(axis=0, keepdims=True)) / (\n image.max(axis=0, keepdims=True)\n - image.min(axis=0, keepdims=True)\n + 1e-8\n )\n\n if image.max() <= 1:\n image *= 255\n Image.fromarray(image.astype(np.uint8)).save(fp)\n\n for x in xs:\n if len(x.shape) == 4:\n for i in range(x.shape[0]):\n _plot_image(x[i])\n else:\n _plot_image(x)"
}
] | import json
import numpy as np
import torch
from glob import glob
from einops import rearrange
from liegroups.torch import SE3
from tqdm import trange
from dataset.finetune import FinetuneIterableDataset
from dataset.inference import MultiImageInferenceDataset, SingleImageInferenceDataset
from util.pose import latlon2mat, make_T, mat2latlon
from util.typing import *
from util.util import load_image, parse_optimizer, parse_scheduler
from util.viz import plot_image | 3,716 | transform_fp: str,
demo_fp: str,
default_latlon: List[float] = [0, 0, 1],
**kwargs,
):
image_fps = sorted(glob(image_dir + "/*.png") + glob(image_dir + "/*.jpg"))
image_fps = [fp for fp in image_fps if fp != demo_fp]
# FIXME: always pick the first image as reference
ref_image = load_image(image_fps[0])
qry_images = [load_image(image_fps[i]) for i in range(1, len(image_fps))]
out_dict = {"camera_angle_x": np.deg2rad(49.1), "frames": []}
out_dict["frames"].append(
{
"file_path": image_fps[0].replace(image_dir + "/", ""),
"transform_matrix": latlon2mat(torch.tensor([default_latlon])).tolist(),
"latlon": list(default_latlon),
}
)
for qry_fp, qry_image in zip(image_fps[1:], qry_images):
assert ref_image.shape == qry_image.shape
pose = optimize_pose_pair(
model=model, ref_image=ref_image, qry_image=qry_image, **kwargs
)
pose = np.add(default_latlon, pose.unsqueeze(0))
out_dict["frames"].append(
{
"file_path": qry_fp.replace(image_dir + "/", ""),
"transform_matrix": latlon2mat(pose.clone()).tolist(),
"latlon": pose.squeeze().tolist(),
}
)
# save poses to json
with open(transform_fp, "w") as f:
json.dump(out_dict, f, indent=4)
def finetune(
model,
transform_fp: str,
lora_ckpt_fp: str,
lora_rank: int,
lora_target_replace_module: List[str],
args,
):
model.inject_lora(
rank=lora_rank,
target_replace_module=lora_target_replace_module,
)
train_dataset = FinetuneIterableDataset(transform_fp)
train_loader = train_dataset.loader(args.batch_size)
optimizer = parse_optimizer(args.optimizer, model.require_grad_params)
scheduler = parse_scheduler(args.scheduler, optimizer)
train_loader = iter(train_loader)
with trange(args.max_step) as pbar:
for step in pbar:
optimizer.zero_grad()
batch = next(train_loader)
batch = {k: v.to(model.device) for k, v in batch.items()}
loss = model(batch)
pbar.set_description(f"step: {step}, loss: {loss.item():.4f}")
loss.backward()
optimizer.step()
scheduler.step()
model.save_lora(lora_ckpt_fp)
model.remove_lora()
def inference(
model,
transform_fp: str,
lora_ckpt_fp: str,
demo_fp: str,
lora_rank: int,
lora_target_replace_module: List[str],
use_multi_view_condition: bool,
n_views: int,
theta: float,
radius: float,
args,
):
if lora_ckpt_fp:
model.inject_lora(
ckpt_fp=lora_ckpt_fp,
rank=lora_rank,
target_replace_module=lora_target_replace_module,
)
if use_multi_view_condition:
test_dataset = MultiImageInferenceDataset
generate_fn = model.generate_from_tensor_multi_cond
else:
test_dataset = SingleImageInferenceDataset
generate_fn = model.generate_from_tensor
test_dataset = test_dataset(
transform_fp=transform_fp, n_views=n_views, theta=theta, radius=radius
)
test_loader = test_dataset.loader(args.batch_size)
for batch in test_loader:
batch = {k: v.to(model.device) for k, v in batch.items()}
out = generate_fn(
image=batch["image_cond"],
theta=batch["theta"],
azimuth=batch["azimuth"],
distance=batch["distance"],
)
if lora_ckpt_fp:
model.remove_lora()
out = rearrange(out, "b c h w -> 1 c h (b w)")
|
def optimize_pose_loop(
model,
image_cond: Float[Tensor, "2 3 256 256"],
image_target: Float[Tensor, "2 3 256 256"],
T: Float[Tensor, "4 4"],
default_radius: float,
search_radius_range: float,
use_step_ratio: bool,
args,
**kwargs,
):
# init xi in se(3)
xi = torch.randn(6) * 1e-6
xi.requires_grad_()
optimizer = parse_optimizer(args.optimizer, [xi])
scheduler = parse_scheduler(args.scheduler, optimizer)
total_loss = 0.0
with trange(args.max_step) as pbar:
for step in pbar:
optimizer.zero_grad()
# se(3) -> SE(3)
T_delta = SE3.exp(xi).as_matrix()
T_ = T @ T_delta
latlon = mat2latlon(T_).squeeze()
theta, azimuth = latlon[0], latlon[1]
distance = (
torch.sin(torch.norm(T_[:3, 3]) - default_radius) * search_radius_range
)
idx = [0, 1] if torch.rand(1) < 0.5 else [1, 0]
batch = {
"image_cond": image_cond[idx],
"image_target": image_target[idx],
"T": torch.stack(
(
make_T(theta, azimuth, distance),
make_T(-theta, -azimuth, -distance),
)
)[idx].to(model.device),
}
if use_step_ratio:
loss = model(batch, step_ratio=step / args.max_step)
else:
loss = model(batch)
total_loss += loss
pbar.set_description(
f"step: {step}, total_loss: {total_loss:.4f}, loss: {loss.item():.2f}, theta: {theta.rad2deg().item():.2f}, azimuth: {azimuth.rad2deg().item():.2f}, distance: {distance.item():.2f}"
)
loss.backward()
optimizer.step()
scheduler.step(total_loss)
return total_loss, theta, azimuth, distance
def optimize_pose_pair(
model,
ref_image: Float[Tensor, "1 3 256 256"],
qry_image: Float[Tensor, "1 3 256 256"],
init_latlon: List[List],
**kwargs,
):
image_cond = torch.cat((ref_image, qry_image)).to(model.device)
image_target = torch.cat((qry_image, ref_image)).to(model.device)
init_T = latlon2mat(torch.tensor(init_latlon))
results = []
for T in init_T:
total_loss, theta, azimuth, distance = optimize_pose_loop(
model,
image_cond=image_cond,
image_target=image_target,
T=T,
**kwargs,
)
results.append(
(
total_loss.item(),
theta.rad2deg().item(),
azimuth.rad2deg().item(),
distance.item(),
)
)
results = torch.tensor(results)
best_idx = torch.argmin(results[:, 0])
pred_pose = results[best_idx][1:]
print(
f"[INFO] Best pose: theta: {pred_pose[0]:.2f}, azimuth: {pred_pose[1]:.2f}, distance: {pred_pose[2]:.2f}"
)
return pred_pose
def optimize_pose(
model,
image_dir: str,
transform_fp: str,
demo_fp: str,
default_latlon: List[float] = [0, 0, 1],
**kwargs,
):
image_fps = sorted(glob(image_dir + "/*.png") + glob(image_dir + "/*.jpg"))
image_fps = [fp for fp in image_fps if fp != demo_fp]
# FIXME: always pick the first image as reference
ref_image = load_image(image_fps[0])
qry_images = [load_image(image_fps[i]) for i in range(1, len(image_fps))]
out_dict = {"camera_angle_x": np.deg2rad(49.1), "frames": []}
out_dict["frames"].append(
{
"file_path": image_fps[0].replace(image_dir + "/", ""),
"transform_matrix": latlon2mat(torch.tensor([default_latlon])).tolist(),
"latlon": list(default_latlon),
}
)
for qry_fp, qry_image in zip(image_fps[1:], qry_images):
assert ref_image.shape == qry_image.shape
pose = optimize_pose_pair(
model=model, ref_image=ref_image, qry_image=qry_image, **kwargs
)
pose = np.add(default_latlon, pose.unsqueeze(0))
out_dict["frames"].append(
{
"file_path": qry_fp.replace(image_dir + "/", ""),
"transform_matrix": latlon2mat(pose.clone()).tolist(),
"latlon": pose.squeeze().tolist(),
}
)
# save poses to json
with open(transform_fp, "w") as f:
json.dump(out_dict, f, indent=4)
def finetune(
model,
transform_fp: str,
lora_ckpt_fp: str,
lora_rank: int,
lora_target_replace_module: List[str],
args,
):
model.inject_lora(
rank=lora_rank,
target_replace_module=lora_target_replace_module,
)
train_dataset = FinetuneIterableDataset(transform_fp)
train_loader = train_dataset.loader(args.batch_size)
optimizer = parse_optimizer(args.optimizer, model.require_grad_params)
scheduler = parse_scheduler(args.scheduler, optimizer)
train_loader = iter(train_loader)
with trange(args.max_step) as pbar:
for step in pbar:
optimizer.zero_grad()
batch = next(train_loader)
batch = {k: v.to(model.device) for k, v in batch.items()}
loss = model(batch)
pbar.set_description(f"step: {step}, loss: {loss.item():.4f}")
loss.backward()
optimizer.step()
scheduler.step()
model.save_lora(lora_ckpt_fp)
model.remove_lora()
def inference(
model,
transform_fp: str,
lora_ckpt_fp: str,
demo_fp: str,
lora_rank: int,
lora_target_replace_module: List[str],
use_multi_view_condition: bool,
n_views: int,
theta: float,
radius: float,
args,
):
if lora_ckpt_fp:
model.inject_lora(
ckpt_fp=lora_ckpt_fp,
rank=lora_rank,
target_replace_module=lora_target_replace_module,
)
if use_multi_view_condition:
test_dataset = MultiImageInferenceDataset
generate_fn = model.generate_from_tensor_multi_cond
else:
test_dataset = SingleImageInferenceDataset
generate_fn = model.generate_from_tensor
test_dataset = test_dataset(
transform_fp=transform_fp, n_views=n_views, theta=theta, radius=radius
)
test_loader = test_dataset.loader(args.batch_size)
for batch in test_loader:
batch = {k: v.to(model.device) for k, v in batch.items()}
out = generate_fn(
image=batch["image_cond"],
theta=batch["theta"],
azimuth=batch["azimuth"],
distance=batch["distance"],
)
if lora_ckpt_fp:
model.remove_lora()
out = rearrange(out, "b c h w -> 1 c h (b w)") | plot_image(out, fp=demo_fp) | 9 | 2023-12-17 12:45:38+00:00 | 8k |
wangzhecheng/SkyScript | src/open_clip/coca_model.py | [
{
"identifier": "LayerNormFp32",
"path": "src/open_clip/transformer.py",
"snippet": "class LayerNormFp32(nn.LayerNorm):\n \"\"\"Subclass torch's LayerNorm to handle fp16 (by casting to float32 and back).\"\"\"\n\n def forward(self, x: torch.Tensor):\n orig_type = x.dtype\n x = F.layer_norm(x.to(torch.float32), self.normalized_shape, self.weight, self.bias, self.eps)\n return x.to(orig_type)"
},
{
"identifier": "LayerNorm",
"path": "src/open_clip/transformer.py",
"snippet": "class LayerNorm(nn.LayerNorm):\n \"\"\"Subclass torch's LayerNorm (with cast back to input dtype).\"\"\"\n\n def forward(self, x: torch.Tensor):\n orig_type = x.dtype\n x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)\n return x.to(orig_type)"
},
{
"identifier": "QuickGELU",
"path": "src/open_clip/transformer.py",
"snippet": "class QuickGELU(nn.Module):\n # NOTE This is slower than nn.GELU or nn.SiLU and uses more GPU memory\n def forward(self, x: torch.Tensor):\n return x * torch.sigmoid(1.702 * x)"
},
{
"identifier": "MultimodalTransformer",
"path": "src/open_clip/transformer.py",
"snippet": "class MultimodalTransformer(Transformer):\n def __init__(\n self,\n width: int,\n layers: int,\n heads: int,\n context_length: int = 77,\n mlp_ratio: float = 4.0,\n ls_init_value: float = None,\n act_layer: Callable = nn.GELU,\n norm_layer: Callable = LayerNorm,\n output_dim: int = 512,\n ):\n\n super().__init__(\n width=width,\n layers=layers,\n heads=heads,\n mlp_ratio=mlp_ratio,\n ls_init_value=ls_init_value,\n act_layer=act_layer,\n norm_layer=norm_layer,\n )\n self.context_length = context_length\n self.cross_attn = nn.ModuleList([\n ResidualAttentionBlock(\n width,\n heads,\n mlp_ratio,\n ls_init_value=ls_init_value,\n act_layer=act_layer,\n norm_layer=norm_layer,\n is_cross_attention=True,\n )\n for _ in range(layers)\n ])\n\n self.register_buffer('attn_mask', self.build_attention_mask(), persistent=False)\n\n self.ln_final = norm_layer(width)\n self.text_projection = nn.Parameter(torch.empty(width, output_dim))\n\n def init_parameters(self):\n proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)\n attn_std = self.transformer.width ** -0.5\n fc_std = (2 * self.transformer.width) ** -0.5\n for block in self.transformer.resblocks:\n nn.init.normal_(block.attn.in_proj_weight, std=attn_std)\n nn.init.normal_(block.attn.out_proj.weight, std=proj_std)\n nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)\n nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)\n for block in self.transformer.cross_attn:\n nn.init.normal_(block.attn.in_proj_weight, std=attn_std)\n nn.init.normal_(block.attn.out_proj.weight, std=proj_std)\n nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)\n nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)\n\n if self.text_projection is not None:\n nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)\n\n def build_attention_mask(self):\n # lazily create causal attention mask, with full attention between the tokens\n # pytorch uses additive attention mask; fill with -inf\n mask = torch.empty(self.context_length, self.context_length)\n mask.fill_(float(\"-inf\"))\n mask.triu_(1) # zero out the lower diagonal\n return mask\n\n def forward(self, image_embs, text_embs):\n text_embs = text_embs.permute(1, 0, 2) # NLD -> LNDsq\n image_embs = image_embs.permute(1, 0, 2) # NLD -> LND\n seq_len = text_embs.shape[0]\n\n for resblock, cross_attn in zip(self.resblocks, self.cross_attn):\n if self.grad_checkpointing and not torch.jit.is_scripting():\n # TODO: handle kwargs https://github.com/pytorch/pytorch/issues/79887#issuecomment-1161758372\n text_embs = checkpoint(resblock, text_embs, None, None, self.attn_mask[:seq_len, :seq_len])\n text_embs = checkpoint(cross_attn, text_embs, image_embs, image_embs, None)\n else:\n text_embs = resblock(text_embs, attn_mask=self.attn_mask[:seq_len, :seq_len])\n text_embs = cross_attn(text_embs, k_x=image_embs, v_x=image_embs)\n\n x = text_embs.permute(1, 0, 2) # LND -> NLD\n x = self.ln_final(x)\n\n if self.text_projection is not None:\n x = x @ self.text_projection\n\n return x\n\n @torch.jit.ignore\n def set_grad_checkpointing(self, enable=True):\n self.grad_checkpointing = enable"
},
{
"identifier": "CLIPTextCfg",
"path": "src/open_clip/model.py",
"snippet": "class CLIPTextCfg:\n context_length: int = 77\n vocab_size: int = 49408\n width: int = 512\n heads: int = 8\n layers: int = 12\n ls_init_value: Optional[float] = None # layer scale initial value\n hf_model_name: str = None\n hf_tokenizer_name: str = None\n hf_model_pretrained: bool = True\n proj: str = 'mlp'\n pooler_type: str = 'mean_pooler'\n embed_cls: bool = False\n pad_id: int = 0\n output_tokens: bool = False"
},
{
"identifier": "CLIPVisionCfg",
"path": "src/open_clip/model.py",
"snippet": "class CLIPVisionCfg:\n layers: Union[Tuple[int, int, int, int], int] = 12\n width: int = 768\n head_width: int = 64\n mlp_ratio: float = 4.0\n patch_size: int = 16\n image_size: Union[Tuple[int, int], int] = 224\n\n ls_init_value: Optional[float] = None # layer scale initial value\n patch_dropout: float = 0. # what fraction of patches to dropout during training (0 would mean disabled and no patches dropped) - 0.5 to 0.75 recommended in the paper for optimal results\n input_patchnorm: bool = False # whether to use dual patchnorm - would only apply the input layernorm on each patch, as post-layernorm already exist in original clip vit design\n global_average_pool: bool = False # whether to global average pool the last embedding layer, instead of using CLS token (https://arxiv.org/abs/2205.01580)\n attentional_pool: bool = False # whether to use attentional pooler in the last embedding layer\n n_queries: int = 256 # n_queries for attentional pooler\n attn_pooler_heads: int = 8 # n heads for attentional_pooling\n output_tokens: bool = False\n\n timm_model_name: str = None # a valid model name overrides layers, width, patch_size\n timm_model_pretrained: bool = False # use (imagenet) pretrained weights for named model\n timm_pool: str = 'avg' # feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '')\n timm_proj: str = 'linear' # linear projection for timm model output ('linear', 'mlp', '')\n timm_proj_bias: bool = False # enable bias final projection\n timm_drop: float = 0. # head dropout\n timm_drop_path: Optional[float] = None # backbone stochastic depth"
},
{
"identifier": "_build_vision_tower",
"path": "src/open_clip/model.py",
"snippet": "def _build_vision_tower(\n embed_dim: int,\n vision_cfg: CLIPVisionCfg,\n quick_gelu: bool = False,\n cast_dtype: Optional[torch.dtype] = None\n):\n if isinstance(vision_cfg, dict):\n vision_cfg = CLIPVisionCfg(**vision_cfg)\n\n # OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more\n # memory efficient in recent PyTorch releases (>= 1.10).\n # NOTE: timm models always use native GELU regardless of quick_gelu flag.\n act_layer = QuickGELU if quick_gelu else nn.GELU\n\n if vision_cfg.timm_model_name:\n visual = TimmModel(\n vision_cfg.timm_model_name,\n pretrained=vision_cfg.timm_model_pretrained,\n pool=vision_cfg.timm_pool,\n proj=vision_cfg.timm_proj,\n proj_bias=vision_cfg.timm_proj_bias,\n drop=vision_cfg.timm_drop,\n drop_path=vision_cfg.timm_drop_path,\n patch_drop=vision_cfg.patch_dropout if vision_cfg.patch_dropout > 0 else None,\n embed_dim=embed_dim,\n image_size=vision_cfg.image_size,\n )\n elif isinstance(vision_cfg.layers, (tuple, list)):\n vision_heads = vision_cfg.width * 32 // vision_cfg.head_width\n visual = ModifiedResNet(\n layers=vision_cfg.layers,\n output_dim=embed_dim,\n heads=vision_heads,\n image_size=vision_cfg.image_size,\n width=vision_cfg.width,\n )\n else:\n vision_heads = vision_cfg.width // vision_cfg.head_width\n norm_layer = LayerNormFp32 if cast_dtype in (torch.float16, torch.bfloat16) else LayerNorm\n visual = VisionTransformer(\n image_size=vision_cfg.image_size,\n patch_size=vision_cfg.patch_size,\n width=vision_cfg.width,\n layers=vision_cfg.layers,\n heads=vision_heads,\n mlp_ratio=vision_cfg.mlp_ratio,\n ls_init_value=vision_cfg.ls_init_value,\n patch_dropout=vision_cfg.patch_dropout,\n input_patchnorm=vision_cfg.input_patchnorm,\n global_average_pool=vision_cfg.global_average_pool,\n attentional_pool=vision_cfg.attentional_pool,\n n_queries=vision_cfg.n_queries,\n attn_pooler_heads=vision_cfg.attn_pooler_heads,\n output_tokens=vision_cfg.output_tokens,\n output_dim=embed_dim,\n act_layer=act_layer,\n norm_layer=norm_layer,\n )\n\n return visual"
},
{
"identifier": "_build_text_tower",
"path": "src/open_clip/model.py",
"snippet": "def _build_text_tower(\n embed_dim: int,\n text_cfg: CLIPTextCfg,\n quick_gelu: bool = False,\n cast_dtype: Optional[torch.dtype] = None,\n):\n if isinstance(text_cfg, dict):\n text_cfg = CLIPTextCfg(**text_cfg)\n\n if text_cfg.hf_model_name:\n text = HFTextEncoder(\n text_cfg.hf_model_name,\n output_dim=embed_dim,\n proj=text_cfg.proj,\n pooler_type=text_cfg.pooler_type,\n pretrained=text_cfg.hf_model_pretrained,\n output_tokens=text_cfg.output_tokens,\n )\n else:\n act_layer = QuickGELU if quick_gelu else nn.GELU\n norm_layer = LayerNormFp32 if cast_dtype in (torch.float16, torch.bfloat16) else LayerNorm\n\n text = TextTransformer(\n context_length=text_cfg.context_length,\n vocab_size=text_cfg.vocab_size,\n width=text_cfg.width,\n heads=text_cfg.heads,\n layers=text_cfg.layers,\n ls_init_value=text_cfg.ls_init_value,\n output_dim=embed_dim,\n embed_cls=text_cfg.embed_cls,\n output_tokens=text_cfg.output_tokens,\n pad_id=text_cfg.pad_id,\n act_layer=act_layer,\n norm_layer=norm_layer,\n )\n return text"
}
] | from typing import Optional
from torch import nn
from torch.nn import functional as F
from dataclasses import dataclass
from .transformer import (
LayerNormFp32,
LayerNorm,
QuickGELU,
MultimodalTransformer,
)
from .model import CLIPTextCfg, CLIPVisionCfg, _build_vision_tower, _build_text_tower
from transformers import (
BeamSearchScorer,
LogitsProcessorList,
TopPLogitsWarper,
TopKLogitsWarper,
RepetitionPenaltyLogitsProcessor,
MinLengthLogitsProcessor,
MaxLengthCriteria,
StoppingCriteriaList
)
import torch
import numpy as np | 3,666 | """
Adapted from https://github.com/mlfoundations/open_clip. Copyright (c) 2012-2021 Gabriel Ilharco, Mitchell Wortsman, Nicholas Carlini, Rohan Taori, Achal Dave, Vaishaal Shankar, John Miller, Hongseok Namkoong, Hannaneh Hajishirzi, Ali Farhadi, Ludwig Schmidt
"""
try:
GENERATION_TYPES = {
"top_k": TopKLogitsWarper,
"top_p": TopPLogitsWarper,
"beam_search": "beam_search"
}
_has_transformers = True
except ImportError as e:
GENERATION_TYPES = {
"top_k": None,
"top_p": None,
"beam_search": "beam_search"
}
_has_transformers = False
@dataclass
class MultimodalCfg(CLIPTextCfg):
mlp_ratio: int = 4
dim_head: int = 64
heads: int = 8
n_queries: int = 256
attn_pooler_heads: int = 8
def _build_text_decoder_tower(
embed_dim,
multimodal_cfg,
quick_gelu: bool = False,
cast_dtype: Optional[torch.dtype] = None,
):
multimodal_cfg = MultimodalCfg(**multimodal_cfg) if isinstance(multimodal_cfg, dict) else multimodal_cfg
act_layer = QuickGELU if quick_gelu else nn.GELU
norm_layer = (
LayerNormFp32 if cast_dtype in (torch.float16, torch.bfloat16) else LayerNorm
)
decoder = MultimodalTransformer(
context_length=multimodal_cfg.context_length,
width=multimodal_cfg.width,
heads=multimodal_cfg.heads,
layers=multimodal_cfg.layers,
ls_init_value=multimodal_cfg.ls_init_value,
output_dim=embed_dim,
act_layer=act_layer,
norm_layer=norm_layer,
)
return decoder
class CoCa(nn.Module):
def __init__(
self,
embed_dim,
multimodal_cfg: MultimodalCfg,
text_cfg: CLIPTextCfg,
vision_cfg: CLIPVisionCfg,
quick_gelu: bool = False,
cast_dtype: Optional[torch.dtype] = None,
pad_id: int = 0,
):
super().__init__()
multimodal_cfg = MultimodalCfg(**multimodal_cfg) if isinstance(multimodal_cfg, dict) else multimodal_cfg
text_cfg = CLIPTextCfg(**text_cfg) if isinstance(text_cfg, dict) else text_cfg
vision_cfg = CLIPVisionCfg(**vision_cfg) if isinstance(vision_cfg, dict) else vision_cfg
| """
Adapted from https://github.com/mlfoundations/open_clip. Copyright (c) 2012-2021 Gabriel Ilharco, Mitchell Wortsman, Nicholas Carlini, Rohan Taori, Achal Dave, Vaishaal Shankar, John Miller, Hongseok Namkoong, Hannaneh Hajishirzi, Ali Farhadi, Ludwig Schmidt
"""
try:
GENERATION_TYPES = {
"top_k": TopKLogitsWarper,
"top_p": TopPLogitsWarper,
"beam_search": "beam_search"
}
_has_transformers = True
except ImportError as e:
GENERATION_TYPES = {
"top_k": None,
"top_p": None,
"beam_search": "beam_search"
}
_has_transformers = False
@dataclass
class MultimodalCfg(CLIPTextCfg):
mlp_ratio: int = 4
dim_head: int = 64
heads: int = 8
n_queries: int = 256
attn_pooler_heads: int = 8
def _build_text_decoder_tower(
embed_dim,
multimodal_cfg,
quick_gelu: bool = False,
cast_dtype: Optional[torch.dtype] = None,
):
multimodal_cfg = MultimodalCfg(**multimodal_cfg) if isinstance(multimodal_cfg, dict) else multimodal_cfg
act_layer = QuickGELU if quick_gelu else nn.GELU
norm_layer = (
LayerNormFp32 if cast_dtype in (torch.float16, torch.bfloat16) else LayerNorm
)
decoder = MultimodalTransformer(
context_length=multimodal_cfg.context_length,
width=multimodal_cfg.width,
heads=multimodal_cfg.heads,
layers=multimodal_cfg.layers,
ls_init_value=multimodal_cfg.ls_init_value,
output_dim=embed_dim,
act_layer=act_layer,
norm_layer=norm_layer,
)
return decoder
class CoCa(nn.Module):
def __init__(
self,
embed_dim,
multimodal_cfg: MultimodalCfg,
text_cfg: CLIPTextCfg,
vision_cfg: CLIPVisionCfg,
quick_gelu: bool = False,
cast_dtype: Optional[torch.dtype] = None,
pad_id: int = 0,
):
super().__init__()
multimodal_cfg = MultimodalCfg(**multimodal_cfg) if isinstance(multimodal_cfg, dict) else multimodal_cfg
text_cfg = CLIPTextCfg(**text_cfg) if isinstance(text_cfg, dict) else text_cfg
vision_cfg = CLIPVisionCfg(**vision_cfg) if isinstance(vision_cfg, dict) else vision_cfg
| self.text = _build_text_tower( | 7 | 2023-12-19 11:50:56+00:00 | 8k |
JarodMica/ai-voice-cloning | modules/rvc/infer-web.py | [
{
"identifier": "uvr",
"path": "modules/rvc/infer/modules/uvr5/modules.py",
"snippet": "def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format0):\n infos = []\n try:\n inp_root = inp_root.strip(\" \").strip('\"').strip(\"\\n\").strip('\"').strip(\" \")\n save_root_vocal = (\n save_root_vocal.strip(\" \").strip('\"').strip(\"\\n\").strip('\"').strip(\" \")\n )\n save_root_ins = (\n save_root_ins.strip(\" \").strip('\"').strip(\"\\n\").strip('\"').strip(\" \")\n )\n if model_name == \"onnx_dereverb_By_FoxJoy\":\n pre_fun = MDXNetDereverb(15, config.device)\n else:\n func = AudioPre if \"DeEcho\" not in model_name else AudioPreDeEcho\n pre_fun = func(\n agg=int(agg),\n model_path=os.path.join(\n os.getenv(\"weight_uvr5_root\"), model_name + \".pth\"\n ),\n device=config.device,\n is_half=config.is_half,\n )\n if inp_root != \"\":\n paths = [os.path.join(inp_root, name) for name in os.listdir(inp_root)]\n else:\n paths = [path.name for path in paths]\n for path in paths:\n inp_path = os.path.join(inp_root, path)\n need_reformat = 1\n done = 0\n try:\n info = ffmpeg.probe(inp_path, cmd=\"ffprobe\")\n if (\n info[\"streams\"][0][\"channels\"] == 2\n and info[\"streams\"][0][\"sample_rate\"] == \"44100\"\n ):\n need_reformat = 0\n pre_fun._path_audio_(\n inp_path, save_root_ins, save_root_vocal, format0\n )\n done = 1\n except:\n need_reformat = 1\n traceback.print_exc()\n if need_reformat == 1:\n tmp_path = \"%s/%s.reformatted.wav\" % (\n os.path.join(os.environ[\"TEMP\"]),\n os.path.basename(inp_path),\n )\n os.system(\n \"ffmpeg -i %s -vn -acodec pcm_s16le -ac 2 -ar 44100 %s -y\"\n % (inp_path, tmp_path)\n )\n inp_path = tmp_path\n try:\n if done == 0:\n pre_fun.path_audio(\n inp_path, save_root_ins, save_root_vocal, format0\n )\n infos.append(\"%s->Success\" % (os.path.basename(inp_path)))\n yield \"\\n\".join(infos)\n except:\n try:\n if done == 0:\n pre_fun._path_audio_(\n inp_path, save_root_ins, save_root_vocal, format0\n )\n infos.append(\"%s->Success\" % (os.path.basename(inp_path)))\n yield \"\\n\".join(infos)\n except:\n infos.append(\n \"%s->%s\" % (os.path.basename(inp_path), traceback.format_exc())\n )\n yield \"\\n\".join(infos)\n except:\n infos.append(traceback.format_exc())\n yield \"\\n\".join(infos)\n finally:\n try:\n if model_name == \"onnx_dereverb_By_FoxJoy\":\n del pre_fun.pred.model\n del pre_fun.pred.model_\n else:\n del pre_fun.model\n del pre_fun\n except:\n traceback.print_exc()\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n logger.info(\"Executed torch.cuda.empty_cache()\")\n yield \"\\n\".join(infos)"
},
{
"identifier": "VC",
"path": "modules/rvc/infer/modules/vc/modules.py",
"snippet": "class VC:\n def __init__(self, config):\n self.n_spk = None\n self.tgt_sr = None\n self.net_g = None\n self.pipeline = None\n self.cpt = None\n self.version = None\n self.if_f0 = None\n self.version = None\n self.hubert_model = None\n\n self.config = config\n\n def get_vc(self, sid, *to_return_protect):\n logger.info(\"Get sid: \" + sid)\n\n to_return_protect0 = {\n \"visible\": self.if_f0 != 0,\n \"value\": to_return_protect[0]\n if self.if_f0 != 0 and to_return_protect\n else 0.5,\n \"__type__\": \"update\",\n }\n to_return_protect1 = {\n \"visible\": self.if_f0 != 0,\n \"value\": to_return_protect[1]\n if self.if_f0 != 0 and to_return_protect\n else 0.33,\n \"__type__\": \"update\",\n }\n\n if sid == \"\" or sid == []:\n if self.hubert_model is not None: # 考虑到轮询, 需要加个判断看是否 sid 是由有模型切换到无模型的\n logger.info(\"Clean model cache\")\n del (\n self.net_g,\n self.n_spk,\n self.vc,\n self.hubert_model,\n self.tgt_sr,\n ) # ,cpt\n self.hubert_model = (\n self.net_g\n ) = self.n_spk = self.vc = self.hubert_model = self.tgt_sr = None\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n ###楼下不这么折腾清理不干净\n self.if_f0 = self.cpt.get(\"f0\", 1)\n self.version = self.cpt.get(\"version\", \"v1\")\n if self.version == \"v1\":\n if self.if_f0 == 1:\n self.net_g = SynthesizerTrnMs256NSFsid(\n *self.cpt[\"config\"], is_half=self.config.is_half\n )\n else:\n self.net_g = SynthesizerTrnMs256NSFsid_nono(*self.cpt[\"config\"])\n elif self.version == \"v2\":\n if self.if_f0 == 1:\n self.net_g = SynthesizerTrnMs768NSFsid(\n *self.cpt[\"config\"], is_half=self.config.is_half\n )\n else:\n self.net_g = SynthesizerTrnMs768NSFsid_nono(*self.cpt[\"config\"])\n del self.net_g, self.cpt\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n return (\n {\"visible\": False, \"__type__\": \"update\"},\n {\n \"visible\": True,\n \"value\": to_return_protect0,\n \"__type__\": \"update\",\n },\n {\n \"visible\": True,\n \"value\": to_return_protect1,\n \"__type__\": \"update\",\n },\n \"\",\n \"\",\n )\n # person = f'{os.getenv(\"weight_root\")}/{sid}'\n person = os.path.join(os.getcwd(), sid)\n logger.info(f\"Loading: {person}\")\n\n self.cpt = torch.load(person, map_location=\"cpu\", weights_only=True)\n self.tgt_sr = self.cpt[\"config\"][-1]\n self.cpt[\"config\"][-3] = self.cpt[\"weight\"][\"emb_g.weight\"].shape[0] # n_spk\n self.if_f0 = self.cpt.get(\"f0\", 1)\n self.version = self.cpt.get(\"version\", \"v1\")\n\n synthesizer_class = {\n (\"v1\", 1): SynthesizerTrnMs256NSFsid,\n (\"v1\", 0): SynthesizerTrnMs256NSFsid_nono,\n (\"v2\", 1): SynthesizerTrnMs768NSFsid,\n (\"v2\", 0): SynthesizerTrnMs768NSFsid_nono,\n }\n\n self.net_g = synthesizer_class.get(\n (self.version, self.if_f0), SynthesizerTrnMs256NSFsid\n )(*self.cpt[\"config\"], is_half=self.config.is_half)\n\n del self.net_g.enc_q\n\n self.net_g.load_state_dict(self.cpt[\"weight\"], strict=False)\n self.net_g.eval().to(self.config.device)\n if self.config.is_half:\n self.net_g = self.net_g.half()\n else:\n self.net_g = self.net_g.float()\n\n self.pipeline = Pipeline(self.tgt_sr, self.config)\n n_spk = self.cpt[\"config\"][-3]\n index = {\"value\": get_index_path_from_model(sid), \"__type__\": \"update\"}\n logger.info(\"Select index: \" + index[\"value\"])\n\n return (\n (\n {\"visible\": True, \"maximum\": n_spk, \"__type__\": \"update\"},\n to_return_protect0,\n to_return_protect1,\n index,\n index,\n )\n if to_return_protect\n else {\"visible\": True, \"maximum\": n_spk, \"__type__\": \"update\", \"tgt_sr\" : self.tgt_sr}\n )\n\n def 
vc_single(\n self,\n sid,\n input_audio_path,\n f0_up_key,\n f0_file,\n f0_method,\n file_index,\n file_index2,\n index_rate,\n filter_radius,\n resample_sr,\n rms_mix_rate,\n protect,\n ):\n if input_audio_path is None:\n return \"You need to upload an audio\", None\n f0_up_key = int(f0_up_key)\n try:\n audio = load_audio(input_audio_path, 16000)\n audio_max = np.abs(audio).max() / 0.95\n if audio_max > 1:\n audio /= audio_max\n times = [0, 0, 0]\n\n if self.hubert_model is None:\n self.hubert_model = load_hubert(self.config)\n\n file_index = (\n (\n file_index.strip(\" \")\n .strip('\"')\n .strip(\"\\n\")\n .strip('\"')\n .strip(\" \")\n .replace(\"trained\", \"added\")\n )\n if file_index != \"\"\n else file_index2\n ) # 防止小白写错,自动帮他替换掉\n\n audio_opt = self.pipeline.pipeline(\n self.hubert_model,\n self.net_g,\n sid,\n audio,\n input_audio_path,\n times,\n f0_up_key,\n f0_method,\n file_index,\n index_rate,\n self.if_f0,\n filter_radius,\n self.tgt_sr,\n resample_sr,\n rms_mix_rate,\n self.version,\n protect,\n f0_file,\n )\n if self.tgt_sr != resample_sr >= 16000:\n tgt_sr = resample_sr\n else:\n tgt_sr = self.tgt_sr\n index_info = (\n \"Index:\\n%s.\" % file_index\n if os.path.exists(file_index)\n else \"Index not used.\"\n )\n return (\n \"Success.\\n%s\\nTime:\\nnpy: %.2fs, f0: %.2fs, infer: %.2fs.\"\n % (index_info, *times),\n (tgt_sr, audio_opt),\n )\n except:\n info = traceback.format_exc()\n logger.warning(info)\n return info, (None, None)\n\n def vc_multi(\n self,\n sid,\n dir_path,\n opt_root,\n paths,\n f0_up_key,\n f0_method,\n file_index,\n file_index2,\n index_rate,\n filter_radius,\n resample_sr,\n rms_mix_rate,\n protect,\n format1,\n ):\n try:\n dir_path = (\n dir_path.strip(\" \").strip('\"').strip(\"\\n\").strip('\"').strip(\" \")\n ) # 防止小白拷路径头尾带了空格和\"和回车\n opt_root = opt_root.strip(\" \").strip('\"').strip(\"\\n\").strip('\"').strip(\" \")\n os.makedirs(opt_root, exist_ok=True)\n try:\n if dir_path != \"\":\n paths = [\n os.path.join(dir_path, name) for name in os.listdir(dir_path)\n ]\n else:\n paths = [path.name for path in paths]\n except:\n traceback.print_exc()\n paths = [path.name for path in paths]\n infos = []\n for path in paths:\n info, opt = self.vc_single(\n sid,\n path,\n f0_up_key,\n None,\n f0_method,\n file_index,\n file_index2,\n # file_big_npy,\n index_rate,\n filter_radius,\n resample_sr,\n rms_mix_rate,\n protect,\n )\n if \"Success\" in info:\n try:\n tgt_sr, audio_opt = opt\n if format1 in [\"wav\", \"flac\"]:\n sf.write(\n \"%s/%s.%s\"\n % (opt_root, os.path.basename(path), format1),\n audio_opt,\n tgt_sr,\n )\n else:\n path = \"%s/%s.%s\" % (\n opt_root,\n os.path.basename(path),\n format1,\n )\n with BytesIO() as wavf:\n sf.write(wavf, audio_opt, tgt_sr, format=\"wav\")\n wavf.seek(0, 0)\n with open(path, \"wb\") as outf:\n wav2(wavf, outf, format1)\n except:\n info += traceback.format_exc()\n infos.append(\"%s->%s\" % (os.path.basename(path), info))\n yield \"\\n\".join(infos)\n yield \"\\n\".join(infos)\n except:\n yield traceback.format_exc()"
}
] | import os, sys
import logging
import shutil
import threading
import traceback
import warnings
import json
import pathlib
import fairseq
import faiss
import gradio as gr
import numpy as np
import torch
from random import shuffle
from subprocess import Popen
from time import sleep
from dotenv import load_dotenv
from sklearn.cluster import MiniBatchKMeans
from configs.config import Config
from i18n.i18n import I18nAuto
from modules.rvc.infer.lib.train.process_ckpt import (
change_info,
extract_small_model,
merge,
show_info,
)
from modules.rvc.infer.modules.uvr5.modules import uvr
from modules.rvc.infer.modules.vc.modules import VC
from modules.rvc.infer.modules.onnx.export import export_onnx as eo | 3,728 |
now_dir = os.getcwd()
sys.path.append(now_dir)
logging.getLogger("numba").setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
tmp = os.path.join(now_dir, "TEMP")
shutil.rmtree(tmp, ignore_errors=True)
shutil.rmtree("%s/runtime/Lib/site-packages/infer_pack" % (now_dir), ignore_errors=True)
shutil.rmtree("%s/runtime/Lib/site-packages/uvr5_pack" % (now_dir), ignore_errors=True)
os.makedirs(tmp, exist_ok=True)
os.makedirs(os.path.join(now_dir, "logs"), exist_ok=True)
os.makedirs(os.path.join(now_dir, "assets/weights"), exist_ok=True)
os.environ["TEMP"] = tmp
warnings.filterwarnings("ignore")
torch.manual_seed(114514)
load_dotenv()
config = Config()
|
now_dir = os.getcwd()
sys.path.append(now_dir)
logging.getLogger("numba").setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
tmp = os.path.join(now_dir, "TEMP")
shutil.rmtree(tmp, ignore_errors=True)
shutil.rmtree("%s/runtime/Lib/site-packages/infer_pack" % (now_dir), ignore_errors=True)
shutil.rmtree("%s/runtime/Lib/site-packages/uvr5_pack" % (now_dir), ignore_errors=True)
os.makedirs(tmp, exist_ok=True)
os.makedirs(os.path.join(now_dir, "logs"), exist_ok=True)
os.makedirs(os.path.join(now_dir, "assets/weights"), exist_ok=True)
os.environ["TEMP"] = tmp
warnings.filterwarnings("ignore")
torch.manual_seed(114514)
load_dotenv()
config = Config() | vc = VC(config) | 1 | 2023-12-18 00:10:23+00:00 | 8k |
penghao-wu/vstar | VisualSearch/model/owlvit/owlvit.py | [
{
"identifier": "box_ops",
"path": "VisualSearch/model/owlvit/util/box_ops.py",
"snippet": "def box_cxcywh_to_xyxy(x):\ndef box_xyxy_to_cxcywh(x):\ndef box_iou(boxes1, boxes2):\ndef generalized_box_iou(boxes1, boxes2):\ndef masks_to_boxes(masks):"
},
{
"identifier": "nested_tensor_from_tensor_list",
"path": "VisualSearch/model/owlvit/util/misc.py",
"snippet": "def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):\n # TODO make this more general\n if tensor_list[0].ndim == 3:\n # TODO make it support different-sized images\n max_size = _max_by_axis([list(img.shape) for img in tensor_list])\n # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))\n batch_shape = [len(tensor_list)] + max_size\n b, c, h, w = batch_shape\n dtype = tensor_list[0].dtype\n device = tensor_list[0].device\n tensor = torch.zeros(batch_shape, dtype=dtype, device=device)\n mask = torch.ones((b, h, w), dtype=torch.bool, device=device)\n for img, pad_img, m in zip(tensor_list, tensor, mask):\n pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)\n m[: img.shape[1], :img.shape[2]] = False\n else:\n raise ValueError('not supported')\n return NestedTensor(tensor, mask)"
},
{
"identifier": "accuracy",
"path": "VisualSearch/model/owlvit/util/misc.py",
"snippet": "@torch.no_grad()\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n if target.numel() == 0:\n return [torch.zeros([], device=output.device)]\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res"
},
{
"identifier": "interpolate",
"path": "VisualSearch/model/owlvit/util/misc.py",
"snippet": "def interpolate(input, size=None, scale_factor=None, mode=\"nearest\", align_corners=None):\n # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor\n \"\"\"\n Equivalent to nn.functional.interpolate, but with support for empty batch sizes.\n This will eventually be supported natively by PyTorch, and this\n class can go away.\n \"\"\"\n if float(torchvision.__version__[:3]) < 0.7:\n if input.numel() > 0:\n return torch.nn.functional.interpolate(\n input, size, scale_factor, mode, align_corners\n )\n\n output_shape = _output_size(2, input, size, scale_factor)\n output_shape = list(input.shape[:-2]) + list(output_shape)\n if float(torchvision.__version__[:3]) < 0.5:\n return _NewEmptyTensorOp.apply(input, output_shape)\n return _new_empty_tensor(input, output_shape)\n else:\n return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)"
},
{
"identifier": "inverse_sigmoid",
"path": "VisualSearch/model/owlvit/util/misc.py",
"snippet": "def inverse_sigmoid(x, eps=1e-5):\n x = x.clamp(min=0, max=1)\n x1 = x.clamp(min=eps)\n x2 = (1 - x).clamp(min=eps)\n return torch.log(x1/x2)"
},
{
"identifier": "HungarianMatcher",
"path": "VisualSearch/model/owlvit/matcher.py",
"snippet": "class HungarianMatcher(nn.Module):\n \"\"\"This class computes an assignment between the targets and the predictions of the network\n\n For efficiency reasons, the targets don't include the no_object. Because of this, in general,\n there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,\n while the others are un-matched (and thus treated as non-objects).\n \"\"\"\n\n def __init__(self,\n cost_class: float = 1,\n cost_bbox: float = 1,\n cost_giou: float = 1):\n \"\"\"Creates the matcher\n\n Params:\n cost_class: This is the relative weight of the classification error in the matching cost\n cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost\n cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost\n \"\"\"\n super().__init__()\n self.cost_class = cost_class\n self.cost_bbox = cost_bbox\n self.cost_giou = cost_giou\n assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, \"all costs cant be 0\"\n\n def forward(self, outputs, targets):\n \"\"\" Performs the matching\n\n Params:\n outputs: This is a dict that contains at least these entries:\n \"pred_logits\": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits\n \"pred_boxes\": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates\n\n targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:\n \"labels\": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth\n objects in the target) containing the class labels\n \"boxes\": Tensor of dim [num_target_boxes, 4] containing the target box coordinates\n\n Returns:\n A list of size batch_size, containing tuples of (index_i, index_j) where:\n - index_i is the indices of the selected predictions (in order)\n - index_j is the indices of the corresponding selected targets (in order)\n For each batch element, it holds:\n len(index_i) = len(index_j) = min(num_queries, num_target_boxes)\n \"\"\"\n with torch.no_grad():\n bs, num_queries = outputs[\"pred_logits\"].shape[:2]\n\n # We flatten to compute the cost matrices in a batch\n out_prob = outputs[\"pred_logits\"].flatten(0, 1).sigmoid()\n out_bbox = outputs[\"pred_boxes\"].flatten(0, 1) # [batch_size * num_queries, 4]\n\n # Also concat the target labels and boxes\n tgt_ids = torch.cat([v[\"labels\"] for v in targets])\n tgt_bbox = torch.cat([v[\"boxes\"] for v in targets])\n\n # Compute the classification cost.\n alpha = 0.25\n gamma = 2.0\n neg_cost_class = (1 - alpha) * (out_prob ** gamma) * (-(1 - out_prob + 1e-8).log())\n pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log())\n cost_class = pos_cost_class[:, tgt_ids] - neg_cost_class[:, tgt_ids]\n\n # Compute the L1 cost between boxes\n cost_bbox = cdist(out_bbox, tgt_bbox)\n\n # Compute the giou cost betwen boxes\n cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox),\n box_cxcywh_to_xyxy(tgt_bbox))\n\n # Final cost matrix\n C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou\n C = C.view(bs, num_queries, -1).cpu()\n\n sizes = [len(v[\"boxes\"]) for v in targets]\n indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]\n return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]"
},
{
"identifier": "dice_loss",
"path": "VisualSearch/model/owlvit/segmentation.py",
"snippet": "def dice_loss(inputs, targets, num_boxes):\n \"\"\"\n Compute the DICE loss, similar to generalized IOU for masks\n Args:\n inputs: A float tensor of arbitrary shape.\n The predictions for each example.\n targets: A float tensor with the same shape as inputs. Stores the binary\n classification label for each element in inputs\n (0 for the negative class and 1 for the positive class).\n \"\"\"\n inputs = inputs.sigmoid()\n inputs = inputs.flatten(1)\n numerator = 2 * (inputs * targets).sum(1)\n denominator = inputs.sum(-1) + targets.sum(-1)\n loss = 1 - (numerator + 1) / (denominator + 1)\n return loss.sum() / num_boxes"
},
{
"identifier": "sigmoid_focal_loss",
"path": "VisualSearch/model/owlvit/segmentation.py",
"snippet": "def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):\n \"\"\"\n Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.\n Args:\n inputs: A float tensor of arbitrary shape.\n The predictions for each example.\n targets: A float tensor with the same shape as inputs. Stores the binary\n classification label for each element in inputs\n (0 for the negative class and 1 for the positive class).\n alpha: (optional) Weighting factor in range (0,1) to balance\n positive vs negative examples. Default = -1 (no weighting).\n gamma: Exponent of the modulating factor (1 - p_t) to\n balance easy vs hard examples.\n Returns:\n Loss tensor\n \"\"\"\n prob = inputs.sigmoid()\n ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction=\"none\")\n p_t = prob * targets + (1 - prob) * (1 - targets)\n loss = ce_loss * ((1 - p_t) ** gamma)\n\n if alpha >= 0:\n alpha_t = alpha * targets + (1 - alpha) * (1 - targets)\n loss = alpha_t * loss\n\n return loss.mean(1)"
},
{
"identifier": "HungarianMatcher",
"path": "VisualSearch/model/owlvit/matcher.py",
"snippet": "class HungarianMatcher(nn.Module):\n \"\"\"This class computes an assignment between the targets and the predictions of the network\n\n For efficiency reasons, the targets don't include the no_object. Because of this, in general,\n there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,\n while the others are un-matched (and thus treated as non-objects).\n \"\"\"\n\n def __init__(self,\n cost_class: float = 1,\n cost_bbox: float = 1,\n cost_giou: float = 1):\n \"\"\"Creates the matcher\n\n Params:\n cost_class: This is the relative weight of the classification error in the matching cost\n cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost\n cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost\n \"\"\"\n super().__init__()\n self.cost_class = cost_class\n self.cost_bbox = cost_bbox\n self.cost_giou = cost_giou\n assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, \"all costs cant be 0\"\n\n def forward(self, outputs, targets):\n \"\"\" Performs the matching\n\n Params:\n outputs: This is a dict that contains at least these entries:\n \"pred_logits\": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits\n \"pred_boxes\": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates\n\n targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:\n \"labels\": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth\n objects in the target) containing the class labels\n \"boxes\": Tensor of dim [num_target_boxes, 4] containing the target box coordinates\n\n Returns:\n A list of size batch_size, containing tuples of (index_i, index_j) where:\n - index_i is the indices of the selected predictions (in order)\n - index_j is the indices of the corresponding selected targets (in order)\n For each batch element, it holds:\n len(index_i) = len(index_j) = min(num_queries, num_target_boxes)\n \"\"\"\n with torch.no_grad():\n bs, num_queries = outputs[\"pred_logits\"].shape[:2]\n\n # We flatten to compute the cost matrices in a batch\n out_prob = outputs[\"pred_logits\"].flatten(0, 1).sigmoid()\n out_bbox = outputs[\"pred_boxes\"].flatten(0, 1) # [batch_size * num_queries, 4]\n\n # Also concat the target labels and boxes\n tgt_ids = torch.cat([v[\"labels\"] for v in targets])\n tgt_bbox = torch.cat([v[\"boxes\"] for v in targets])\n\n # Compute the classification cost.\n alpha = 0.25\n gamma = 2.0\n neg_cost_class = (1 - alpha) * (out_prob ** gamma) * (-(1 - out_prob + 1e-8).log())\n pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log())\n cost_class = pos_cost_class[:, tgt_ids] - neg_cost_class[:, tgt_ids]\n\n # Compute the L1 cost between boxes\n cost_bbox = cdist(out_bbox, tgt_bbox)\n\n # Compute the giou cost betwen boxes\n cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox),\n box_cxcywh_to_xyxy(tgt_bbox))\n\n # Final cost matrix\n C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou\n C = C.view(bs, num_queries, -1).cpu()\n\n sizes = [len(v[\"boxes\"]) for v in targets]\n indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]\n return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]"
}
] | import torch
import torch.nn.functional as F
import numpy as np
import math
import copy
from torch import nn
from typing import Any, Dict, Optional, Tuple, Union
from transformers import OwlViTForObjectDetection, OwlViTConfig
from .util import box_ops
from .util.misc import (nested_tensor_from_tensor_list,
accuracy, interpolate, inverse_sigmoid)
from .matcher import HungarianMatcher
from .segmentation import dice_loss, sigmoid_focal_loss
from .matcher import HungarianMatcher | 5,482 | pred_boxes += self.compute_box_bias(feature_map)
pred_boxes = self.sigmoid(pred_boxes)
return pred_boxes
def class_predictor(
self,
image_feats: torch.FloatTensor,
query_embeds: Optional[torch.FloatTensor] = None,
query_mask: Optional[torch.Tensor] = None,
) -> Tuple[torch.FloatTensor]:
"""
Args:
image_feats:
Features extracted from the `image_text_embedder`.
query_embeds:
Text query embeddings.
query_mask:
Must be provided with query_embeddings. A mask indicating which query embeddings are valid.
"""
(pred_logits, image_class_embeds) = self.class_head(image_feats, query_embeds, query_mask)
return (pred_logits, image_class_embeds)
def get_visual_embs(self, image):
vision_outputs = self.vision_model(
pixel_values=image,
output_hidden_states=self.vision_model.config.output_hidden_states,
return_dict=self.vision_model.config.use_return_dict,
)
# Get image embeddings
last_hidden_state = vision_outputs[0]
image_embeds = self.vision_model.post_layernorm(last_hidden_state)
# Resize class token
new_size = tuple(np.array(image_embeds.shape) - np.array((0, 1, 0)))
class_token_out = torch.broadcast_to(image_embeds[:, :1, :], new_size)
# Merge image embedding with class tokens
image_embeds = image_embeds[:, 1:, :] * class_token_out
image_embeds = self.layer_norm(image_embeds)
# Resize to [batch_size, num_patches, num_patches, hidden_size]
new_size = (
image_embeds.shape[0],
int(np.sqrt(image_embeds.shape[1])),
int(np.sqrt(image_embeds.shape[1])),
image_embeds.shape[-1],
)
feature_map = image_embeds.reshape(new_size)
return feature_map
def forward(
self,
image_embeddings: torch.Tensor,
prompt_embeddings: torch.Tensor,
):
feature_map = image_embeddings
batch_size, num_patches, num_patches, hidden_dim = feature_map.shape
image_feats = torch.reshape(feature_map, (batch_size, num_patches * num_patches, hidden_dim))
query_embeds = prompt_embeddings.reshape(batch_size, 1, prompt_embeddings.shape[-1])
# Predict object classes [batch_size, num_patches, num_queries+1]
(pred_logits, class_embeds) = self.class_predictor(image_feats, query_embeds)
# Predict object boxes
pred_boxes = self.box_predictor(image_feats, feature_map)
out = {'pred_logits': pred_logits, 'pred_boxes': pred_boxes}
return out
class SetCriterion(nn.Module):
""" This class computes the loss for DETR.
The process happens in two steps:
1) we compute hungarian assignment between ground truth boxes and the outputs of the model
2) we supervise each pair of matched ground-truth / prediction (supervise class and box)
"""
def __init__(self, num_classes, matcher, weight_dict, losses, focal_alpha=0.25):
""" Create the criterion.
Parameters:
num_classes: number of object categories, omitting the special no-object category
matcher: module able to compute a matching between targets and proposals
weight_dict: dict containing as key the names of the losses and as values their relative weight.
losses: list of all the losses to be applied. See get_loss for list of available losses.
focal_alpha: alpha in Focal Loss
"""
super().__init__()
self.num_classes = num_classes
self.matcher = matcher
self.weight_dict = weight_dict
self.losses = losses
self.focal_alpha = focal_alpha
def loss_labels(self, outputs, targets, indices, num_boxes, log=True):
"""Classification loss (NLL)
targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
"""
assert 'pred_logits' in outputs
src_logits = outputs['pred_logits']
idx = self._get_src_permutation_idx(indices)
target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)])
target_classes = torch.full(src_logits.shape[:2], self.num_classes,
dtype=torch.int64, device=src_logits.device)
target_classes[idx] = target_classes_o
target_classes_onehot = torch.zeros([src_logits.shape[0], src_logits.shape[1], src_logits.shape[2] + 1],
dtype=src_logits.dtype, layout=src_logits.layout, device=src_logits.device)
target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1)
target_classes_onehot = target_classes_onehot[:,:,:-1]
loss_ce = sigmoid_focal_loss(src_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * src_logits.shape[1]
losses = {'loss_ce': loss_ce}
if log:
# TODO this should probably be a separate loss, not hacked in this one here
|
class OwlViT(nn.Module):
def __init__(self, num_classes, is_eval=False):
super().__init__()
if is_eval:
owlViT_config = OwlViTConfig.from_pretrained("google/owlvit-base-patch16")
model_owlViT = OwlViTForObjectDetection(owlViT_config)
else:
model_owlViT = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch16")
self.vision_model = model_owlViT.owlvit.vision_model
self.class_head = model_owlViT.class_head
self.box_head = model_owlViT.box_head
self.layer_norm = model_owlViT.layer_norm
self.sigmoid = nn.Sigmoid()
del model_owlViT
self.matcher = HungarianMatcher(cost_class=2, cost_bbox=5, cost_giou=2)
self.weight_dict = {'loss_ce': 2, 'loss_bbox': 5, 'loss_giou': 2}
self.losses = ['labels', 'boxes']
# num_classes, matcher, weight_dict, losses, focal_alpha=0.25
self.criterion = SetCriterion(num_classes, self.matcher, self.weight_dict, self.losses, focal_alpha=0.25)
def normalize_grid_corner_coordinates(self, feature_map: torch.FloatTensor):
# Computes normalized xy corner coordinates from feature_map.
if not feature_map.ndim == 4:
raise ValueError("Expected input shape is [batch_size, num_patches, num_patches, hidden_dim]")
device = feature_map.device
num_patches = feature_map.shape[1]
box_coordinates = np.stack(
np.meshgrid(np.arange(1, num_patches + 1), np.arange(1, num_patches + 1)), axis=-1
).astype(np.float32)
box_coordinates /= np.array([num_patches, num_patches], np.float32)
# Flatten (h, w, 2) -> (h*w, 2)
box_coordinates = box_coordinates.reshape(
box_coordinates.shape[0] * box_coordinates.shape[1], box_coordinates.shape[2]
)
box_coordinates = torch.from_numpy(box_coordinates).to(device)
return box_coordinates
def compute_box_bias(self, feature_map: torch.FloatTensor) -> torch.FloatTensor:
# The box center is biased to its position on the feature grid
box_coordinates = self.normalize_grid_corner_coordinates(feature_map)
box_coordinates = torch.clip(box_coordinates, 0.0, 1.0)
# Unnormalize xy
box_coord_bias = torch.log(box_coordinates + 1e-4) - torch.log1p(-box_coordinates + 1e-4)
# The box size is biased to the patch size
box_size = torch.full_like(box_coord_bias, 1.0 / feature_map.shape[-2])
box_size_bias = torch.log(box_size + 1e-4) - torch.log1p(-box_size + 1e-4)
# Compute box bias
box_bias = torch.cat([box_coord_bias, box_size_bias], dim=-1)
return box_bias
def box_predictor(
self,
image_feats: torch.FloatTensor,
feature_map: torch.FloatTensor,
) -> torch.FloatTensor:
"""
Args:
image_feats:
Features extracted from the image, returned by the `image_text_embedder` method.
feature_map:
A spatial re-arrangement of image_features, also returned by the `image_text_embedder` method.
Returns:
pred_boxes:
List of predicted boxes (cxcywh normalized to 0, 1) nested within a dictionary.
"""
# Bounding box detection head [batch_size, num_boxes, 4].
pred_boxes = self.box_head(image_feats)
# Compute the location of each token on the grid and use it to compute a bias for the bbox prediction
pred_boxes += self.compute_box_bias(feature_map)
pred_boxes = self.sigmoid(pred_boxes)
return pred_boxes
def class_predictor(
self,
image_feats: torch.FloatTensor,
query_embeds: Optional[torch.FloatTensor] = None,
query_mask: Optional[torch.Tensor] = None,
) -> Tuple[torch.FloatTensor]:
"""
Args:
image_feats:
Features extracted from the `image_text_embedder`.
query_embeds:
Text query embeddings.
query_mask:
Must be provided with query_embeddings. A mask indicating which query embeddings are valid.
"""
(pred_logits, image_class_embeds) = self.class_head(image_feats, query_embeds, query_mask)
return (pred_logits, image_class_embeds)
def get_visual_embs(self, image):
vision_outputs = self.vision_model(
pixel_values=image,
output_hidden_states=self.vision_model.config.output_hidden_states,
return_dict=self.vision_model.config.use_return_dict,
)
# Get image embeddings
last_hidden_state = vision_outputs[0]
image_embeds = self.vision_model.post_layernorm(last_hidden_state)
# Resize class token
new_size = tuple(np.array(image_embeds.shape) - np.array((0, 1, 0)))
class_token_out = torch.broadcast_to(image_embeds[:, :1, :], new_size)
# Merge image embedding with class tokens
image_embeds = image_embeds[:, 1:, :] * class_token_out
image_embeds = self.layer_norm(image_embeds)
# Resize to [batch_size, num_patches, num_patches, hidden_size]
new_size = (
image_embeds.shape[0],
int(np.sqrt(image_embeds.shape[1])),
int(np.sqrt(image_embeds.shape[1])),
image_embeds.shape[-1],
)
feature_map = image_embeds.reshape(new_size)
return feature_map
def forward(
self,
image_embeddings: torch.Tensor,
prompt_embeddings: torch.Tensor,
):
feature_map = image_embeddings
batch_size, num_patches, num_patches, hidden_dim = feature_map.shape
image_feats = torch.reshape(feature_map, (batch_size, num_patches * num_patches, hidden_dim))
query_embeds = prompt_embeddings.reshape(batch_size, 1, prompt_embeddings.shape[-1])
# Predict object classes [batch_size, num_patches, num_queries+1]
(pred_logits, class_embeds) = self.class_predictor(image_feats, query_embeds)
# Predict object boxes
pred_boxes = self.box_predictor(image_feats, feature_map)
out = {'pred_logits': pred_logits, 'pred_boxes': pred_boxes}
return out
class SetCriterion(nn.Module):
""" This class computes the loss for DETR.
The process happens in two steps:
1) we compute hungarian assignment between ground truth boxes and the outputs of the model
2) we supervise each pair of matched ground-truth / prediction (supervise class and box)
"""
def __init__(self, num_classes, matcher, weight_dict, losses, focal_alpha=0.25):
""" Create the criterion.
Parameters:
num_classes: number of object categories, omitting the special no-object category
matcher: module able to compute a matching between targets and proposals
weight_dict: dict containing as key the names of the losses and as values their relative weight.
losses: list of all the losses to be applied. See get_loss for list of available losses.
focal_alpha: alpha in Focal Loss
"""
super().__init__()
self.num_classes = num_classes
self.matcher = matcher
self.weight_dict = weight_dict
self.losses = losses
self.focal_alpha = focal_alpha
def loss_labels(self, outputs, targets, indices, num_boxes, log=True):
"""Classification loss (NLL)
targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
"""
assert 'pred_logits' in outputs
src_logits = outputs['pred_logits']
idx = self._get_src_permutation_idx(indices)
target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)])
target_classes = torch.full(src_logits.shape[:2], self.num_classes,
dtype=torch.int64, device=src_logits.device)
target_classes[idx] = target_classes_o
target_classes_onehot = torch.zeros([src_logits.shape[0], src_logits.shape[1], src_logits.shape[2] + 1],
dtype=src_logits.dtype, layout=src_logits.layout, device=src_logits.device)
target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1)
target_classes_onehot = target_classes_onehot[:,:,:-1]
loss_ce = sigmoid_focal_loss(src_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * src_logits.shape[1]
losses = {'loss_ce': loss_ce}
if log:
# TODO this should probably be a separate loss, not hacked in this one here | losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0] | 2 | 2023-12-15 14:58:24+00:00 | 8k |
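The HungarianMatcher snippet in this row builds a per-prediction/per-target cost from a focal-style classification term plus L1 and generalized-IoU box terms, then solves a one-to-one assignment. The following standalone sketch is illustrative only and not part of the dataset row: it uses random tensors, assumed loss weights, and omits the GIoU term, but shows the same matching mechanics on a toy problem.
import torch
from scipy.optimize import linear_sum_assignment

# Toy setup: 4 predicted boxes/class scores, 2 ground-truth targets.
num_queries, num_classes = 4, 3
out_prob = torch.randn(num_queries, num_classes).sigmoid()  # per-class probabilities
out_bbox = torch.rand(num_queries, 4)                       # predicted cxcywh boxes
tgt_ids = torch.tensor([0, 2])                              # target class labels
tgt_bbox = torch.rand(2, 4)                                 # target cxcywh boxes

# Focal-style classification cost, mirroring the matcher above.
alpha, gamma = 0.25, 2.0
neg_cost = (1 - alpha) * (out_prob ** gamma) * (-(1 - out_prob + 1e-8).log())
pos_cost = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log())
cost_class = pos_cost[:, tgt_ids] - neg_cost[:, tgt_ids]    # [num_queries, num_targets]

# L1 box cost; the GIoU term is left out of this sketch.
cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)

C = 5 * cost_bbox + 2 * cost_class                          # assumed weights for the sketch
rows, cols = linear_sum_assignment(C.numpy())
print(list(zip(rows.tolist(), cols.tolist())))              # each target matched to one prediction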
worm128/AI-YinMei | text-generation-webui/extensions/superboogav2/api.py | [
{
"identifier": "ChromaCollector",
"path": "text-generation-webui/extensions/superboogav2/chromadb.py",
"snippet": "class ChromaCollector(Collecter):\n def __init__(self, embedder: Embedder):\n super().__init__()\n self.chroma_client = chromadb.Client(Settings(anonymized_telemetry=False))\n self.embedder = embedder\n self.collection = self.chroma_client.create_collection(name=\"context\", embedding_function=self.embedder.embed)\n self.ids = []\n self.id_to_info = {}\n self.embeddings_cache = {}\n self.lock = threading.Lock() # Locking so the server doesn't break.\n\n def add(self, texts: list[str], texts_with_context: list[str], starting_indices: list[int], metadatas: list[dict] = None):\n with self.lock:\n assert metadatas is None or len(metadatas) == len(texts), \"metadatas must be None or have the same length as texts\"\n \n if len(texts) == 0: \n return\n\n new_ids = self._get_new_ids(len(texts))\n\n (existing_texts, existing_embeddings, existing_ids, existing_metas), \\\n (non_existing_texts, non_existing_ids, non_existing_metas) = self._split_texts_by_cache_hit(texts, new_ids, metadatas)\n\n # If there are any already existing texts, add them all at once.\n if existing_texts:\n logger.info(f'Adding {len(existing_embeddings)} cached embeddings.')\n args = {'embeddings': existing_embeddings, 'documents': existing_texts, 'ids': existing_ids}\n if metadatas is not None: \n args['metadatas'] = existing_metas\n self.collection.add(**args)\n\n # If there are any non-existing texts, compute their embeddings all at once. Each call to embed has significant overhead.\n if non_existing_texts:\n non_existing_embeddings = self.embedder.embed(non_existing_texts).tolist()\n for text, embedding in zip(non_existing_texts, non_existing_embeddings):\n self.embeddings_cache[text] = embedding\n\n logger.info(f'Adding {len(non_existing_embeddings)} new embeddings.')\n args = {'embeddings': non_existing_embeddings, 'documents': non_existing_texts, 'ids': non_existing_ids}\n if metadatas is not None: \n args['metadatas'] = non_existing_metas\n self.collection.add(**args)\n\n # Create a dictionary that maps each ID to its context and starting index\n new_info = {\n id_: {'text_with_context': context, 'start_index': start_index}\n for id_, context, start_index in zip(new_ids, texts_with_context, starting_indices)\n }\n\n self.id_to_info.update(new_info)\n self.ids.extend(new_ids)\n\n \n def _split_texts_by_cache_hit(self, texts: list[str], new_ids: list[str], metadatas: list[dict]):\n existing_texts, non_existing_texts = [], []\n existing_embeddings = []\n existing_ids, non_existing_ids = [], []\n existing_metas, non_existing_metas = [], []\n\n for i, text in enumerate(texts):\n id_ = new_ids[i]\n metadata = metadatas[i] if metadatas is not None else None\n embedding = self.embeddings_cache.get(text)\n if embedding:\n existing_texts.append(text)\n existing_embeddings.append(embedding)\n existing_ids.append(id_)\n existing_metas.append(metadata)\n else:\n non_existing_texts.append(text)\n non_existing_ids.append(id_)\n non_existing_metas.append(metadata)\n\n return (existing_texts, existing_embeddings, existing_ids, existing_metas), \\\n (non_existing_texts, non_existing_ids, non_existing_metas)\n\n\n def _get_new_ids(self, num_new_ids: int):\n if self.ids:\n max_existing_id = max(int(id_) for id_ in self.ids)\n else:\n max_existing_id = -1\n\n return [str(i + max_existing_id + 1) for i in range(num_new_ids)]\n\n \n def _find_min_max_start_index(self):\n max_index, min_index = 0, float('inf')\n for _, val in self.id_to_info.items():\n if val['start_index'] > max_index:\n max_index = val['start_index']\n if 
val['start_index'] < min_index:\n min_index = val['start_index']\n return min_index, max_index\n\n\n # NB: Does not make sense to weigh excerpts from different documents. \n # But let's say that's the user's problem. Perfect world scenario:\n # Apply time weighing to different documents. For each document, then, add\n # separate time weighing.\n def _apply_sigmoid_time_weighing(self, infos: list[Info], document_len: int, time_steepness: float, time_power: float):\n sigmoid = lambda x: 1 / (1 + np.exp(-x))\n \n weights = sigmoid(time_steepness * np.linspace(-10, 10, document_len))\n\n # Scale to [0,time_power] and shift it up to [1-time_power, 1]\n weights = weights - min(weights) \n weights = weights * (time_power / max(weights))\n weights = weights + (1 - time_power) \n\n # Reverse the weights\n weights = weights[::-1] \n\n for info in infos:\n index = info.start_index\n info.distance *= weights[index]\n\n\n def _filter_outliers_by_median_distance(self, infos: list[Info], significant_level: float):\n # Ensure there are infos to filter\n if not infos:\n return []\n \n # Find info with minimum distance\n min_info = min(infos, key=lambda x: x.distance)\n\n # Calculate median distance among infos\n median_distance = np.median([inf.distance for inf in infos])\n\n # Filter out infos that have a distance significantly greater than the median\n filtered_infos = [inf for inf in infos if inf.distance <= significant_level * median_distance]\n\n # Always include the info with minimum distance\n if min_info not in filtered_infos:\n filtered_infos.append(min_info)\n\n return filtered_infos\n\n\n def _merge_infos(self, infos: list[Info]):\n merged_infos = []\n current_info = infos[0]\n\n for next_info in infos[1:]:\n merged = current_info.merge_with(next_info)\n if merged is not None:\n current_info = merged\n else:\n merged_infos.append(current_info)\n current_info = next_info\n\n merged_infos.append(current_info)\n return merged_infos\n\n\n # Main function for retrieving chunks by distance. 
It performs merging, time weighing, and mean filtering.\n def _get_documents_ids_distances(self, search_strings: list[str], n_results: int):\n n_results = min(len(self.ids), n_results)\n if n_results == 0:\n return [], [], []\n\n if isinstance(search_strings, str):\n search_strings = [search_strings]\n\n infos = []\n min_start_index, max_start_index = self._find_min_max_start_index()\n\n for search_string in search_strings:\n result = self.collection.query(query_texts=search_string, n_results=math.ceil(n_results / len(search_strings)), include=['distances'])\n curr_infos = [Info(start_index=self.id_to_info[id]['start_index'], \n text_with_context=self.id_to_info[id]['text_with_context'], \n distance=distance, id=id) \n for id, distance in zip(result['ids'][0], result['distances'][0])]\n \n self._apply_sigmoid_time_weighing(infos=curr_infos, document_len=max_start_index - min_start_index + 1, time_steepness=parameters.get_time_steepness(), time_power=parameters.get_time_power())\n curr_infos = self._filter_outliers_by_median_distance(curr_infos, parameters.get_significant_level())\n infos.extend(curr_infos)\n\n infos.sort(key=lambda x: x.start_index)\n infos = self._merge_infos(infos)\n\n texts_with_context = [inf.text_with_context for inf in infos]\n ids = [inf.id for inf in infos]\n distances = [inf.distance for inf in infos]\n\n return texts_with_context, ids, distances\n \n\n # Get chunks by similarity\n def get(self, search_strings: list[str], n_results: int) -> list[str]:\n with self.lock:\n documents, _, _ = self._get_documents_ids_distances(search_strings, n_results)\n return documents\n \n\n # Get ids by similarity\n def get_ids(self, search_strings: list[str], n_results: int) -> list[str]:\n with self.lock:\n _, ids, _ = self._get_documents_ids_distances(search_strings, n_results)\n return ids\n \n \n # Cutoff token count\n def _get_documents_up_to_token_count(self, documents: list[str], max_token_count: int):\n # TODO: Move to caller; We add delimiters there which might go over the limit.\n current_token_count = 0\n return_documents = []\n\n for doc in documents:\n doc_tokens = encode(doc)[0]\n doc_token_count = len(doc_tokens)\n if current_token_count + doc_token_count > max_token_count:\n # If adding this document would exceed the max token count,\n # truncate the document to fit within the limit.\n remaining_tokens = max_token_count - current_token_count\n \n truncated_doc = decode(doc_tokens[:remaining_tokens], skip_special_tokens=True)\n return_documents.append(truncated_doc)\n break\n else:\n return_documents.append(doc)\n current_token_count += doc_token_count\n\n return return_documents\n \n\n # Get chunks by similarity and then sort by ids\n def get_sorted_by_ids(self, search_strings: list[str], n_results: int, max_token_count: int) -> list[str]:\n with self.lock:\n documents, ids, _ = self._get_documents_ids_distances(search_strings, n_results)\n sorted_docs = [x for _, x in sorted(zip(ids, documents))]\n\n return self._get_documents_up_to_token_count(sorted_docs, max_token_count)\n \n \n # Get chunks by similarity and then sort by distance (lowest distance is last).\n def get_sorted_by_dist(self, search_strings: list[str], n_results: int, max_token_count: int) -> list[str]:\n with self.lock:\n documents, _, distances = self._get_documents_ids_distances(search_strings, n_results)\n sorted_docs = [doc for doc, _ in sorted(zip(documents, distances), key=lambda x: x[1])] # sorted lowest -> highest\n \n # If a document is truncated or competely skipped, it would be with 
high distance.\n return_documents = self._get_documents_up_to_token_count(sorted_docs, max_token_count)\n return_documents.reverse() # highest -> lowest\n\n return return_documents\n \n\n def delete(self, ids_to_delete: list[str], where: dict):\n with self.lock:\n ids_to_delete = self.collection.get(ids=ids_to_delete, where=where)['ids']\n self.collection.delete(ids=ids_to_delete, where=where)\n\n # Remove the deleted ids from self.ids and self.id_to_info\n ids_set = set(ids_to_delete)\n self.ids = [id_ for id_ in self.ids if id_ not in ids_set]\n for id_ in ids_to_delete:\n self.id_to_info.pop(id_, None)\n\n logger.info(f'Successfully deleted {len(ids_to_delete)} records from chromaDB.')\n\n\n def clear(self):\n with self.lock:\n self.chroma_client.reset()\n self.collection = self.chroma_client.create_collection(\"context\", embedding_function=self.embedder.embed)\n self.ids = []\n self.id_to_info = {}\n\n logger.info('Successfully cleared all records and reset chromaDB.')"
},
{
"identifier": "process_and_add_to_collector",
"path": "text-generation-webui/extensions/superboogav2/data_processor.py",
"snippet": "def process_and_add_to_collector(corpus: str, collector: ChromaCollector, clear_collector_before_adding: bool, metadata: dict):\n # Defining variables\n chunk_lens = [int(len.strip()) for len in parameters.get_chunk_len().split(',')]\n context_len = [int(len.strip()) for len in parameters.get_context_len().split(',')]\n if len(context_len) >= 3:\n raise f\"Context len has too many values: {len(context_len)}\"\n if len(context_len) == 2:\n context_left = context_len[0]\n context_right = context_len[1]\n else:\n context_left = context_right = context_len[0]\n\n data_chunks = []\n data_chunks_with_context = []\n data_chunk_starting_indices = []\n\n # Handling chunk_regex\n if parameters.get_chunk_regex():\n if parameters.get_chunk_separator():\n cumulative_length = 0 # This variable will store the length of the processed corpus\n sections = corpus.split(parameters.get_chunk_separator())\n for section in sections:\n special_chunks = list(re.finditer(parameters.get_chunk_regex(), section))\n for match in special_chunks:\n chunk = match.group(0)\n start_index = match.start()\n end_index = start_index + len(chunk)\n context = section[max(0, start_index - context_left):min(len(section), end_index + context_right)]\n data_chunks.append(chunk)\n data_chunks_with_context.append(context)\n data_chunk_starting_indices.append(cumulative_length + max(0, start_index - context_left))\n cumulative_length += len(section) + len(parameters.get_chunk_separator()) # Update the length of the processed corpus\n else:\n special_chunks = list(re.finditer(parameters.get_chunk_regex(), corpus))\n for match in special_chunks:\n chunk = match.group(0)\n start_index = match.start()\n end_index = start_index + len(chunk)\n context = corpus[max(0, start_index - context_left):min(len(corpus), end_index + context_right)]\n data_chunks.append(chunk)\n data_chunks_with_context.append(context)\n data_chunk_starting_indices.append(max(0, start_index - context_left))\n\n for chunk_len in chunk_lens:\n # Breaking the data into chunks and adding those to the db\n if parameters.get_chunk_separator():\n cumulative_length = 0 # This variable will store the length of the processed corpus\n sections = corpus.split(parameters.get_chunk_separator())\n for section in sections:\n chunks, chunks_with_context, context_start_indices = _create_chunks_with_context(section, chunk_len, context_left, context_right)\n context_start_indices = [cumulative_length + i for i in context_start_indices] # Add the length of the processed corpus to each start index\n data_chunks.extend(chunks)\n data_chunks_with_context.extend(chunks_with_context)\n data_chunk_starting_indices.extend(context_start_indices)\n cumulative_length += len(section) + len(parameters.get_chunk_separator()) # Update the length of the processed corpus\n else:\n chunks, chunks_with_context, context_start_indices = _create_chunks_with_context(corpus, chunk_len, context_left, context_right)\n data_chunks.extend(chunks)\n data_chunks_with_context.extend(chunks_with_context)\n data_chunk_starting_indices.extend(context_start_indices)\n\n data_chunks = [preprocess_text_no_summary(chunk) for chunk in data_chunks]\n\n data_chunks, data_chunks_with_context, data_chunk_starting_indices = _clear_chunks(\n data_chunks, data_chunks_with_context, data_chunk_starting_indices\n )\n\n if clear_collector_before_adding:\n collector.clear()\n collector.add(data_chunks, data_chunks_with_context, data_chunk_starting_indices, [metadata]*len(data_chunks) if metadata is not None else None)"
}
] | import json
import extensions.superboogav2.parameters as parameters
from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer
from urllib.parse import urlparse, parse_qs
from threading import Thread
from modules import shared
from modules.logging_colors import logger
from .chromadb import ChromaCollector
from .data_processor import process_and_add_to_collector | 4,544 | """
This module is responsible for the VectorDB API. It currently supports:
* DELETE api/v1/clear
- Clears the whole DB.
* POST api/v1/add
- Add some corpus to the DB. You can also specify metadata to be added alongside it.
* POST api/v1/delete
- Delete specific records with given metadata.
* POST api/v1/get
- Get results from chromaDB.
"""
class CustomThreadingHTTPServer(ThreadingHTTPServer):
def __init__(self, server_address, RequestHandlerClass, collector: ChromaCollector, bind_and_activate=True):
self.collector = collector
super().__init__(server_address, RequestHandlerClass, bind_and_activate)
def finish_request(self, request, client_address):
self.RequestHandlerClass(request, client_address, self, self.collector)
class Handler(BaseHTTPRequestHandler):
def __init__(self, request, client_address, server, collector: ChromaCollector):
self.collector = collector
super().__init__(request, client_address, server)
def _send_412_error(self, message):
self.send_response(412)
self.send_header("Content-type", "application/json")
self.end_headers()
response = json.dumps({"error": message})
self.wfile.write(response.encode('utf-8'))
def _send_404_error(self):
self.send_response(404)
self.send_header("Content-type", "application/json")
self.end_headers()
response = json.dumps({"error": "Resource not found"})
self.wfile.write(response.encode('utf-8'))
def _send_400_error(self, error_message: str):
self.send_response(400)
self.send_header("Content-type", "application/json")
self.end_headers()
response = json.dumps({"error": error_message})
self.wfile.write(response.encode('utf-8'))
def _send_200_response(self, message: str):
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
if isinstance(message, str):
response = json.dumps({"message": message})
else:
response = json.dumps(message)
self.wfile.write(response.encode('utf-8'))
def _handle_get(self, search_strings: list[str], n_results: int, max_token_count: int, sort_param: str):
if sort_param == parameters.SORT_DISTANCE:
results = self.collector.get_sorted_by_dist(search_strings, n_results, max_token_count)
elif sort_param == parameters.SORT_ID:
results = self.collector.get_sorted_by_ids(search_strings, n_results, max_token_count)
else: # Default is dist
results = self.collector.get_sorted_by_dist(search_strings, n_results, max_token_count)
return {
"results": results
}
def do_GET(self):
self._send_404_error()
def do_POST(self):
try:
content_length = int(self.headers['Content-Length'])
body = json.loads(self.rfile.read(content_length).decode('utf-8'))
parsed_path = urlparse(self.path)
path = parsed_path.path
query_params = parse_qs(parsed_path.query)
if path in ['/api/v1/add', '/api/add']:
corpus = body.get('corpus')
if corpus is None:
self._send_412_error("Missing parameter 'corpus'")
return
clear_before_adding = body.get('clear_before_adding', False)
metadata = body.get('metadata')
| """
This module is responsible for the VectorDB API. It currently supports:
* DELETE api/v1/clear
- Clears the whole DB.
* POST api/v1/add
- Add some corpus to the DB. You can also specify metadata to be added alongside it.
* POST api/v1/delete
- Delete specific records with given metadata.
* POST api/v1/get
- Get results from chromaDB.
"""
class CustomThreadingHTTPServer(ThreadingHTTPServer):
def __init__(self, server_address, RequestHandlerClass, collector: ChromaCollector, bind_and_activate=True):
self.collector = collector
super().__init__(server_address, RequestHandlerClass, bind_and_activate)
def finish_request(self, request, client_address):
self.RequestHandlerClass(request, client_address, self, self.collector)
class Handler(BaseHTTPRequestHandler):
def __init__(self, request, client_address, server, collector: ChromaCollector):
self.collector = collector
super().__init__(request, client_address, server)
def _send_412_error(self, message):
self.send_response(412)
self.send_header("Content-type", "application/json")
self.end_headers()
response = json.dumps({"error": message})
self.wfile.write(response.encode('utf-8'))
def _send_404_error(self):
self.send_response(404)
self.send_header("Content-type", "application/json")
self.end_headers()
response = json.dumps({"error": "Resource not found"})
self.wfile.write(response.encode('utf-8'))
def _send_400_error(self, error_message: str):
self.send_response(400)
self.send_header("Content-type", "application/json")
self.end_headers()
response = json.dumps({"error": error_message})
self.wfile.write(response.encode('utf-8'))
def _send_200_response(self, message: str):
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
if isinstance(message, str):
response = json.dumps({"message": message})
else:
response = json.dumps(message)
self.wfile.write(response.encode('utf-8'))
def _handle_get(self, search_strings: list[str], n_results: int, max_token_count: int, sort_param: str):
if sort_param == parameters.SORT_DISTANCE:
results = self.collector.get_sorted_by_dist(search_strings, n_results, max_token_count)
elif sort_param == parameters.SORT_ID:
results = self.collector.get_sorted_by_ids(search_strings, n_results, max_token_count)
else: # Default is dist
results = self.collector.get_sorted_by_dist(search_strings, n_results, max_token_count)
return {
"results": results
}
def do_GET(self):
self._send_404_error()
def do_POST(self):
try:
content_length = int(self.headers['Content-Length'])
body = json.loads(self.rfile.read(content_length).decode('utf-8'))
parsed_path = urlparse(self.path)
path = parsed_path.path
query_params = parse_qs(parsed_path.query)
if path in ['/api/v1/add', '/api/add']:
corpus = body.get('corpus')
if corpus is None:
self._send_412_error("Missing parameter 'corpus'")
return
clear_before_adding = body.get('clear_before_adding', False)
metadata = body.get('metadata') | process_and_add_to_collector(corpus, self.collector, clear_before_adding, metadata) | 1 | 2023-12-20 14:13:38+00:00 | 8k |
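A minimal client sketch for the VectorDB API handled by do_POST in this row; the host and port are hypothetical (the actual address is configured elsewhere in the extension), and only the /api/v1/add fields visible here ('corpus', 'clear_before_adding', 'metadata') are used.
import requests

BASE = "http://localhost:5002"  # hypothetical address; use whatever the extension actually binds

payload = {
    "corpus": "The quick brown fox jumps over the lazy dog.",  # required, otherwise the server answers 412
    "clear_before_adding": False,
    "metadata": {"source": "example"},
}
resp = requests.post(f"{BASE}/api/v1/add", json=payload)
print(resp.status_code, resp.json())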
sinoyou/nelf-pro | nerfstudio/models/base_model.py | [
{
"identifier": "RayBundle",
"path": "nerfstudio/cameras/rays.py",
"snippet": "class RayBundle(TensorDataclass):\n \"\"\"A bundle of ray parameters.\"\"\"\n\n # TODO(ethan): make sure the sizes with ... are correct\n origins: TensorType[..., 3]\n \"\"\"Ray origins (XYZ)\"\"\"\n directions: TensorType[..., 3]\n \"\"\"Unit ray direction vector\"\"\"\n pixel_area: TensorType[..., 1]\n \"\"\"Projected area of pixel a distance 1 away from origin\"\"\"\n directions_norm: Optional[TensorType[..., 1]] = None\n \"\"\"Norm of ray direction vector before normalization\"\"\"\n camera_indices: Optional[TensorType[..., 1]] = None\n \"\"\"Camera indices\"\"\"\n nears: Optional[TensorType[..., 1]] = None\n \"\"\"Distance along ray to start sampling\"\"\"\n fars: Optional[TensorType[..., 1]] = None\n \"\"\"Rays Distance along ray to stop sampling\"\"\"\n metadata: Optional[Dict[str, TensorType[\"num_rays\", \"latent_dims\"]]] = None\n \"\"\"Additional metadata or data needed for interpolation, will mimic shape of rays\"\"\"\n times: Optional[TensorType[..., 1]] = None\n \"\"\"Times at which rays are sampled\"\"\"\n probes: Optional[Probes] = None\n \"\"\"Probe Cameras Object. This object doesn't follow the same shape pattern as the other fields. \n Lazy broadcasting is used for preventing CUDA memory overflow. \"\"\"\n\n def set_camera_indices(self, camera_index: int) -> None:\n \"\"\"Sets all of the the camera indices to a specific camera index.\n\n Args:\n camera_index: Camera index.\n \"\"\"\n self.camera_indices = torch.ones_like(self.origins[..., 0:1]).long() * camera_index\n\n def __len__(self):\n num_rays = torch.numel(self.origins) // self.origins.shape[-1]\n return num_rays\n\n def sample(self, num_rays: int) -> \"RayBundle\":\n \"\"\"Returns a RayBundle as a subset of rays.\n\n Args:\n num_rays: Number of rays in output RayBundle\n\n Returns:\n RayBundle with subset of rays.\n \"\"\"\n assert num_rays <= len(self)\n indices = random.sample(range(len(self)), k=num_rays)\n return self[indices]\n\n def get_row_major_sliced_ray_bundle(self, start_idx: int, end_idx: int) -> \"RayBundle\":\n \"\"\"Flattens RayBundle and extracts chunk given start and end indicies.\n\n Args:\n start_idx: Start index of RayBundle chunk.\n end_idx: End index of RayBundle chunk.\n\n Returns:\n Flattened RayBundle with end_idx-start_idx rays.\n\n \"\"\"\n return self.flatten()[start_idx:end_idx]\n\n def get_ray_samples(\n self,\n bin_starts: TensorType[\"bs\":..., \"num_samples\", 1],\n bin_ends: TensorType[\"bs\":..., \"num_samples\", 1],\n spacing_starts: Optional[TensorType[\"bs\":..., \"num_samples\", 1]] = None,\n spacing_ends: Optional[TensorType[\"bs\":..., \"num_samples\", 1]] = None,\n spacing_to_euclidean_fn: Optional[Callable] = None,\n ) -> RaySamples:\n \"\"\"Produces samples for each ray by projection points along the ray direction. Currently samples uniformly.\n\n Args:\n bin_starts: Distance from origin to start of bin. (in Euclidean space)\n bin_ends: Distance from origin to end of bin. (in Euclidean space)\n spacing_starts: start point in normalized space. [0, 1]\n spacing_ends: end point in normalized space. 
[0, 1]\n\n Returns:\n Samples projected along ray.\n \"\"\"\n deltas = bin_ends - bin_starts\n if self.camera_indices is not None:\n camera_indices = self.camera_indices[..., None]\n else:\n camera_indices = None\n\n shaped_raybundle_fields = self[..., None]\n\n frustums = Frustums(\n origins=shaped_raybundle_fields.origins, # [..., 1, 3]\n directions=shaped_raybundle_fields.directions, # [..., 1, 3]\n starts=bin_starts, # [..., num_samples, 1]\n ends=bin_ends, # [..., num_samples, 1]\n pixel_area=shaped_raybundle_fields.pixel_area, # [..., 1, 1]\n )\n\n ray_samples = RaySamples(\n frustums=frustums,\n camera_indices=camera_indices, # [..., 1, 1]\n deltas=deltas, # [..., num_samples, 1]\n spacing_starts=spacing_starts, # [..., num_samples, 1]\n spacing_ends=spacing_ends, # [..., num_samples, 1]\n spacing_to_euclidean_fn=spacing_to_euclidean_fn,\n metadata=shaped_raybundle_fields.metadata,\n times=None if self.times is None else self.times[..., None], # [..., 1, 1]\n probes=self.probes, # special class, not following the same shape pattern\n )\n\n return ray_samples"
},
{
"identifier": "InstantiateConfig",
"path": "nerfstudio/configs/base_config.py",
"snippet": "class InstantiateConfig(PrintableConfig): # pylint: disable=too-few-public-methods\n \"\"\"Config class for instantiating an the class specified in the _target attribute.\"\"\"\n\n _target: Type\n\n def setup(self, **kwargs) -> Any:\n \"\"\"Returns the instantiated object using the config.\"\"\"\n return self._target(self, **kwargs)"
},
{
"identifier": "to_immutable_dict",
"path": "nerfstudio/configs/config_utils.py",
"snippet": "def to_immutable_dict(d: Dict[str, Any]):\n \"\"\"Method to convert mutable dict to default factory dict\n\n Args:\n d: dictionary to convert into default factory dict for dataclass\n \"\"\"\n return field(default_factory=lambda: dict(d))"
},
{
"identifier": "SceneBox",
"path": "nerfstudio/data/scene_box.py",
"snippet": "class SceneBox:\n \"\"\"Data to represent the scene box.\"\"\"\n\n aabb: TensorType[2, 3] = None\n \"\"\"aabb: axis-aligned bounding box.\n aabb[0] is the minimum (x,y,z) point.\n aabb[1] is the maximum (x,y,z) point.\"\"\"\n coarse_binary_gird: Optional[torch.Tensor] = None\n \"\"\"coarse binary grid computed from sparse colmap point cloud, currently only used in neuralrecon in the wild\"\"\"\n near: Optional[float] = 0.1\n \"\"\"near plane for each image\"\"\"\n far: Optional[float] = 6.0\n \"\"\"far plane for each image\"\"\"\n radius: Optional[float] = 1.0\n \"\"\"radius of sphere\"\"\"\n collider_type: Literal[\"box\", \"near_far\", \"sphere\"] = \"box\"\n \"\"\"collider type for each ray, default is box\"\"\"\n\n def get_diagonal_length(self):\n \"\"\"Returns the longest diagonal length.\"\"\"\n diff = self.aabb[1] - self.aabb[0]\n length = torch.sqrt((diff**2).sum() + 1e-20)\n return length\n\n def get_center(self):\n \"\"\"Returns the center of the box.\"\"\"\n diff = self.aabb[1] - self.aabb[0]\n return self.aabb[0] + diff / 2.0\n\n def get_centered_and_scaled_scene_box(self, scale_factor: Union[float, torch.Tensor] = 1.0):\n \"\"\"Returns a new box that has been shifted and rescaled to be centered\n about the origin.\n\n Args:\n scale_factor: How much to scale the camera origins by.\n \"\"\"\n return SceneBox(aabb=(self.aabb - self.get_center()) * scale_factor)\n\n @staticmethod\n def get_normalized_positions(positions: TensorType[..., 3], aabb: TensorType[2, 3]):\n \"\"\"Return normalized positions in range [0, 1] based on the aabb axis-aligned bounding box.\n\n Args:\n positions: the xyz positions\n aabb: the axis-aligned bounding box\n \"\"\"\n aabb_lengths = aabb[1] - aabb[0]\n normalized_positions = (positions - aabb[0]) / aabb_lengths\n return normalized_positions\n\n def to_json(self) -> Dict:\n \"\"\"Returns a json object from the Python object.\"\"\"\n return {\"type\": \"aabb\", \"min_point\": self.aabb[0].tolist(), \"max_point\": self.aabb[1].tolist()}\n\n @staticmethod\n def from_json(json_: Dict) -> \"SceneBox\":\n \"\"\"Returns the an instance of SceneBox from a json dictionary.\n\n Args:\n json_: the json dictionary containing scene box information\n \"\"\"\n assert json_[\"type\"] == \"aabb\"\n aabb = torch.tensor([json_[0], json_[1]])\n return SceneBox(aabb=aabb)\n\n @staticmethod\n def from_camera_poses(poses: TensorType[..., 3, 4], scale_factor: float) -> \"SceneBox\":\n \"\"\"Returns the instance of SceneBox that fully envelopes a set of poses\n\n Args:\n poses: tensor of camera pose matrices\n scale_factor: How much to scale the camera origins by.\n \"\"\"\n xyzs = poses[..., :3, -1]\n aabb = torch.stack([torch.min(xyzs, dim=0)[0], torch.max(xyzs, dim=0)[0]])\n return SceneBox(aabb=aabb * scale_factor)"
},
{
"identifier": "TrainingCallback",
"path": "nerfstudio/engine/callbacks.py",
"snippet": "class TrainingCallback:\n \"\"\"Callback class used during training.\n The function 'func' with 'args' and 'kwargs' will be called every 'update_every_num_iters' training iterations,\n including at iteration 0. The function is called after the training iteration.\n\n Args:\n where_to_run: List of locations for when to run callbak (before/after iteration)\n func: The function that will be called.\n update_every_num_iters: How often to call the function `func`.\n iters: Tuple of iteration steps to perform callback\n args: args for the function 'func'.\n kwargs: kwargs for the function 'func'.\n \"\"\"\n\n def __init__(\n self,\n where_to_run: List[TrainingCallbackLocation],\n func: Callable,\n update_every_num_iters: Optional[int] = None,\n iters: Optional[Tuple[int, ...]] = None,\n args: Optional[List] = None,\n kwargs: Optional[Dict] = None,\n ):\n assert (\n \"step\" in signature(func).parameters.keys()\n ), f\"'step: int' must be an argument in the callback function 'func': {func.__name__}\"\n self.where_to_run = where_to_run\n self.update_every_num_iters = update_every_num_iters\n self.iters = iters\n self.func = func\n self.args = args if args is not None else []\n self.kwargs = kwargs if kwargs is not None else {}\n\n def run_callback(self, step: int):\n \"\"\"Callback to run after training step\n\n Args:\n step: current iteration step\n \"\"\"\n if self.update_every_num_iters is not None:\n if step % self.update_every_num_iters == 0:\n self.func(*self.args, **self.kwargs, step=step)\n elif self.iters is not None:\n if step in self.iters:\n self.func(*self.args, **self.kwargs, step=step)\n\n def run_callback_at_location(self, step: int, location: TrainingCallbackLocation):\n \"\"\"Runs the callback if it's supposed to be run at the given location.\n\n Args:\n step: current iteration step\n location: when to run callback (before/after iteration)\n \"\"\"\n if location in self.where_to_run:\n self.run_callback(step=step)"
},
{
"identifier": "TrainingCallbackAttributes",
"path": "nerfstudio/engine/callbacks.py",
"snippet": "class TrainingCallbackAttributes:\n \"\"\"Attributes that can be used to configure training callbacks.\n The callbacks can be specified in the Dataloader or Model implementations.\n Instead of providing access to the entire Trainer object, we only provide these attributes.\n This should be least prone to errors and fairly clean from a user perspective.\"\"\"\n\n # TODO(ethan): type this without circular imports\n optimizers: Optional[InitVar]\n \"\"\"optimizers for training\"\"\"\n grad_scaler: Optional[InitVar]\n \"\"\"gradient scalers\"\"\"\n pipeline: Optional[InitVar]\n \"\"\"reference to training pipeline\"\"\"\n config: TrainerConfig\n \"\"\"the trainer config\"\"\""
},
{
"identifier": "NearFarCollider",
"path": "nerfstudio/model_components/scene_colliders.py",
"snippet": "class NearFarCollider(SceneCollider):\n \"\"\"Sets the nears and fars with fixed values.\n\n Args:\n near_plane: distance to near plane\n far_plane: distance to far plane\n \"\"\"\n\n def __init__(self, near_plane: float, far_plane: float, **kwargs) -> None:\n self.near_plane = near_plane\n self.far_plane = far_plane\n super().__init__(**kwargs)\n\n def set_nears_and_fars(self, ray_bundle: RayBundle) -> RayBundle:\n ones = torch.ones_like(ray_bundle.origins[..., 0:1])\n near_plane = self.near_plane if self.training else self.near_plane # 0\n ray_bundle.nears = ones * near_plane\n ray_bundle.fars = ones * self.far_plane\n return ray_bundle"
}
] | from abc import abstractmethod
from collections import defaultdict
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Tuple, Type
from torch import nn
from torch.nn import Parameter
from nerfstudio.cameras.rays import RayBundle
from nerfstudio.configs.base_config import InstantiateConfig
from nerfstudio.configs.config_utils import to_immutable_dict
from nerfstudio.data.scene_box import SceneBox
from nerfstudio.engine.callbacks import TrainingCallback, TrainingCallbackAttributes
from nerfstudio.model_components.scene_colliders import NearFarCollider
import torch | 4,056 | # Copyright 2022 The Nerfstudio Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Base Model implementation which takes in RayBundles
"""
from __future__ import annotations
# Model related configs
@dataclass
class ModelConfig(InstantiateConfig):
"""Configuration for model instantiation"""
_target: Type = field(default_factory=lambda: Model)
"""target class to instantiate"""
enable_collider: bool = True
"""Whether to create a scene collider to filter rays."""
collider_params: Optional[Dict[str, float]] = to_immutable_dict({"near_plane": 2.0, "far_plane": 6.0})
"""parameters to instantiate scene collider with"""
loss_coefficients: Dict[str, float] = to_immutable_dict({"rgb_loss_coarse": 1.0, "rgb_loss_fine": 1.0})
"""parameters to instantiate density field with"""
eval_num_rays_per_chunk: int = 4096
"""specifies number of rays per chunk during eval"""
class Model(nn.Module):
"""Model class
Where everything (Fields, Optimizers, Samplers, Visualization, etc) is linked together. This should be
subclassed for custom NeRF model.
Args:
config: configuration for instantiating model
scene_box: dataset scene box
"""
config: ModelConfig
def __init__(
self,
config: ModelConfig,
scene_box: SceneBox,
num_train_data: int,
world_size: int = 1,
local_rank: int = 0,
load_step: int = None,
**kwargs,
) -> None:
super().__init__()
self.config = config
self.scene_box = scene_box
self.num_train_data = num_train_data
self.kwargs = kwargs
self.collider = None
self.world_size = world_size
self.local_rank = local_rank
self.load_step = load_step
self.populate_modules() # populate the modules
self.callbacks = None
# to keep track of which device the nn.Module is on
self.device_indicator_param = nn.Parameter(torch.empty(0))
@property
def device(self):
"""Returns the device that the model is on."""
return self.device_indicator_param.device
def get_training_callbacks( # pylint:disable=no-self-use
self, training_callback_attributes: TrainingCallbackAttributes # pylint: disable=unused-argument
) -> List[TrainingCallback]:
"""Returns a list of callbacks that run functions at the specified training iterations."""
return []
def populate_modules(self):
"""Set the necessary modules to get the network working."""
# default instantiates optional modules that are common among many networks
# NOTE: call `super().populate_modules()` in subclasses
if self.config.enable_collider:
| # Copyright 2022 The Nerfstudio Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Base Model implementation which takes in RayBundles
"""
from __future__ import annotations
# Model related configs
@dataclass
class ModelConfig(InstantiateConfig):
"""Configuration for model instantiation"""
_target: Type = field(default_factory=lambda: Model)
"""target class to instantiate"""
enable_collider: bool = True
"""Whether to create a scene collider to filter rays."""
collider_params: Optional[Dict[str, float]] = to_immutable_dict({"near_plane": 2.0, "far_plane": 6.0})
"""parameters to instantiate scene collider with"""
loss_coefficients: Dict[str, float] = to_immutable_dict({"rgb_loss_coarse": 1.0, "rgb_loss_fine": 1.0})
"""parameters to instantiate density field with"""
eval_num_rays_per_chunk: int = 4096
"""specifies number of rays per chunk during eval"""
class Model(nn.Module):
"""Model class
Where everything (Fields, Optimizers, Samplers, Visualization, etc) is linked together. This should be
subclassed for custom NeRF model.
Args:
config: configuration for instantiating model
scene_box: dataset scene box
"""
config: ModelConfig
def __init__(
self,
config: ModelConfig,
scene_box: SceneBox,
num_train_data: int,
world_size: int = 1,
local_rank: int = 0,
load_step: int = None,
**kwargs,
) -> None:
super().__init__()
self.config = config
self.scene_box = scene_box
self.num_train_data = num_train_data
self.kwargs = kwargs
self.collider = None
self.world_size = world_size
self.local_rank = local_rank
self.load_step = load_step
self.populate_modules() # populate the modules
self.callbacks = None
# to keep track of which device the nn.Module is on
self.device_indicator_param = nn.Parameter(torch.empty(0))
@property
def device(self):
"""Returns the device that the model is on."""
return self.device_indicator_param.device
def get_training_callbacks( # pylint:disable=no-self-use
self, training_callback_attributes: TrainingCallbackAttributes # pylint: disable=unused-argument
) -> List[TrainingCallback]:
"""Returns a list of callbacks that run functions at the specified training iterations."""
return []
def populate_modules(self):
"""Set the necessary modules to get the network working."""
# default instantiates optional modules that are common among many networks
# NOTE: call `super().populate_modules()` in subclasses
if self.config.enable_collider: | self.collider = NearFarCollider( | 6 | 2023-12-15 20:07:22+00:00 | 8k |
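The populate_modules default in this row turns collider_params into a NearFarCollider when enable_collider is set. Below is a small sketch of that collider applied to a hand-built RayBundle, assuming only the nerfstudio interfaces shown in this row's snippets; pixel_area is a placeholder value chosen for illustration.
import torch
from nerfstudio.cameras.rays import RayBundle
from nerfstudio.model_components.scene_colliders import NearFarCollider

# Four unit-length rays starting at the origin; pixel_area is a dummy constant.
rays = RayBundle(
    origins=torch.zeros(4, 3),
    directions=torch.nn.functional.normalize(torch.randn(4, 3), dim=-1),
    pixel_area=torch.full((4, 1), 1e-4),
)

collider = NearFarCollider(near_plane=2.0, far_plane=6.0)  # same values as the default collider_params
rays = collider.set_nears_and_fars(rays)
print(rays.nears.unique(), rays.fars.unique())  # tensor([2.]) tensor([6.])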
Infleqtion/qLDPC | qldpc/codes.py | [
{
"identifier": "abstract",
"path": "qldpc/abstract.py",
"snippet": "DEFAULT_FIELD_ORDER = 2\nclass GroupMember(comb.Permutation):\nclass Group:\nclass Element:\nclass Protograph:\nclass TrivialGroup(Group):\nclass CyclicGroup(Group):\nclass DihedralGroup(Group):\nclass QuaternionGroup(Group):\n def __mul__(self, other: UnknownType) -> UnknownType:\n def __add__(self, other: UnknownType) -> UnknownType:\n def __lt__(self, other: GroupMember) -> bool:\n def __matmul__(self, other: GroupMember) -> GroupMember:\ndef default_lift(member: GroupMember) -> IntegerArray:\n def __init__(\n self, group: PermutationGroup, field: int | None = None, lift: Lift | None = None\n ) -> None:\n def __eq__(self, other: object) -> bool:\n def __mul__(self, other: Group) -> Group:\n def lift(member: GroupMember) -> galois.FieldArray:\n def __contains__(self, member: GroupMember) -> bool:\n def field(self) -> type[galois.FieldArray]:\n def order(self) -> int:\n def generators(self) -> Sequence[GroupMember]:\n def generate(self) -> Iterator[GroupMember]:\n def identity(self) -> GroupMember:\n def product(cls, *groups: Group, repeat: int = 1) -> Group:\n def lift(self, member: GroupMember) -> galois.FieldArray:\n def lift_dim(self) -> int:\n def table(self) -> IntegerArray:\n def from_table(\n cls,\n table: IntegerArray | Sequence[Sequence[int]],\n field: int | None = None,\n integer_lift: IntegerLift | None = None,\n ) -> Group:\n def lift(member: GroupMember) -> IntegerArray:\n def from_generators(\n cls, *generators: GroupMember, field: int | None = None, lift: Lift | None = None\n ) -> Group:\n def __init__(self, group: Group, *members: GroupMember):\n def __eq__(self, other: object) -> bool:\n def __iter__(self) -> Iterator[tuple[GroupMember, galois.FieldArray]]:\n def __add__(self, other: GroupMember | Element) -> Element:\n def __radd__(self, other: GroupMember) -> Element:\n def __mul__(self, other: int | GroupMember | Element) -> Element:\n def __rmul__(self, other: int | GroupMember) -> Element:\n def __neg__(self) -> Element:\n def __pow__(self, power: int) -> Element:\n def copy(self) -> Element:\n def field(self) -> type[galois.FieldArray]:\n def group(self) -> Group:\n def lift(self) -> galois.FieldArray:\n def zero(self) -> Element:\n def one(self) -> Element:\n def T(self) -> Element:\n def __init__(self, matrix: Protograph | ObjectMatrix) -> None:\n def __eq__(self, other: object) -> bool:\n def __rmul__(self, val: int) -> Protograph:\n def __mul__(self, val: int) -> Protograph:\n def matrix(self) -> npt.NDArray[np.object_]:\n def shape(self) -> tuple[int, ...]:\n def group(self) -> Group:\n def field(self) -> type[galois.FieldArray]:\n def lift(self) -> galois.FieldArray:\n def T(self) -> Protograph:\n def build(cls, group: Group, matrix: ObjectMatrix, *, field: int = 2) -> Protograph:\n def __init__(self, field: int | None = None) -> None:\n def to_protograph(\n cls, matrix: IntegerArray | Sequence[Sequence[int]], field: int | None = None\n ) -> Protograph:\n def __init__(self, order: int) -> None:\n def __init__(self, order: int) -> None:\n def __init__(self) -> None:\n def lift(member: int) -> IntegerArray:"
},
{
"identifier": "CayleyComplex",
"path": "qldpc/objects.py",
"snippet": "class CayleyComplex:\n \"\"\"Left-right Cayley complex, used for constructing quantum Tanner codes.\n\n A Cayley complex is a geometric structure built out of a two subsets A and B of a group G. The\n subsets respectively act on elements of G from the left and right, and must be symmetric, which\n is to say (for example) that a ∈ A iff a^-1 ∈ A. To avoid constructing a complex that factors\n into disconnected pieces, we can define G as the group generated by all elements of A and B.\n\n The generating data (A,B) is used to build vertices V, edges E, and faces F as follows:\n - vertices are members of G,\n - edges have the form (g, ag) and (g, gb), and\n - faces f(g,a,b) have the form {g, ab, gb, agb}:\n\n g → gb\n ↓ ↓\n ag → agb\n\n The complex (V,E,F) is in turn used to construct two bipartite directed graphs:\n - subgraph_0 with edges ( g, f(g,a,b)), and\n - subgraph_1 with edges (ag, f(g,a,b)).\n These graphs are used to construct classical Tanner codes that serve as the X and Z sectors of a\n quantum CSS code (namely, a quantum Tanner code).\n\n There are, however, two complications to keep in mind. First, in order for the faces to be non\n degenerate (that is, for each face to contain four vertices), the generating data (A,B) must\n satisfy the Total No Conjugacy condition:\n\n [1] ag != gb for all g,a,b in (G,A,B).\n\n Second, in order to construct a valid quantum Tanner code out of subgraph_0 and subgraph_1, the\n graph (V,E) must be bipartite, V = V_0 ∪ V_1, such that (for example) nodes {g,agb} are in one\n partition, while nodes {ag,gb} are in the other partition. The nodes V_i are then used as the\n sources of subgraph_i. The graph (V,E) is bipartite if:\n\n [2] The Cayley graphs (G;A) and (G;B) both are bipartite.\n\n The Cayley graphs (G;A) and (G;B) are graphs whose\n - vertices are members of G, and\n - edges are pairs of vertices connected by A or B, as in (g, ag) or (g, gb).\n\n If both [1] and [2] are satisfied, when we can construct a Cayley complex out of (G,A,B)\n directly, which we call a \"rank-0\" complex.\n\n If [1] is satisfied but [2] is not, then we can construct a \"rank-1\" complex that enforces\n requirement [2] by taking the double cover of G and modifying members of A and B as:\n - G --> G ⊗ {0,1},\n - a --> (a,1), and\n - b --> (b,1),\n where (a,1) acts on (g,i) as (a,1) * (g,i) = (ag,i+1), and similarly (b,1) * (g,i) = (gb,i+1).\n\n If requirement [1] is not satisfied, then we can construct a \"rank-2\" complex that enforces both\n [1] and [2] by taking the quadruple cover of G and modifying members of A and B as:\n - G --> G ⊗ {0,1} ⊗ {0,1},\n - a --> (a,1,0), and\n - b --> (b,0,1),\n where similarly to before (a,1,0) * (g,i,j) = (ag,i+1,j) and (b,0,1) * (g,i,j) = (gb,i,j+1).\n\n References:\n - https://arxiv.org/abs/2202.13641\n - https://arxiv.org/abs/2206.07571\n - https://www.youtube.com/watch?v=orWcstqWGGo\n \"\"\"\n\n # generating data\n subset_a: set[abstract.GroupMember]\n subset_b: set[abstract.GroupMember]\n group: abstract.Group\n\n # rank and graph (vertices and edges)\n rank: int\n graph: nx.Graph\n faces: set[frozenset[abstract.GroupMember]]\n\n # subgraphs used for a quantum Tanner code\n subgraph_0: nx.DiGraph\n subgraph_1: nx.DiGraph\n\n def __init__(\n self,\n subset_a: Collection[abstract.GroupMember],\n subset_b: Collection[abstract.GroupMember] | None = None,\n *,\n rank: int | None = None,\n ) -> None:\n \"\"\"Construct a left-right Cayley complex.\"\"\"\n assert not rank or 0 <= rank <= 2\n if subset_b is None:\n 
subset_b = subset_a\n subset_a = set(subset_a)\n subset_b = set(subset_b)\n assert all(~member in subset_a for member in subset_a)\n assert all(~member in subset_b for member in subset_b)\n\n # identify the group generated by the provided (sub)sets\n group = abstract.Group.from_generators(*subset_a, *subset_b)\n\n # determine the rank of this complex\n min_rank = CayleyComplex.get_min_rank(group, subset_a, subset_b)\n if rank is not None and rank < min_rank:\n error = f\"Cannot set CayleyComplex rank to {rank} (min_rank: {min_rank})\"\n raise ValueError(error)\n self.rank = min_rank if rank is None else rank\n\n # take the double cover(s) of the group, if necessary, and save the generating data\n identity, shift = abstract.CyclicGroup(2).generate()\n if self.rank == 2:\n shift_a = shift @ identity\n shift_b = identity @ shift\n elif self.rank == 1:\n shift_a = shift_b = shift\n else: # self.rank == 0\n shift_a = shift_b = abstract.TrivialGroup().identity\n self.subset_a = set(aa @ shift_a for aa in subset_a)\n self.subset_b = set(bb @ shift_b for bb in subset_b)\n self.group = abstract.Group.from_generators(*self.subset_a, *self.subset_b)\n\n # construct the vertices, edges, and faces of this complex\n self.graph = nx.Graph()\n self.faces = set()\n for gg, aa, bb in itertools.product(self.group.generate(), self.subset_a, self.subset_b):\n aa_gg, gg_bb, aa_gg_bb = aa * gg, gg * bb, aa * gg * bb\n face = frozenset([gg, aa_gg, gg_bb, aa_gg_bb])\n self.faces.add(face)\n self.graph.add_edge(gg, aa_gg)\n self.graph.add_edge(gg, gg_bb)\n self.graph.add_edge(aa_gg, aa_gg_bb)\n self.graph.add_edge(gg_bb, aa_gg_bb)\n\n # construct the subgraphs of the complex\n self.subgraph_0 = nx.DiGraph()\n self.subgraph_1 = nx.DiGraph()\n half_group, _ = nx.bipartite.sets(self.graph)\n for gg, aa, bb in itertools.product(half_group, self.subset_a, self.subset_b):\n aa_gg, gg_bb, aa_gg_bb = aa * gg, gg * bb, aa * gg * bb\n face = frozenset([gg, aa_gg, gg_bb, aa_gg_bb])\n self.subgraph_0.add_edge(gg, face, sort=(aa, bb))\n self.subgraph_1.add_edge(aa_gg, face, sort=(~aa, bb))\n\n @classmethod\n def get_min_rank(\n cls,\n group: abstract.Group,\n subset_a: Collection[abstract.GroupMember],\n subset_b: Collection[abstract.GroupMember],\n ) -> Literal[0, 1, 2]:\n \"\"\"Minimum rank of a Cayley complex built out of the given generating data.\"\"\"\n if not CayleyComplex.satisfies_total_no_conjugacy(group, subset_a, subset_b):\n return 2\n graph_a, graph_b = CayleyComplex.get_cayley_graphs(group, subset_a, subset_b)\n if not nx.is_bipartite(graph_a) or not nx.is_bipartite(graph_b):\n return 1\n return 0\n\n @classmethod\n def satisfies_total_no_conjugacy(\n cls,\n group: abstract.Group,\n subset_a: Collection[abstract.GroupMember],\n subset_b: Collection[abstract.GroupMember],\n ) -> bool:\n \"\"\"Check the Total No-Conjugacy condition: aa gg != gg bb for all gg, aa, bb.\"\"\"\n return all(\n aa * gg != gg * bb\n for gg, aa, bb in itertools.product(group.generate(), subset_a, subset_b)\n )\n\n @classmethod\n def get_cayley_graphs(\n cls,\n group: abstract.Group,\n subset_a: Collection[abstract.GroupMember],\n subset_b: Collection[abstract.GroupMember],\n ) -> tuple[nx.Graph, nx.Graph]:\n \"\"\"Cayley graphs for the left- and right-acting subsets.\"\"\"\n edges_a = [(gg, aa * gg) for gg in group.generate() for aa in subset_a]\n edges_b = [(gg, gg * bb) for gg in group.generate() for bb in subset_b]\n return nx.Graph(edges_a), nx.Graph(edges_b)"
},
{
"identifier": "Node",
"path": "qldpc/objects.py",
"snippet": "class Node:\n \"\"\"Node in a Tanner graph.\n\n A node essentially an integer index, together with a boolean flag to distinguish \"data\" node\n from a \"check\" node in an error-correcting code.\n \"\"\"\n\n index: int\n is_data: bool = True\n\n def __hash__(self) -> int:\n return hash((self.index, self.is_data))\n\n def __lt__(self, other: Node) -> bool:\n if self.is_data == other.is_data:\n return self.index < other.index\n return self.is_data # data bits \"precede\" check bits\n\n def __str__(self) -> str:\n tag = \"d\" if self.is_data else \"c\"\n return f\"{tag}_{self.index}\""
},
{
"identifier": "Pauli",
"path": "qldpc/objects.py",
"snippet": "class Pauli(enum.Enum):\n \"\"\"Pauli operators.\"\"\"\n\n I = (0, 0) # noqa: E741\n Z = (0, 1)\n X = (1, 0)\n Y = (1, 1)\n\n def __mul__(self, other: Pauli) -> Pauli:\n \"\"\"Product of two Pauli operators.\"\"\"\n val_x = (self.value[0] + other.value[0]) % 2\n val_z = (self.value[1] + other.value[1]) % 2\n return Pauli((val_x, val_z))\n\n def __invert__(self) -> Pauli:\n \"\"\"Hadamard-transform this Pauli operator.\"\"\"\n return Pauli(self.value[::-1])\n\n def __str__(self) -> str:\n if self == Pauli.I:\n return \"I\"\n elif self == Pauli.Z:\n return \"Z\"\n elif self == Pauli.X:\n return \"X\"\n return \"Y\"\n\n @classmethod\n def from_string(cls, string: str) -> Pauli:\n \"\"\"Build a Pauli operator from a string.\"\"\"\n if string == \"I\":\n return Pauli.I\n elif string == \"Z\":\n return Pauli.Z\n elif string == \"X\":\n return Pauli.X\n elif string == \"Y\":\n return Pauli.Y\n raise ValueError(f\"Invalid Pauli operator: {string}\")\n\n @property\n def index(self) -> int:\n \"\"\"Numerical index for Pauli operators.\"\"\"\n if self == Pauli.X:\n return 0\n if self == Pauli.Z:\n return 1\n raise AttributeError(f\"No index for {self}.\")"
},
{
"identifier": "QuditOperator",
"path": "qldpc/objects.py",
"snippet": "class QuditOperator:\n \"\"\"A qudit operator of the form X(val_x)*Z(val_z).\"\"\"\n\n def __init__(self, value: tuple[int, int] = (0, 0)) -> None:\n self.value = value\n\n def __eq__(self, other: object) -> bool:\n return isinstance(other, QuditOperator) and self.value == other.value\n\n def __invert__(self) -> QuditOperator:\n \"\"\"Fourier-transform this qudit operator.\"\"\"\n return QuditOperator(self.value[::-1])\n\n def __str__(self) -> str:\n val_x, val_z = self.value\n if not val_x and not val_z:\n return \"I\"\n if val_x == val_z:\n return f\"Y({val_z})\"\n ops = []\n if val_x:\n ops.append(f\"X({val_x})\")\n if val_z:\n ops.append(f\"Z({val_z})\")\n return \"*\".join(ops)\n\n @classmethod\n def from_string(cls, string: str) -> QuditOperator:\n \"\"\"Build a qudit operator from its string representation.\"\"\"\n if string == \"I\":\n return QuditOperator((0, 0))\n\n invalid_op = f\"Invalid qudit operator: {string}\"\n\n val_x, val_z = 0, 0\n factors = string.split(\"*\")\n if len(factors) > 2:\n raise ValueError(invalid_op)\n\n for factor in factors:\n pauli = factor[0]\n val_str = factor[2:-1]\n _factor = f\"{pauli}({val_str})\"\n if pauli not in \"XYZ\" or not val_str.isnumeric() or factor != _factor:\n raise ValueError(invalid_op)\n\n val = int(val_str)\n if pauli == \"X\":\n val_x = val\n elif pauli == \"Z\":\n val_z = val\n else: # pauli == \"Y\"\n val_x = val_z = val\n\n return QuditOperator((val_x, val_z))"
}
] | import abc
import functools
import itertools
import cachetools
import galois
import ldpc.mod2
import networkx as nx
import numpy as np
import numpy.typing as npt
import qldpc
from collections.abc import Collection, Iterable, Sequence
from typing import TYPE_CHECKING, Literal
from qldpc import abstract
from qldpc.objects import CayleyComplex, Node, Pauli, QuditOperator
from typing_extensions import Self | 5,509 | """Error correction code constructions
Copyright 2023 The qLDPC Authors and Infleqtion Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import annotations
if TYPE_CHECKING:
DEFAULT_FIELD_ORDER = abstract.DEFAULT_FIELD_ORDER
################################################################################
# template error correction code classes
class AbstractCode(abc.ABC):
"""Template class for error-correcting codes."""
_field_order: int
def __init__(
self,
matrix: Self | npt.NDArray[np.int_] | Sequence[Sequence[int]],
field: int | None = None,
) -> None:
"""Construct a code from a parity check matrix over a finite field.
The base field is taken to be F_2 by default.
"""
self._matrix: galois.FieldArray
if isinstance(matrix, type(self)):
self._field_order = matrix.field.order
if not (field is None or field == self._field_order):
raise ValueError(
f"Field argument {field} is inconsistent with the given code, which is defined"
f" over F_{self._field_order}"
)
self._matrix = matrix.matrix
elif isinstance(matrix, galois.FieldArray):
self._field_order = type(matrix).order
self._matrix = matrix
else:
self._field_order = field or DEFAULT_FIELD_ORDER
self._matrix = self.field(np.array(matrix))
@property
def field(self) -> type[galois.FieldArray]:
"""Base field over which this code is defined."""
return galois.GF(self._field_order)
@property
def matrix(self) -> galois.FieldArray:
"""Parity check matrix of this code."""
return self._matrix
@functools.cached_property
def graph(self) -> nx.DiGraph:
"""Tanner graph of this code."""
return self.matrix_to_graph(self.matrix)
@classmethod
@abc.abstractmethod
def matrix_to_graph(cls, matrix: npt.NDArray[np.int_] | Sequence[Sequence[int]]) -> nx.DiGraph:
"""Convert a parity check matrix into a Tanner graph."""
@classmethod
@abc.abstractmethod
def graph_to_matrix(cls, graph: nx.DiGraph) -> galois.FieldArray:
"""Convert a Tanner graph into a parity check matrix."""
class ClassicalCode(AbstractCode):
"""Classical linear error-correcting code over a finite field F_q.
A classical linear code C = {x} is a set of vectors x (with entries in F_q) called code words.
We consider only linear codes, for which any linear combination of code words is also a code word.
Operationally, we define a classical code by a parity check matrix H with dimensions
(num_checks, num_bits). Each row of H represents a linear constraint (a "check") that code
words must satisfy. A vector x is a code word iff H @ x = 0.
"""
def __contains__(self, word: npt.NDArray[np.int_] | Sequence[int]) -> bool:
return not np.any(self.matrix @ self.field(word))
@classmethod
def matrix_to_graph(cls, matrix: npt.NDArray[np.int_] | Sequence[Sequence[int]]) -> nx.DiGraph:
"""Convert a parity check matrix H into a Tanner graph.
The Tanner graph is a bipartite graph with (num_checks, num_bits) vertices, respectively
identified with the checks and bits of the code. The check vertex c and the bit vertex b
share an edge iff c addresses b; that is, edge (c, b) is in the graph iff H[c, b] != 0.
"""
graph = nx.DiGraph()
for row, col in zip(*np.nonzero(matrix)):
| """Error correction code constructions
Copyright 2023 The qLDPC Authors and Infleqtion Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import annotations
if TYPE_CHECKING:
DEFAULT_FIELD_ORDER = abstract.DEFAULT_FIELD_ORDER
################################################################################
# template error correction code classes
class AbstractCode(abc.ABC):
"""Template class for error-correcting codes."""
_field_order: int
def __init__(
self,
matrix: Self | npt.NDArray[np.int_] | Sequence[Sequence[int]],
field: int | None = None,
) -> None:
"""Construct a code from a parity check matrix over a finite field.
The base field is taken to be F_2 by default.
"""
self._matrix: galois.FieldArray
if isinstance(matrix, type(self)):
self._field_order = matrix.field.order
if not (field is None or field == self._field_order):
raise ValueError(
f"Field argument {field} is inconsistent with the given code, which is defined"
f" over F_{self._field_order}"
)
self._matrix = matrix.matrix
elif isinstance(matrix, galois.FieldArray):
self._field_order = type(matrix).order
self._matrix = matrix
else:
self._field_order = field or DEFAULT_FIELD_ORDER
self._matrix = self.field(np.array(matrix))
@property
def field(self) -> type[galois.FieldArray]:
"""Base field over which this code is defined."""
return galois.GF(self._field_order)
@property
def matrix(self) -> galois.FieldArray:
"""Parity check matrix of this code."""
return self._matrix
@functools.cached_property
def graph(self) -> nx.DiGraph:
"""Tanner graph of this code."""
return self.matrix_to_graph(self.matrix)
@classmethod
@abc.abstractmethod
def matrix_to_graph(cls, matrix: npt.NDArray[np.int_] | Sequence[Sequence[int]]) -> nx.DiGraph:
"""Convert a parity check matrix into a Tanner graph."""
@classmethod
@abc.abstractmethod
def graph_to_matrix(cls, graph: nx.DiGraph) -> galois.FieldArray:
"""Convert a Tanner graph into a parity check matrix."""
class ClassicalCode(AbstractCode):
"""Classical linear error-correcting code over a finite field F_q.
A classical linear code C = {x} is a set of vectors x (with entries in F_q) called code words.
We consider only linear codes, for which any linear combination of code words is also a code word.
Operationally, we define a classical code by a parity check matrix H with dimensions
(num_checks, num_bits). Each row of H represents a linear constraint (a "check") that code
words must satisfy. A vector x is a code word iff H @ x = 0.
"""
def __contains__(self, word: npt.NDArray[np.int_] | Sequence[int]) -> bool:
return not np.any(self.matrix @ self.field(word))
@classmethod
def matrix_to_graph(cls, matrix: npt.NDArray[np.int_] | Sequence[Sequence[int]]) -> nx.DiGraph:
"""Convert a parity check matrix H into a Tanner graph.
The Tanner graph is a bipartite graph with (num_checks, num_bits) vertices, respectively
identified with the checks and bits of the code. The check vertex c and the bit vertex b
share an edge iff c addresses b; that is, edge (c, b) is in the graph iff H[c, b] != 0.
"""
graph = nx.DiGraph()
for row, col in zip(*np.nonzero(matrix)): | node_c = Node(index=int(row), is_data=False) | 2 | 2023-12-19 22:29:42+00:00 | 8k |
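The record above ends exactly where ClassicalCode.matrix_to_graph begins iterating over the nonzero entries of H, and its gold next line constructs a check node. Below is a minimal, self-contained sketch of how such a Tanner-graph build typically proceeds; ToyNode is a simplified stand-in for the Node class from the context snippets, and the `val` edge attribute is an assumption rather than the library's actual convention.

import numpy as np
import networkx as nx
from dataclasses import dataclass

@dataclass(frozen=True)
class ToyNode:
    # stand-in for qldpc's Node: an integer index plus a data/check flag
    index: int
    is_data: bool = True

def toy_matrix_to_graph(matrix):
    # Check node c connects to data (bit) node b whenever H[c, b] != 0.
    graph = nx.DiGraph()
    for row, col in zip(*np.nonzero(matrix)):
        node_c = ToyNode(index=int(row), is_data=False)  # parity check
        node_d = ToyNode(index=int(col), is_data=True)   # data bit
        graph.add_edge(node_c, node_d, val=int(matrix[row, col]))  # assumed attribute name
    return graph

H = np.array([[1, 1, 0], [0, 1, 1]])  # two checks acting on three bits
tanner = toy_matrix_to_graph(H)
print(tanner.number_of_nodes(), tanner.number_of_edges())  # 5 nodes, 4 edges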
CosmicLaca/ComfyUI_Primere_Nodes | Nodes/modules/image_meta_reader.py | [
{
"identifier": "Automatic1111",
"path": "Nodes/modules/exif/automatic1111.py",
"snippet": "class Automatic1111(BaseFormat):\n def __init__(self, info: dict = None, raw: str = \"\"):\n super().__init__(info, raw)\n if not self._raw:\n self._raw = self._info.get(\"parameters\")\n self.ProcessExif()\n\n def ProcessExif(self):\n exif_string = self._raw\n # print(exif_string)\n\n EXIF_LABELS = {\n \"positive\":'Positive prompt',\n \"negative\":'Negative prompt',\n \"steps\":'Steps',\n \"sampler\":'Sampler',\n \"seed\":'Seed',\n \"variation_seed\":'Variation seed',\n \"variation_seed_strength\":'Variation seed strength',\n \"size_string\":'Size',\n \"model_hash\":'Model hash',\n 'model_name':'Model',\n \"vae_hash\":'VAE hash',\n \"vae\":'VAE',\n \"lora_hashes\":'Lora hashes',\n \"cfg_scale\":'CFG scale',\n \"cfg_rescale\":'CFG Rescale φ',\n \"cfg_rescale_phi\":'CFG Rescale phi',\n \"rp_active\":'RP Active',\n \"rp_divide_mode\":'RP Divide mode',\n \"rp_matrix_submode\":'RP Matrix submode',\n \"rp_mask_submode\":'RP Mask submode',\n \"rp_prompt_submode\":'RP Prompt submode',\n \"rp_calc_mode\":'RP Calc Mode',\n \"rp_ratios\":'RP Ratios',\n \"rp_base_ratios\":'RP Base Ratios',\n \"rp_use_base\":'RP Use Base',\n \"rp_use_common\":'RP Use Common',\n \"rp_use_ncommon\":'RP Use Ncommon',\n \"rp_change_and\":'RP Change AND',\n \"rp_lora_neg_te_ratios\":'RP LoRA Neg Te Ratios',\n \"rp_lora_neg_u_ratios\":'RP LoRA Neg U Ratios',\n \"rp_threshold\":'RP threshold',\n \"npw_weight\":'NPW_weight',\n \"antiburn\":'AntiBurn',\n \"version\":'Version',\n \"template\":'Template',\n \"negative_template\":'Negative Template',\n \"face_restoration\":'Face restoration',\n \"postprocess_upscaler\":'Postprocess upscaler',\n \"postprocess_upscale_by\":'Postprocess upscale by'\n }\n\n LABEL_END = ['\\n', ',']\n STRIP_FROM_VALUE = ' \";\\n'\n FORCE_STRING = ['model_hash', 'vae_hash', 'lora_hashes']\n FORCE_FLOAT = ['cfg_scale', 'cfg_rescale', 'cfg_rescale_phi', 'npw_weight']\n\n # FIRST_ROW = exif_string.split('\\n', 1)[0]\n exif_string = 'Positive prompt: ' + exif_string\n\n SORTED_BY_STRING = dict(sorted(EXIF_LABELS.items(), key=lambda pos: exif_string.find(pos[1] + ':')))\n SORTED_KEYLIST = list(SORTED_BY_STRING.keys())\n FINAL_DICT = {}\n FLOAT_PATTERN = r'^[-+]?[0-9]*\\.?[0-9]+([eE][-+]?[0-9]+)?$'\n\n for LABEL_KEY, LABEL in SORTED_BY_STRING.items():\n NextValue = '\\n'\n RealLabel = LABEL + ':'\n CurrentKeyIndex = (SORTED_KEYLIST.index(LABEL_KEY))\n NextKeyIndex = CurrentKeyIndex + 1\n\n if len(SORTED_KEYLIST) > NextKeyIndex:\n NextKey = SORTED_KEYLIST[NextKeyIndex]\n NextValue = SORTED_BY_STRING[NextKey] + ':'\n\n if RealLabel in exif_string:\n LabelStart = exif_string.find(RealLabel)\n NextLabelStart = exif_string.find(NextValue)\n LabelLength = len(RealLabel)\n ValueStart = exif_string.find(exif_string[(LabelStart + LabelLength):NextLabelStart])\n ValueLength = len(exif_string[(LabelStart + LabelLength):NextLabelStart])\n ValueRaw = exif_string[(LabelStart + LabelLength):NextLabelStart]\n FirstMatch = next((x for x in LABEL_END if x in exif_string[(ValueStart + ValueLength - 2):(ValueStart + ValueLength + 1)]), False)\n\n if CurrentKeyIndex >= 2 and FirstMatch == ',':\n isUnknownValue = all(x in ValueRaw for x in [':', ','])\n if isUnknownValue:\n FirstMatchOfFaliled = ValueRaw.find(FirstMatch)\n NextLabelStart = ValueStart\n\n if FirstMatch:\n LabelEnd = exif_string.find(FirstMatch, NextLabelStart - 2)\n LabelValue = exif_string[(LabelStart + LabelLength):LabelEnd]\n else:\n LabelEnd = None\n if CurrentKeyIndex >= 2 and FirstMatch == '\\n' or FirstMatch == False:\n badValue = 
exif_string[(LabelStart + LabelLength):LabelEnd]\n isUnknownValue = all(x in badValue for x in [':', '\\n'])\n if isUnknownValue:\n FirstMatchOfFaliled = badValue.find('\\n')\n LabelEnd = exif_string.find('\\n', LabelStart + LabelLength + FirstMatchOfFaliled + 2)\n\n LabelValue = exif_string[(LabelStart + LabelLength):LabelEnd]\n\n LabelValue = LabelValue.replace('Count=', '').strip(STRIP_FROM_VALUE)\n if not LabelValue:\n LabelValue = None\n elif LabelValue == 'False':\n LabelValue = False\n elif LabelValue.isdigit():\n LabelValue = int(LabelValue)\n elif bool(re.match(FLOAT_PATTERN, LabelValue)):\n LabelValue = float(LabelValue)\n\n if LABEL_KEY in FORCE_STRING:\n LabelValue = str(LabelValue)\n\n if LABEL_KEY in FORCE_FLOAT:\n LabelValue = float(LabelValue)\n\n if LABEL_KEY == 'size_string':\n width, height = LabelValue.split(\"x\")\n FINAL_DICT['width'] = int(width)\n FINAL_DICT['height'] = int(height)\n\n FINAL_DICT[LABEL_KEY] = LabelValue\n\n self._parameter = FINAL_DICT"
},
{
"identifier": "Primere",
"path": "Nodes/modules/exif/primere.py",
"snippet": "class Primere(BaseFormat):\n def __init__(self, info: dict = None, raw: str = \"\"):\n super().__init__(info, raw)\n self._pri_format()\n\n def _pri_format(self):\n self._parameter = self._info"
},
{
"identifier": "ComfyUI",
"path": "Nodes/modules/exif/comfyui.py",
"snippet": "class ComfyUI(BaseFormat):\n def __init__(self, info: dict = None, raw: str = \"\"):\n super().__init__(info, raw)\n self._comfy_png()\n\n def _comfy_png(self):\n prompt = self._info.get(\"prompt\") or {}\n workflow = self._info.get(\"workflow\") or {}\n prompt_json = json.loads(prompt)\n\n # find end node of each flow\n end_nodes = list(filter( lambda item: item[-1].get(\"class_type\") in [\"SaveImage\"] + KSAMPLER_TYPES, prompt_json.items(),))\n longest_flow = {}\n longest_nodes = []\n longest_flow_len = 0\n\n for end_node in end_nodes:\n flow, nodes = self._comfy_traverse(prompt_json, str(end_node[0]))\n if len(nodes) > longest_flow_len:\n longest_flow = flow\n longest_nodes = nodes\n longest_flow_len = len(nodes)\n\n SizeID = None\n ModelID = None\n PositiveID = None\n NegativeID = None\n\n if 'latent_image' in flow:\n SizeID = flow['latent_image'][0]\n if 'model' in flow:\n ModelID = flow['model'][0]\n if 'positive' in flow:\n PositiveID = flow['positive'][0]\n if 'negative' in flow:\n NegativeID = flow['negative'][0]\n\n FINAL_DICT = {}\n FINAL_DICT['negative'] = \"\"\n FINAL_DICT['positive'] = \"\"\n\n if PositiveID and NegativeID and 'text_g' in prompt_json[PositiveID]['inputs']:\n FINAL_DICT['positive'] = prompt_json[PositiveID]['inputs']['text_g']\n FINAL_DICT['negative'] = prompt_json[NegativeID]['inputs']['text_g']\n\n if PositiveID and NegativeID and 'text' in prompt_json[PositiveID]['inputs']:\n FINAL_DICT['positive'] = prompt_json[PositiveID]['inputs']['text']\n FINAL_DICT['negative'] = prompt_json[NegativeID]['inputs']['text']\n\n if PositiveID == None or ('text_g' not in prompt_json[PositiveID]['inputs'] and 'text' not in prompt_json[PositiveID]['inputs']):\n if hasattr(self, '_positive'):\n FINAL_DICT['positive'] = self._positive\n if hasattr(self, '_negative'):\n FINAL_DICT['negative'] = self._negative\n\n if 'steps' in flow and type(flow['steps']) == int:\n FINAL_DICT['steps'] = flow['steps']\n if 'sampler_name' in flow and 'scheduler' in flow and type(flow['sampler_name']) == str and type(flow['scheduler']) == str:\n FINAL_DICT['sampler'] = flow['sampler_name'] + ' ' + flow['scheduler']\n if 'seed' in flow and type(flow['seed']) == int:\n FINAL_DICT['seed'] = flow['seed']\n if 'cfg' in flow and (type(flow['cfg']) == int or type(flow['cfg']) == float):\n FINAL_DICT['cfg_scale'] = flow['cfg']\n\n if ModelID and 'ckpt_name' in prompt_json[ModelID]['inputs'] and type(prompt_json[ModelID]['inputs']['ckpt_name']) == str:\n FINAL_DICT['model_name'] = prompt_json[ModelID]['inputs']['ckpt_name'] # flow['ckpt_name']\n elif 'ckpt_name' in flow and type(flow['ckpt_name']) == str:\n FINAL_DICT['model_name'] = flow['ckpt_name']\n\n if SizeID and 'width' in prompt_json[SizeID]['inputs'] and 'height' in prompt_json[SizeID]['inputs'] and type(prompt_json[SizeID]['inputs']['width']) == int:\n origwidth = str(prompt_json[SizeID]['inputs']['width'])\n origheight = str(prompt_json[SizeID]['inputs']['height'])\n FINAL_DICT['width'] = int(origwidth)\n FINAL_DICT['height'] = int(origheight)\n FINAL_DICT['size_string'] = origwidth + 'x' + origheight\n\n self._parameter = FINAL_DICT\n\n def _comfy_traverse(self, prompt, end_node):\n flow = {}\n node = [end_node]\n inputs = {}\n try:\n inputs = prompt[end_node][\"inputs\"]\n except:\n print(\"node error\")\n return flow, node\n match prompt[end_node][\"class_type\"]:\n case node_type if node_type in SAVE_IMAGE_TYPE:\n try:\n last_flow, last_node = self._comfy_traverse(\n prompt, inputs[\"images\"][0]\n )\n flow = 
utility.merge_dict(flow, last_flow)\n node += last_node\n except:\n print(\"comfyUI SaveImage error\")\n case node_type if node_type in KSAMPLER_TYPES:\n try:\n flow = inputs\n last_flow1, last_node1 = self._comfy_traverse(\n prompt, inputs[\"model\"][0]\n )\n last_flow2, last_node2 = self._comfy_traverse(\n prompt, inputs[\"latent_image\"][0]\n )\n positive = self._comfy_traverse(prompt, inputs[\"positive\"][0])\n if isinstance(positive, str):\n self._positive = positive\n elif isinstance(positive, dict):\n self._positive_sdxl.update(positive)\n negative = self._comfy_traverse(prompt, inputs[\"negative\"][0])\n if isinstance(negative, str):\n self._negative = negative\n elif isinstance(negative, dict):\n self._negative_sdxl.update(negative)\n seed = None\n # handle \"CR Seed\"\n if inputs.get(\"seed\") and isinstance(inputs.get(\"seed\"), list):\n seed = {\"seed\": self._comfy_traverse(prompt, inputs[\"seed\"][0])}\n elif inputs.get(\"noise_seed\") and isinstance(\n inputs.get(\"noise_seed\"), list\n ):\n seed = {\n \"noise_seed\": self._comfy_traverse(\n prompt, inputs[\"noise_seed\"][0]\n )\n }\n if seed:\n flow.update(seed)\n flow = utility.merge_dict(flow, last_flow1)\n flow = utility.merge_dict(flow, last_flow2)\n node += last_node1 + last_node2\n except:\n print(\"comfyUI KSampler error\")\n case node_type if node_type in CLIP_TEXT_ENCODE_TYPE:\n try:\n match node_type:\n case \"CLIPTextEncode\":\n # SDXLPromptStyler\n if isinstance(inputs[\"text\"], list):\n text = int(inputs[\"text\"][0])\n prompt_styler = self._comfy_traverse(prompt, str(text))\n self._positive = prompt_styler[0]\n self._negative = prompt_styler[1]\n return\n elif isinstance(inputs[\"text\"], str):\n return inputs.get(\"text\")\n case \"CLIPTextEncodeSDXL\":\n # SDXLPromptStyler\n self._is_sdxl = True\n if isinstance(inputs[\"text_g\"], list):\n text_g = int(inputs[\"text_g\"][0])\n text_l = int(inputs[\"text_l\"][0])\n prompt_styler_g = self._comfy_traverse(\n prompt, str(text_g)\n )\n prompt_styler_l = self._comfy_traverse(\n prompt, str(text_l)\n )\n self._positive_sdxl[\"Clip G\"] = prompt_styler_g[0]\n self._positive_sdxl[\"Clip L\"] = prompt_styler_l[0]\n self._negative_sdxl[\"Clip G\"] = prompt_styler_g[1]\n self._negative_sdxl[\"Clip L\"] = prompt_styler_l[1]\n return\n elif isinstance(inputs[\"text_g\"], str):\n return {\n \"Clip G\": inputs.get(\"text_g\"),\n \"Clip L\": inputs.get(\"text_l\"),\n }\n case \"CLIPTextEncodeSDXLRefiner\":\n self._is_sdxl = True\n if isinstance(inputs[\"text\"], list):\n # SDXLPromptStyler\n text = int(inputs[\"text\"][0])\n prompt_styler = self._comfy_traverse(prompt, str(text))\n self._positive_sdxl[\"Refiner\"] = prompt_styler[0]\n self._negative_sdxl[\"Refiner\"] = prompt_styler[1]\n return\n elif isinstance(inputs[\"text\"], str):\n return {\"Refiner\": inputs.get(\"text\")}\n except:\n print(\"comfyUI CLIPText error\")\n case \"LoraLoader\":\n try:\n flow = inputs\n last_flow, last_node = self._comfy_traverse(\n prompt, inputs[\"model\"][0]\n )\n flow = utility.merge_dict(flow, last_flow)\n node += last_node\n except:\n print(\"comfyUI LoraLoader error\")\n case node_type if node_type in CHECKPOINT_LOADER_TYPE:\n try:\n return inputs, node\n except:\n print(\"comfyUI CheckpointLoader error\")\n case node_type if node_type in VAE_ENCODE_TYPE:\n try:\n last_flow, last_node = self._comfy_traverse(\n prompt, inputs[\"pixels\"][0]\n )\n flow = utility.merge_dict(flow, last_flow)\n node += last_node\n except:\n print(\"comfyUI VAE error\")\n case 
\"ControlNetApplyAdvanced\":\n try:\n positive = self._comfy_traverse(prompt, inputs[\"positive\"][0])\n if isinstance(positive, str):\n self._positive = positive\n elif isinstance(positive, dict):\n self._positive_sdxl.update(positive)\n negative = self._comfy_traverse(prompt, inputs[\"negative\"][0])\n if isinstance(negative, str):\n self._negative = negative\n elif isinstance(negative, dict):\n self._negative_sdxl.update(negative)\n\n last_flow, last_node = self._comfy_traverse(\n prompt, inputs[\"image\"][0]\n )\n flow = utility.merge_dict(flow, last_flow)\n node += last_node\n except:\n print(\"comfyUI ControlNetApply error\")\n case \"ImageScale\":\n try:\n flow = inputs\n last_flow, last_node = self._comfy_traverse(\n prompt, inputs[\"image\"][0]\n )\n flow = utility.merge_dict(flow, last_flow)\n node += last_node\n except:\n print(\"comfyUI ImageScale error\")\n case \"UpscaleModelLoader\":\n try:\n return {\"upscaler\": inputs[\"model_name\"]}\n except:\n print(\"comfyUI UpscaleLoader error\")\n case \"ImageUpscaleWithModel\":\n try:\n flow = inputs\n last_flow, last_node = self._comfy_traverse(\n prompt, inputs[\"image\"][0]\n )\n model = self._comfy_traverse(prompt, inputs[\"upscale_model\"][0])\n flow = utility.merge_dict(flow, last_flow)\n flow = utility.merge_dict(flow, model)\n node += last_node\n except:\n print(\"comfyUI UpscaleModel error\")\n case \"ConditioningCombine\":\n try:\n last_flow1, last_node1 = self._comfy_traverse(\n prompt, inputs[\"conditioning_1\"][0]\n )\n last_flow2, last_node2 = self._comfy_traverse(\n prompt, inputs[\"conditioning_2\"][0]\n )\n flow = utility.merge_dict(flow, last_flow1)\n flow = utility.merge_dict(flow, last_flow2)\n node += last_node1 + last_node2\n except:\n print(\"comfyUI ConditioningCombine error\")\n # custom nodes\n case \"SDXLPromptStyler\":\n try:\n return inputs.get(\"text_positive\"), inputs.get(\"text_negative\")\n except:\n print(\"comfyUI SDXLPromptStyler error\")\n case \"CR Seed\":\n try:\n return inputs.get(\"seed\")\n except:\n print(\"comfyUI CR Seed error\")\n case _:\n try:\n last_flow = {}\n last_node = []\n if inputs.get(\"samples\"):\n last_flow, last_node = self._comfy_traverse(\n prompt, inputs[\"samples\"][0]\n )\n elif inputs.get(\"image\") and isinstance(inputs.get(\"image\"), list):\n last_flow, last_node = self._comfy_traverse(\n prompt, inputs[\"image\"][0]\n )\n elif inputs.get(\"model\"):\n last_flow, last_node = self._comfy_traverse(\n prompt, inputs[\"model\"][0]\n )\n elif inputs.get(\"clip\"):\n last_flow, last_node = self._comfy_traverse(\n prompt, inputs[\"clip\"][0]\n )\n elif inputs.get(\"samples_from\"):\n last_flow, last_node = self._comfy_traverse(\n prompt, inputs[\"samples_from\"][0]\n )\n elif inputs.get(\"conditioning\"):\n result = self._comfy_traverse(prompt, inputs[\"conditioning\"][0])\n if isinstance(result, str):\n return result\n elif isinstance(result, list):\n last_flow, last_node = result\n flow = utility.merge_dict(flow, last_flow)\n node += last_node\n except:\n print(\"comfyUI bridging node error\")\n return flow, node"
}
] | import json
import piexif
import pyexiv2
import piexif.helper
from PIL import Image
from .exif.automatic1111 import Automatic1111
from .exif.primere import Primere
from .exif.comfyui import ComfyUI | 5,078 |
# OopCompanion:suppressRename
class ImageExifReader:
def __init__(self, file):
self._raw = ""
self._parser = {}
self._parameter = {}
self._tool = ""
self.read_data(file)
def read_data(self, file):
def is_json(jsoninput):
try:
json.loads(jsoninput)
except ValueError as e:
return False
return True
with Image.open(file) as f:
p2metadata = pyexiv2.Image(file)
is_primere = p2metadata.read_exif()
if 'Exif.Image.ImageDescription' in is_primere:
primere_exif_string = is_primere.get('Exif.Image.ImageDescription').strip()
if is_json(primere_exif_string) == True:
json_object = json.loads(primere_exif_string)
# keysList = {'positive', 'negative', 'positive_l', 'negative_l', 'positive_r', 'negative_r', 'seed', 'model_hash', 'model_name', 'sampler_name'}
# if not (keysList - json_object.keys()):
self._tool = "Primere"
|
# OopCompanion:suppressRename
class ImageExifReader:
def __init__(self, file):
self._raw = ""
self._parser = {}
self._parameter = {}
self._tool = ""
self.read_data(file)
def read_data(self, file):
def is_json(jsoninput):
try:
json.loads(jsoninput)
except ValueError as e:
return False
return True
with Image.open(file) as f:
p2metadata = pyexiv2.Image(file)
is_primere = p2metadata.read_exif()
if 'Exif.Image.ImageDescription' in is_primere:
primere_exif_string = is_primere.get('Exif.Image.ImageDescription').strip()
if is_json(primere_exif_string) == True:
json_object = json.loads(primere_exif_string)
# keysList = {'positive', 'negative', 'positive_l', 'negative_l', 'positive_r', 'negative_r', 'seed', 'model_hash', 'model_name', 'sampler_name'}
# if not (keysList - json_object.keys()):
self._tool = "Primere" | self._parser = Primere(info=json_object) | 1 | 2023-12-17 20:42:27+00:00 | 8k |
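The reader in this record picks a parser from whatever generation metadata it finds: a JSON block in the EXIF ImageDescription means Primere, a ComfyUI prompt/workflow pair lives in PNG text chunks, and Automatic1111 writes a flat parameters string. The sketch below isolates that dispatch order with a hypothetical pick_tool helper and plain dicts standing in for the PIL/pyexiv2 objects; it illustrates the priority logic only and is not the module's actual API.

import json

def is_json(text):
    # True if the string parses as JSON (Primere stores a JSON dict in EXIF).
    try:
        json.loads(text)
    except (TypeError, ValueError):
        return False
    return True

def pick_tool(exif_description=None, png_info=None):
    # Hypothetical helper mirroring the reader's priority order.
    png_info = png_info or {}
    if exif_description and is_json(exif_description.strip()):
        return "Primere"        # JSON payload in Exif.Image.ImageDescription
    if "prompt" in png_info or "workflow" in png_info:
        return "ComfyUI"        # ComfyUI embeds its node graph as PNG text
    if "parameters" in png_info:
        return "Automatic1111"  # A1111 writes one flat 'parameters' string
    return ""                   # no recognizable generation metadata

print(pick_tool(exif_description='{"positive": "a cat"}'))     # Primere
print(pick_tool(png_info={"parameters": "a cat\nSteps: 20"}))  # Automatic1111
print(pick_tool(png_info={"prompt": "{}", "workflow": "{}"}))  # ComfyUI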
amazon-science/c2f-seg | train_vq.py | [
{
"identifier": "load_dataset",
"path": "data/dataloader_vqgan.py",
"snippet": "def load_dataset(args, config):\n if args.dataset==\"KINS\":\n train_dataset = KINS_VQ_dataset(config, mode='train')\n val_dataset = KINS_VQ_dataset(config, mode='test')\n elif args.dataset==\"MOViD_A\":\n train_dataset = Movid_A_VQ_Dataset(config, mode=\"train\")\n val_dataset = Movid_A_VQ_Dataset(config, mode=\"test\")\n elif args.dataset==\"COCOA\":\n train_dataset = COCOA_VQ_dataset(config, mode=\"train\")\n val_dataset = COCOA_VQ_dataset(config, mode=\"test\")\n elif args.dataset==\"Fishbowl\":\n train_dataset = FishBowl_VQ_Dataset(config, mode=\"train\")\n val_dataset = FishBowl_VQ_Dataset(config, mode=\"test\")\n return train_dataset, val_dataset"
},
{
"identifier": "get_IoU",
"path": "utils/evaluation.py",
"snippet": "def get_IoU(pt_mask, gt_mask):\n # pred_mask [N, Image_W, Image_H]\n # gt_mask [N, Image_W, Image_H]\n SMOOTH = 1e-10\n intersection = (pt_mask & gt_mask).sum((-1, -2)).to(torch.float32) # [N, 1]\n union = (pt_mask | gt_mask).sum((-1, -2)).to(torch.float32) # [N, 1]\n\n iou = (intersection + SMOOTH) / (union + SMOOTH) # [N, 1]\n\n return iou"
},
{
"identifier": "setup_logger",
"path": "utils/logger.py",
"snippet": "def setup_logger(work_dir=None, logfile_name='log.txt', logger_name='log'):\n \"\"\"Sets up logger from target work directory.\n\n The function will sets up a logger with `DEBUG` log level. Two handlers will\n be added to the logger automatically. One is the `sys.stdout` stream, with\n `INFO` log level, which will print improtant messages on the screen. The other\n is used to save all messages to file `$WORK_DIR/$LOGFILE_NAME`. Messages will\n be added time stamp and log level before logged.\n\n NOTE: If `work_dir` or `logfile_name` is empty, the file stream will be\n skipped.\n\n Args:\n work_dir: The work directory. All intermediate files will be saved here.\n (default: None)\n logfile_name: Name of the file to save log message. (default: `log.txt`)\n logger_name: Unique name for the logger. (default: `logger`)\n\n Returns:\n A `logging.Logger` object.\n\n Raises:\n SystemExit: If the work directory has already existed, of the logger with\n specified name `logger_name` has already existed.\n \"\"\"\n logger = logging.getLogger(logger_name)\n formatter = logging.Formatter(\"[%(asctime)s][%(levelname)s] %(message)s\")\n if not logger.handlers:\n logger.setLevel(logging.DEBUG)\n # Print log message with `INFO` level or above onto the screen.\n sh = logging.StreamHandler(stream=sys.stdout)\n # sh.setLevel(logging.INFO)\n sh.setLevel(logging.INFO)\n sh.setFormatter(formatter)\n logger.addHandler(sh)\n logger.propagate = False\n\n if not work_dir or not logfile_name:\n return logger\n\n if os.path.exists(work_dir):\n print(f'Work directory `{work_dir}` has already existed!')\n os.makedirs(work_dir, exist_ok=True)\n\n # Save log message with all levels in log file.\n fh = logging.FileHandler(os.path.join(work_dir, logfile_name))\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n return logger"
},
{
"identifier": "Config",
"path": "utils/utils.py",
"snippet": "class Config(object):\n def __init__(self, config_path):\n with open(config_path, 'r') as f:\n self._yaml = f.read()\n self._dict = yaml.load(self._yaml, Loader=yaml.SafeLoader)\n self._dict['path'] = os.path.dirname(config_path)\n\n def __getattr__(self, name):\n if self._dict.get(name) is not None:\n return self._dict[name]\n\n return None\n\n def print(self):\n print('Model configurations:')\n print('---------------------------------')\n print(self._yaml)\n print('')\n print('---------------------------------')\n print('')"
},
{
"identifier": "Progbar",
"path": "utils/utils.py",
"snippet": "class Progbar(object):\n \"\"\"Displays a progress bar.\n\n Arguments:\n target: Total number of steps expected, None if unknown.\n width: Progress bar width on screen.\n verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose)\n stateful_metrics: Iterable of string names of metrics that\n should *not* be averaged over time. Metrics in this list\n will be displayed as-is. All others will be averaged\n by the progbar before display.\n interval: Minimum visual progress update interval (in seconds).\n \"\"\"\n\n def __init__(self, target, max_iters=None, width=25, verbose=1, interval=0.05,\n stateful_metrics=None):\n self.target = target\n self.width = width\n self.verbose = verbose\n self.interval = interval\n self.max_iters = max_iters\n self.iters = 0\n if stateful_metrics:\n self.stateful_metrics = set(stateful_metrics)\n else:\n self.stateful_metrics = set()\n\n self._dynamic_display = ((hasattr(sys.stdout, 'isatty') and\n sys.stdout.isatty()) or\n 'ipykernel' in sys.modules or\n 'posix' in sys.modules)\n self._total_width = 0\n self._seen_so_far = 0\n # We use a dict + list to avoid garbage collection\n # issues found in OrderedDict\n self._values = {}\n self._values_order = []\n self._start = time.time()\n self._last_update = 0\n\n def update(self, current, values=None):\n \"\"\"Updates the progress bar.\n\n Arguments:\n current: Index of current step.\n values: List of tuples:\n `(name, value_for_last_step)`.\n If `name` is in `stateful_metrics`,\n `value_for_last_step` will be displayed as-is.\n Else, an average of the metric over time will be displayed.\n \"\"\"\n values = values or []\n for k, v in values:\n if k not in self._values_order:\n self._values_order.append(k)\n if k not in self.stateful_metrics:\n if k not in self._values:\n self._values[k] = [v * (current - self._seen_so_far),\n current - self._seen_so_far]\n else:\n self._values[k][0] += v * (current - self._seen_so_far)\n self._values[k][1] += (current - self._seen_so_far)\n else:\n self._values[k] = v\n self._seen_so_far = current\n\n now = time.time()\n info = ' - %.0fs' % (now - self._start)\n if self.verbose == 1:\n if (now - self._last_update < self.interval and\n self.target is not None and current < self.target):\n return\n\n prev_total_width = self._total_width\n if self._dynamic_display:\n sys.stdout.write('\\b' * prev_total_width)\n sys.stdout.write('\\r')\n else:\n sys.stdout.write('\\n')\n\n if self.target is not None:\n numdigits = int(np.floor(np.log10(self.target))) + 1\n barstr = '%%%dd/%d [' % (numdigits, self.target)\n bar = barstr % current\n prog = float(current) / self.target\n prog_width = int(self.width * prog)\n if prog_width > 0:\n bar += ('=' * (prog_width - 1))\n if current < self.target:\n bar += '>'\n else:\n bar += '='\n bar += ('.' 
* (self.width - prog_width))\n bar += ']'\n else:\n bar = '%7d/Unknown' % current\n\n self._total_width = len(bar)\n sys.stdout.write(bar)\n\n if current:\n time_per_unit = (now - self._start) / current\n else:\n time_per_unit = 0\n # if self.target is not None and current < self.target:\n if self.max_iters is None or self.iters < self.max_iters:\n eta = time_per_unit * (self.target - current)\n if eta > 3600:\n eta_format = '%d:%02d:%02d' % (eta // 3600,\n (eta % 3600) // 60,\n eta % 60)\n elif eta > 60:\n eta_format = '%d:%02d' % (eta // 60, eta % 60)\n else:\n eta_format = '%ds' % eta\n\n info = ' - ETA: %s' % eta_format\n else:\n if time_per_unit >= 1:\n info += ' %.0fs/step' % time_per_unit\n elif time_per_unit >= 1e-3:\n info += ' %.0fms/step' % (time_per_unit * 1e3)\n else:\n info += ' %.0fus/step' % (time_per_unit * 1e6)\n\n for k in self._values_order:\n info += ' - %s:' % k\n if isinstance(self._values[k], list):\n avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))\n if abs(avg) > 1e-3:\n info += ' %.4f' % avg\n else:\n info += ' %.4e' % avg\n else:\n if 'lr' in k:\n info += ' %.3e' % self._values[k]\n else:\n info += ' %s' % self._values[k]\n\n self._total_width += len(info)\n if prev_total_width > self._total_width:\n info += (' ' * (prev_total_width - self._total_width))\n\n if self.target is not None and current >= self.target:\n info += '\\n'\n\n sys.stdout.write(info)\n sys.stdout.flush()\n\n elif self.verbose == 2:\n if self.target is None or current >= self.target:\n for k in self._values_order:\n info += ' - %s:' % k\n avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))\n if avg > 1e-3:\n info += ' %.4f' % avg\n else:\n info += ' %.4e' % avg\n info += '\\n'\n\n sys.stdout.write(info)\n sys.stdout.flush()\n\n self._last_update = now\n\n def add(self, n, values=None):\n self.iters += 1\n self.update(self._seen_so_far + n, values)"
},
{
"identifier": "to_cuda",
"path": "utils/utils.py",
"snippet": "def to_cuda(meta, device):\n for k in meta:\n if meta[k] is not None:\n meta[k] = meta[k].to(device)\n return meta"
},
{
"identifier": "stitch_images",
"path": "utils/utils.py",
"snippet": "def stitch_images(inputs, outputs, img_per_row=2, mode=\"L\"):\n gap = 5\n columns = len(outputs) + 1\n\n height, width = inputs[0][:, :, 0].shape\n img = Image.new(mode,\n (width * img_per_row * columns + gap * (img_per_row - 1), height * int(len(inputs) / img_per_row)))\n images = [inputs] + outputs\n\n for ix in range(len(inputs)):\n xoffset = int(ix % img_per_row) * width * columns + int(ix % img_per_row) * gap\n yoffset = int(ix / img_per_row) * height\n\n for cat in range(len(images)):\n im = (np.array((images[cat][ix]).cpu())*255).astype(np.uint8).squeeze()\n im = Image.fromarray(im, mode)\n img.paste(im, (xoffset + cat * width, yoffset))\n\n return img"
},
{
"identifier": "get_lr_schedule_with_steps",
"path": "utils/utils.py",
"snippet": "def get_lr_schedule_with_steps(decay_type, optimizer, drop_steps=None, gamma=None, total_steps=None):\n def lr_lambda(current_step):\n if decay_type == 'fix':\n return 1.0\n elif decay_type == 'linear':\n return 1.0 * (current_step / total_steps)\n elif decay_type == 'cos':\n return 1.0 * (math.cos((current_step / total_steps) * math.pi) + 1) / 2\n elif decay_type == 'milestone':\n return 1.0 * math.pow(gamma, int(current_step / drop_steps))\n else:\n raise NotImplementedError\n\n return LambdaLR(optimizer, lr_lambda)"
},
{
"identifier": "torch_init_model",
"path": "utils/utils.py",
"snippet": "def torch_init_model(model, init_checkpoint, key):\n state_dict = torch.load(init_checkpoint, map_location='cpu')[key]\n missing_keys = []\n unexpected_keys = []\n error_msgs = []\n # copy state_dict so _load_from_state_dict can modify it\n metadata = getattr(state_dict, '_metadata', None)\n state_dict = state_dict.copy()\n if metadata is not None:\n state_dict._metadata = metadata\n\n def load(module, prefix=''):\n local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})\n\n module._load_from_state_dict(\n state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)\n for name, child in module._modules.items():\n if child is not None:\n load(child, prefix + name + '.')\n\n load(model, prefix='')\n \n print(\"missing keys:{}\".format(missing_keys))\n print('unexpected keys:{}'.format(unexpected_keys))\n print('error msgs:{}'.format(error_msgs))"
},
{
"identifier": "VQLPIPSWithDiscriminator",
"path": "taming_src/vqperceptual.py",
"snippet": "class VQLPIPSWithDiscriminator(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n disc_start = config['disc_start']\n codebook_weight = config['codebook_weight']\n pixelloss_weight = 1.0\n disc_num_layers = config['disc_num_layers']\n disc_in_channels = config['disc_in_channels']\n disc_factor = 1.0\n disc_weight = config['disc_weight']\n perceptual_weight = 1.0\n use_actnorm = False\n disc_conditional = config['disc_conditional']\n disc_ndf = 64\n disc_loss=\"hinge\"\n\n assert disc_loss in [\"hinge\", \"vanilla\"]\n self.codebook_weight = codebook_weight\n self.pixel_weight = pixelloss_weight\n self.perceptual_loss = LPIPS().eval()\n self.perceptual_weight = perceptual_weight\n\n self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels,\n n_layers=disc_num_layers,\n use_actnorm=use_actnorm,\n ndf=disc_ndf).apply(weights_init)\n self.discriminator_iter_start = disc_start\n if disc_loss == \"hinge\":\n self.disc_loss = hinge_d_loss\n elif disc_loss == \"vanilla\":\n self.disc_loss = vanilla_d_loss\n else:\n raise ValueError(f\"Unknown GAN loss '{disc_loss}'.\")\n print(f\"VQLPIPSWithDiscriminator running with {disc_loss} loss.\")\n self.disc_factor = disc_factor\n self.discriminator_weight = disc_weight\n self.disc_conditional = disc_conditional\n\n def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):\n if last_layer is not None:\n nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]\n g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]\n else:\n nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0]\n g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0]\n\n d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4)\n d_weight = torch.clamp(d_weight, 0.0, 1e4).detach()\n d_weight = d_weight * self.discriminator_weight\n return d_weight\n\n def forward(self, codebook_loss, inputs, reconstructions, optimizer_idx,\n global_step, cond=None, split=\"train\"):\n rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous())\n if self.perceptual_weight > 0:\n p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous())\n rec_loss = rec_loss + self.perceptual_weight * p_loss\n else:\n p_loss = torch.tensor([0.0])\n\n nll_loss = rec_loss\n # nll_loss = torch.sum(nll_loss) / nll_loss.shape[0]\n nll_loss = torch.mean(nll_loss)\n\n # now the GAN part\n if optimizer_idx == 0:\n # generator update\n if cond is None:\n assert not self.disc_conditional\n logits_fake = self.discriminator(reconstructions.contiguous())\n else:\n assert self.disc_conditional\n logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1))\n g_loss = -torch.mean(logits_fake)\n\n # try:\n # d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer)\n # except RuntimeError:\n # assert not self.training\n # d_weight = torch.tensor(0.0)\n # d_weight = torch.tensor(1.0)\n # with torch.no_grad():\n # d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer)\n\n # disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)\n # loss = nll_loss + d_weight * disc_factor * g_loss + self.codebook_weight * codebook_loss.mean()\n return nll_loss, g_loss, self.codebook_weight * codebook_loss.mean()\n\n # log = [(\"{}/total_loss\".format(split), loss.clone().detach().mean().item()),\n # (\"{}/quant_loss\".format(split), 
codebook_loss.detach().mean().item()),\n # (\"{}/nll_loss\".format(split), nll_loss.detach().mean().item()),\n # (\"{}/rec_loss\".format(split), rec_loss.detach().mean().item()),\n # (\"{}/p_loss\".format(split), p_loss.detach().mean().item()),\n # (\"{}/d_weight\".format(split), d_weight.detach().item()),\n # (\"{}/disc_factor\".format(split), disc_factor),\n # (\"{}/g_loss\".format(split), g_loss.detach().mean().item())]\n # return loss#, log\n\n if optimizer_idx == 1:\n # second pass for discriminator update\n if cond is None:\n logits_real = self.discriminator(inputs.contiguous().detach())\n logits_fake = self.discriminator(reconstructions.contiguous().detach())\n else:\n logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1))\n logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1))\n\n disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)\n d_loss = disc_factor * self.disc_loss(logits_real, logits_fake)\n\n log = [(\"{}/disc_loss\".format(split), d_loss.clone().detach().mean().item()),\n (\"{}/logits_real\".format(split), logits_real.detach().mean().item()),\n (\"{}/logits_fake\".format(split), logits_fake.detach().mean().item())]\n return d_loss#, log"
},
{
"identifier": "adopt_weight",
"path": "taming_src/vqperceptual.py",
"snippet": "def adopt_weight(weight, global_step, threshold=0, value=0.):\n if global_step < threshold:\n weight = value\n return weight"
},
{
"identifier": "VQModel",
"path": "taming_src/taming_models.py",
"snippet": "class VQModel(nn.Module):\n def __init__(self, config):\n super(VQModel, self).__init__()\n self.config = config\n self.iteration = 0\n self.name = config.model_type\n self.m_path = os.path.join(config.path, self.name)\n self.eps = 1e-6\n\n self.ddconfig = config.model['params']['ddconfig']\n n_embed = config.model['params']['n_embed']\n embed_dim = config.model['params']['embed_dim']\n \n self.encoder = Encoder(self.ddconfig).to(config.device)\n self.decoder = Decoder(self.ddconfig).to(config.device)\n self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25).to(config.device).to(config.device)\n self.quant_conv = torch.nn.Conv2d(self.ddconfig[\"z_channels\"], embed_dim, 1).to(config.device)\n # self.quant_proj = torch.nn.Linear(self.ddconfig[\"z_channels\"], embed_dim).to(config.device)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, self.ddconfig[\"z_channels\"], 1).to(config.device)\n # self.pose_quant_proj = torch.nn.Linear(embed_dim, self.ddconfig[\"z_channels\"]).to(config.device)\n\n def encode(self, x, mask=None):\n h = self.encoder(x) # dim=256\n h = self.quant_conv(h) # dim=256\n if mask is not None:\n mask = F.max_pool2d(mask, kernel_size=int(mask.shape[2] / h.shape[2]),\n stride=int(mask.shape[2] / h.shape[2]))\n quant = quant * mask + h * (1 - mask)\n quant, emb_loss, info = self.quantize(h, mask)\n \n return quant, emb_loss, info\n\n def decode(self, quant):\n quant = self.post_quant_conv(quant) # dim: 256\n dec = self.decoder(quant)\n return dec\n\n def decode_code(self, code_b):\n quant_b = self.quantize.embed_code(code_b)\n dec = self.decode(quant_b)\n return dec\n\n def forward(self, x, mask=None):\n quant, diff, _ = self.encode(x, mask) # quant dim: 256\n\n dec = self.decode(quant)\n return dec, diff\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n def restore(self, ckpt_file, g_opt=None, d_opt=None):\n torch_init_model(self, ckpt_file, \"state_dict\")\n saving = torch.load(ckpt_file, map_location='cpu')\n if 'optimizer_states' in saving and g_opt is not None and d_opt is not None:\n opt_state = saving['optimizer_states']\n g_opt.load_state_dict(opt_state[0])\n d_opt.load_state_dict(opt_state[1])\n print(f\"Restored from {ckpt_file}\")\n return g_opt, d_opt\n\n def save(self, prefix=None, g_opt=None, d_opt=None):\n if prefix is not None:\n save_path = self.m_path + \"_{}.pth\".format(prefix)\n else:\n save_path = self.m_path + \".pth\"\n\n print('\\nsaving {} {}...\\n'.format(self.name, prefix))\n all_saving = {'state_dict': self.state_dict(),\n 'optimizer_states': [g_opt.state_dict(), d_opt.state_dict()]}\n torch.save(all_saving, save_path)"
}
] | import os
import cv2
import random
import numpy as np
import torch
import argparse
import time
from shutil import copyfile
from torch.utils.data import DataLoader
from data.dataloader_vqgan import load_dataset
from utils.evaluation import get_IoU
from utils.logger import setup_logger
from utils.utils import Config, Progbar, to_cuda, stitch_images
from utils.utils import get_lr_schedule_with_steps, torch_init_model
from taming_src.vqperceptual import VQLPIPSWithDiscriminator, adopt_weight
from taming_src.taming_models import VQModel | 6,397 |
def restore(ckpt_file, g_model, d_model, g_opt, d_opt):
torch_init_model(g_model, ckpt_file, "g_model")
torch_init_model(d_model, ckpt_file, "d_model")
saving = torch.load(ckpt_file, map_location='cpu')
# if 'optimizer_states' in saving:
# opt_state = saving['optimizer_states']
# # print(opt_state[0])
# g_opt.load_state_dict(opt_state[0])
# d_opt.load_state_dict(opt_state[1])
print(f"Restored from {ckpt_file}")
return g_opt, d_opt
def save(g_model, d_model, m_path, prefix=None, g_opt=None, d_opt=None):
if prefix is not None:
save_path = m_path + "_{}.pth".format(prefix)
else:
save_path = m_path + ".pth"
print('\nsaving {}...\n'.format(save_path))
all_saving = {'g_model': g_model.state_dict(),
'd_model': d_model.state_dict(),
'optimizer_states': [g_opt.state_dict(), d_opt.state_dict()]}
torch.save(all_saving, save_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--path', type=str, required=True, help='model checkpoints path')
parser.add_argument('--finetune_path', type=str, required=False, default=None)
parser.add_argument('--local_rank', default=-1, type=int)
parser.add_argument('--learn_type', default="mask", type=str)
parser.add_argument('--check_point_path', default="../check_points", type=str)
parser.add_argument('--dataset', default="Kins", type=str)
args = parser.parse_args()
args.path = os.path.join(args.check_point_path, args.path)
config_path = os.path.join(args.path, 'vqgan_{}.yml'.format(args.dataset))
# create checkpoints path if it doesn't exist
if not os.path.exists(args.path):
os.makedirs(args.path)
# copy config template if it doesn't exist
if not os.path.exists(config_path):
copyfile('configs/vqgan_{}.yml'.format(args.dataset), config_path)
# load config file
config = Config(config_path)
config.path = args.path
# cuda visible devices
local_rank = 0
log_file = 'log-{}.txt'.format(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
if local_rank == 0:
|
def restore(ckpt_file, g_model, d_model, g_opt, d_opt):
torch_init_model(g_model, ckpt_file, "g_model")
torch_init_model(d_model, ckpt_file, "d_model")
saving = torch.load(ckpt_file, map_location='cpu')
# if 'optimizer_states' in saving:
# opt_state = saving['optimizer_states']
# # print(opt_state[0])
# g_opt.load_state_dict(opt_state[0])
# d_opt.load_state_dict(opt_state[1])
print(f"Restored from {ckpt_file}")
return g_opt, d_opt
def save(g_model, d_model, m_path, prefix=None, g_opt=None, d_opt=None):
if prefix is not None:
save_path = m_path + "_{}.pth".format(prefix)
else:
save_path = m_path + ".pth"
print('\nsaving {}...\n'.format(save_path))
all_saving = {'g_model': g_model.state_dict(),
'd_model': d_model.state_dict(),
'optimizer_states': [g_opt.state_dict(), d_opt.state_dict()]}
torch.save(all_saving, save_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--path', type=str, required=True, help='model checkpoints path')
parser.add_argument('--finetune_path', type=str, required=False, default=None)
parser.add_argument('--local_rank', default=-1, type=int)
parser.add_argument('--learn_type', default="mask", type=str)
parser.add_argument('--check_point_path', default="../check_points", type=str)
parser.add_argument('--dataset', default="Kins", type=str)
args = parser.parse_args()
args.path = os.path.join(args.check_point_path, args.path)
config_path = os.path.join(args.path, 'vqgan_{}.yml'.format(args.dataset))
# create checkpoints path if it doesn't exist
if not os.path.exists(args.path):
os.makedirs(args.path)
# copy config template if it doesn't exist
if not os.path.exists(config_path):
copyfile('configs/vqgan_{}.yml'.format(args.dataset), config_path)
# load config file
config = Config(config_path)
config.path = args.path
# cuda visible devices
local_rank = 0
log_file = 'log-{}.txt'.format(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
if local_rank == 0: | logger = setup_logger(os.path.join(args.path, 'logs'), logfile_name=log_file) | 2 | 2023-12-21 04:25:47+00:00 | 8k |
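The VQLPIPSWithDiscriminator snippet in this record alternates two passes: optimizer_idx 0 returns the reconstruction/perceptual, adversarial, and codebook terms for the generator, while optimizer_idx 1 computes a hinge loss for the discriminator, gated by adopt_weight until disc_start. A compact sketch of that two-optimizer loop with stand-in losses follows; it assumes a generator that returns (reconstruction, codebook_loss) and a patch discriminator that returns logits, rather than the repository's VQModel and NLayerDiscriminator.

import torch
import torch.nn.functional as F

def adopt_weight(weight, global_step, threshold=0, value=0.0):
    # keep the adversarial term switched off until the warm-up step
    return value if global_step < threshold else weight

def train_step(g_model, d_model, g_opt, d_opt, images, global_step,
               disc_start=10000, disc_weight=0.8, codebook_weight=1.0):
    # ---- generator / codebook update (optimizer_idx == 0) ----
    recon, codebook_loss = g_model(images)        # assumed return signature
    rec_loss = torch.abs(images - recon).mean()   # L1 stand-in for L1 + LPIPS
    g_loss = -d_model(recon).mean()               # adversarial generator term
    disc_factor = adopt_weight(disc_weight, global_step, threshold=disc_start)
    loss_g = rec_loss + disc_factor * g_loss + codebook_weight * codebook_loss
    g_opt.zero_grad()
    loss_g.backward()
    g_opt.step()

    # ---- discriminator update (optimizer_idx == 1), hinge loss ----
    logits_real = d_model(images)
    logits_fake = d_model(recon.detach())
    d_loss = 0.5 * (F.relu(1.0 - logits_real).mean() + F.relu(1.0 + logits_fake).mean())
    d_loss = adopt_weight(1.0, global_step, threshold=disc_start) * d_loss
    d_opt.zero_grad()
    d_loss.backward()
    d_opt.step()
    return loss_g.item(), d_loss.item()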
alipay/PainlessInferenceAcceleration | pia/lookahead/common/pretrained_model.py | [
{
"identifier": "LookaheadCache",
"path": "pia/lookahead/common/lookahead_cache.py",
"snippet": "class LookaheadCache():\n def __init__(self, debug=False, eos=2, stop_words=None, max_node=512, max_output_node=256):\n self.debug = debug\n self.eos = eos\n self.max_node = max_node\n self.max_output_node = max_output_node\n self.mem = {}\n self._output_ids = defaultdict(list)\n self._update_trees = set()\n self._update_input_trees = set()\n self.stop_words = stop_words if stop_words is not None else {}\n self.default_mask = np.ones((1, 1), dtype=np.int64)\n\n def put(self, token_ids, branch_length=8, final=False, mode='output', idx=-1):\n if self.eos in token_ids:\n token_ids = token_ids[:token_ids.index(self.eos)]\n if len(token_ids) >= 2:\n ts = len(token_ids) # ts: token_ids size\n\n for i in range(ts - 1):\n token_id = token_ids[i]\n tup = token_ids[i + 1:i + branch_length + 1]\n if self.debug:\n print(f'input token:{token_id} tokens:{tup}')\n tree = self.mem.get(token_id, None)\n if tree is not None:\n tree.put(tup, mode=mode, idx=idx)\n else:\n tree = Tree(token_id, max_node=self.max_node, max_output_node=self.max_output_node)\n tree.put(tup, mode=mode, idx=idx)\n self.mem[token_id] = tree\n self._update_trees.add(tree)\n if mode == 'input':\n self._update_input_trees.add(tree)\n\n if final:\n self.reset_input_freqs()\n self.squeeze_branch_counts()\n\n def stream_put(self, token_ids, branch_length=8, final=False, mode='output', idx=0):\n # idx is only used for caching output_ids\n assert mode == 'output' and idx >= 0\n if self.eos in token_ids:\n token_ids = token_ids[:token_ids.index(self.eos)]\n self._output_ids[idx].extend(token_ids)\n output_ids = self._output_ids[idx]\n ts = len(output_ids)\n min_branch_length = 1 if final else branch_length\n if ts > min_branch_length:\n for i in range(ts - min_branch_length):\n token_id = output_ids[i]\n tup = output_ids[i + 1:i + branch_length + 1]\n if self.debug:\n print(f'input token:{token_id} tokens:{tup}')\n tree = self.mem.get(token_id, None)\n if tree:\n tree.put(tup, mode='output', idx=-1)\n else:\n tree = Tree(token_id, max_node=self.max_node, max_output_node=self.max_output_node)\n tree.put(tup, mode='output', idx=-1)\n self.mem[token_id] = tree\n self._update_trees.add(tree)\n if not final:\n self._output_ids[idx] = output_ids[ts - branch_length:]\n if final:\n self._output_ids[idx] = []\n self.reset_input_freqs()\n self.squeeze_branch_counts()\n\n def hier_get(self, token_ids, decoding_length=64, branch_length=8, min_input_size=0, min_output_size=0, mode='mix',\n idx=0):\n assert mode in ('input', 'output', 'mix')\n\n decoding_masks = self.default_mask\n if decoding_length <= 1 or branch_length == 0:\n return token_ids[-1:], decoding_masks, []\n\n decoding_ids = None\n sizes = [0, 0]\n match_count = len(token_ids)\n for i, t in enumerate(token_ids):\n tree = self.mem.get(t, None)\n if tree is not None:\n ids = token_ids[i + 1:]\n if t in self.stop_words and len(ids) == 0:\n continue\n decoding_ids, decoding_masks, sizes = tree.get(ids,\n max_size=decoding_length,\n max_length=branch_length,\n min_input_size=min_input_size,\n min_output_size=min_output_size,\n mode=mode,\n idx=idx)\n s = len(decoding_ids)\n match_count = len(token_ids) - i\n # token count is enough, not need retrieve again\n if s >= branch_length:\n break\n\n if decoding_ids is None:\n decoding_ids = token_ids[-1:]\n\n return decoding_ids, decoding_masks, sizes\n\n def par_get(self, token_ids, decoding_length=16, branch_length=8, min_input_size=0, min_output_size=0, mode='mix',\n idx=0):\n\n output_ids, decoding_masks, decoding_lengths = 
self.trie_get(token_ids,\n decoding_length=decoding_length,\n branch_length=branch_length,\n min_input_size=min_input_size,\n min_output_size=min_output_size,\n mode=mode,\n idx=idx)\n sets = []\n true_decoding_length = len(output_ids) - 1\n for i in range(true_decoding_length, 0, -1):\n indices, = np.nonzero(decoding_masks[i, 1:])\n indices = set(indices)\n flag = True\n for ss in sets:\n if len(indices - ss) == 0:\n flag = False\n break\n if flag:\n sets.append(indices)\n\n sets.reverse()\n count = 0\n max_decoding_length = true_decoding_length\n branches = []\n for indices in sets:\n indices = sorted(list(indices))\n rest_count = max_decoding_length - count\n indices = indices[:rest_count]\n count += len(indices)\n branch = []\n for i in indices:\n branch.append(output_ids[i + 1])\n branches.append(branch)\n if count >= max_decoding_length:\n break\n ids = [output_ids[0]]\n masks = np.tril(np.ones((count + 1, count + 1)), 0)\n count = 1\n for branch in branches:\n ids.extend(branch)\n length = len(branch)\n masks[count:count + length, 1:count] = 0\n count += length\n\n return ids, masks, [count - 1]\n\n def one_get(self, token_ids, decoding_length=64, branch_length=8, min_input_size=0, min_output_size=0, mode='mix',\n idx=0):\n assert mode in ('input', 'output', 'mix')\n\n decoding_masks = self.default_mask\n if decoding_length <= 1 or branch_length == 0:\n return token_ids[-1:], decoding_masks, []\n\n decoding_ids = None\n sizes = [0, 0]\n for i, t in enumerate(token_ids):\n tree = self.mem.get(t, None)\n if tree is not None:\n ids = token_ids[i + 1:]\n if t in self.stop_words and len(ids) == 0:\n continue\n decoding_ids, decoding_masks, sizes = tree.get_one_branch(ids,\n max_length=branch_length,\n mode=mode,\n idx=idx)\n s = len(decoding_ids)\n # token count is enough, not need retrieve again\n if s >= branch_length // 2:\n break\n print(f'{decoding_ids=}')\n if decoding_ids is None:\n decoding_ids = token_ids[-1:]\n\n return decoding_ids, decoding_masks, sizes\n\n def bat_get(self, token_id_list, decoding_length=64, branch_length=8, decoding_cursors=None, mode='output',\n indices=None, decoding_mode='hier'):\n assert mode in ('input', 'output', 'mix')\n assert decoding_mode in ('hier', 'one')\n bs = len(token_id_list)\n assert bs == len(decoding_cursors) and bs == len(indices), f'{bs=} {len(decoding_cursors)=} {len(indices)=}'\n\n decoding_id_list = []\n decoding_mask_list = []\n size_list = []\n\n min_cur = min(decoding_cursors)\n max_cur = max(decoding_cursors)\n bs = len(decoding_cursors)\n for sub_idx, token_ids in enumerate(token_id_list):\n update_decoding_length = decoding_length // bs\n min_input_size = 0\n min_output_size = max(update_decoding_length // 2, 1)\n method_name = decoding_mode + '_get'\n decoding_ids, decoding_masks, sizes = getattr(self, method_name)(token_ids,\n decoding_length=update_decoding_length,\n branch_length=branch_length,\n min_input_size=min_input_size,\n min_output_size=min_output_size,\n mode=mode,\n idx=indices[sub_idx])\n decoding_id_list.append(decoding_ids)\n decoding_mask_list.append(decoding_masks)\n size_list.append(sizes)\n\n bs = len(token_id_list)\n max_size = max([len(x) for x in decoding_id_list])\n\n decoding_masks = np.zeros((bs, max_size, max_cur - min_cur + max_size), dtype=np.int64)\n for i, decoding_ids in enumerate(decoding_id_list):\n org_size = len(decoding_ids)\n gap = max_size - org_size\n if gap > 0:\n decoding_ids.extend([self.eos] * gap)\n cur = decoding_cursors[i]\n decoding_masks[i, :org_size, cur - min_cur:cur - 
min_cur + org_size] = decoding_mask_list[i]\n decoding_masks[i, :, :cur - min_cur + 1] = 1\n return decoding_id_list, decoding_masks, size_list\n\n def fresh(self):\n self.mem = {}\n\n def reset_input_freqs(self):\n if len(self._update_input_trees) > 0:\n for c in self._update_input_trees:\n c.reset_input_freq()\n self._update_input_trees.clear()\n\n def squeeze_branch_counts(self):\n if len(self._update_trees) >= 1024:\n for c in self._update_trees:\n c.squeeze()\n self._update_trees.clear()\n\n def save_mem(self, save_mem_dir):\n cache_mem = self.mem\n serialized_object = pickle.dumps(cache_mem)\n json_string = json.dumps(serialized_object.decode('latin-1'))\n with open(save_mem_dir, 'w') as f:\n json.dump(json_string, f)\n\n def load_mem(self, load_mem_dir):\n with open(load_mem_dir, 'r') as f:\n json_string = json.load(f)\n deserialized_object = pickle.loads(json.loads(json_string).encode('latin-1'))\n cache_mem = deserialized_object\n self.mem = cache_mem"
},
{
"identifier": "GenerationMode",
"path": "pia/lookahead/common/lookahead_generation_utils.py",
"snippet": "class GenerationMode(ExplicitEnum):\n \"\"\"\n Possible generation modes, downstream of the [`~generation.GenerationMixin.generate`] method.\n \"\"\"\n\n # Non-beam methods\n CONTRASTIVE_SEARCH = \"contrastive_search\"\n GREEDY_SEARCH = \"greedy_search\"\n LOOKAHEAD_GENERATION = \"lookahead_generation\"\n SAMPLE = \"sample\"\n ASSISTED_GENERATION = \"assisted_generation\"\n # Beam methods\n BEAM_SEARCH = \"beam_search\"\n BEAM_SAMPLE = \"beam_sample\"\n CONSTRAINED_BEAM_SEARCH = \"constrained_beam_search\"\n GROUP_BEAM_SEARCH = \"group_beam_search\""
},
{
"identifier": "LookaheadDecoderOnlyOutput",
"path": "pia/lookahead/common/lookahead_generation_utils.py",
"snippet": "class LookaheadDecoderOnlyOutput(ModelOutput):\n \"\"\"\n Base class for outputs of decoder-only generation models using greedy search.\n\n\n Args:\n sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter\n if all batches finished early due to the `eos_token_id`.\n scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):\n Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)\n at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for\n each generated token), with each tensor of shape `(batch_size, config.vocab_size)`.\n attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):\n Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of\n `torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.\n hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of\n `torch.FloatTensor` of shape `(batch_size, generated_length, hidden_size)`.\n \"\"\"\n\n sequences: torch.LongTensor = None\n scores: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None\n hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None\n kwargs: Optional[Dict] = None"
}
] | import copy
import inspect
import time
import warnings
import numpy as np
import torch
import torch.distributed as dist
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from torch import nn
from transformers import PreTrainedModel
from transformers.generation.beam_constraints import DisjunctiveConstraint, PhrasalConstraint
from transformers.generation.beam_search import BeamSearchScorer, ConstrainedBeamSearchScorer
from transformers.generation.logits_process import (
LogitsProcessorList,
MinLengthLogitsProcessor,
)
from transformers.generation.stopping_criteria import (
MaxLengthCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
from transformers.generation.utils import (
GreedySearchEncoderDecoderOutput,
GreedySearchDecoderOnlyOutput)
from transformers.generation.utils import (
GreedySearchOutput,
GenerateOutput)
from transformers.utils import ModelOutput, logging
from transformers.generation.configuration_utils import GenerationConfig
from pia.lookahead.common.lookahead_cache import LookaheadCache
from pia.lookahead.common.lookahead_generation_utils import GenerationMode, LookaheadDecoderOnlyOutput | 6,565 | input_id_list = input_ids[0].tolist()
decoding_kwargs['input_id_list'] = [input_id_list]
branch_length = decoding_kwargs.get('branch_length', 12)
self.lookahead_cache.put(input_id_list[1:], branch_length=branch_length + 1, mode='input', idx=0)
ts = time.time()
this_peer_finished = False # used by synced_gpus only
while True:
if synced_gpus:
# Under synced_gpus the `forward` call must continue until all gpus complete their sequence.
# The following logic allows an early break if all peers finished generating their sequence
this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device)
# send 0.0 if we finished, 1.0 otherwise
dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM)
# did all peers finish? the reduced sum will be 0.0 then
if this_peer_finished_flag.item() == 0.0:
break
# prepare model inputs
model_inputs = self.lookahead_prepare_inputs_for_generation(input_ids, **model_kwargs)
decoding_kwargs = model_inputs.pop('decoding_kwargs', {})
# forward pass to get next token
outputs = self(
**model_inputs,
return_dict=True,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
if synced_gpus and this_peer_finished:
continue # don't waste resources running the code we don't need
model_kwargs['decoding_kwargs'] = decoding_kwargs
model_kwargs = self._lookahead_update_model_kwargs_for_generation(
outputs,
model_kwargs,
is_encoder_decoder=self.config.is_encoder_decoder,
input_ids=input_ids,
logits_processor=logits_processor
)
next_tokens = model_kwargs['next_tokens']
next_tokens_scores = model_kwargs['next_tokens_scores']
next_token_list = model_kwargs['next_token_list']
# finished sentences should have their next token be a padding token
if eos_token_id is not None:
if pad_token_id is None:
raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.")
next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences)
# update generated ids, model inputs, and length for next step
input_ids = torch.cat([input_ids, next_tokens], dim=-1)
if streamer is not None:
streamer.put(next_token_list)
self.lookahead_cache.stream_put(next_token_list[0], branch_length=branch_length + 1, final=False,
mode='output', idx=0)
# Store scores, attentions and hidden_states when required
if return_dict_in_generate:
if output_scores:
scores += (next_tokens_scores,)
if output_attentions:
decoder_attentions += (
(outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)
)
if self.config.is_encoder_decoder:
cross_attentions += (outputs.cross_attentions,)
if output_hidden_states:
decoder_hidden_states += (
(outputs.decoder_hidden_states,)
if self.config.is_encoder_decoder
else (outputs.hidden_states,)
)
# if eos_token was found in one sentence, set sentence to finished
if eos_token_id_tensor is not None:
# unfinished_sequences = unfinished_sequences.mul(
# next_tokens.tile(eos_token_id_tensor.shape[0], 1).ne(eos_token_id_tensor.unsqueeze(1)).prod(dim=0)
# )
unfinished_sequences = unfinished_sequences.mul(
next_tokens[:, :, None].ne(eos_token_id_tensor).prod(dim=2).prod(dim=1))
# stop when each sentence is finished
if unfinished_sequences.max() == 0:
this_peer_finished = True
# stop if we exceed the maximum length
if stopping_criteria(input_ids, scores):
this_peer_finished = True
te = time.time()
model_kwargs['decoding_kwargs']['fts'].append(te - ts)
ts = te
if this_peer_finished and not synced_gpus:
self.lookahead_cache.stream_put([], branch_length=branch_length + 1, final=True,
mode='output', idx=0)
break
if streamer is not None:
streamer.end()
if return_dict_in_generate:
if self.config.is_encoder_decoder:
return GreedySearchEncoderDecoderOutput(
sequences=input_ids,
scores=scores,
encoder_attentions=encoder_attentions,
encoder_hidden_states=encoder_hidden_states,
decoder_attentions=decoder_attentions,
cross_attentions=cross_attentions,
decoder_hidden_states=decoder_hidden_states,
)
else:
kwargs = {'dls': model_kwargs['decoding_kwargs']['dls'],
'edls': model_kwargs['decoding_kwargs']['edls'],
'fts': model_kwargs['decoding_kwargs']['fts']}
| # -*- coding: utf-8 -*-
"""
Copyright (c) Ant Financial Service Group and its affiliates.
"""
from __future__ import print_function
# from transformers.integrations.deepspeed import is_deepspeed_zero3_enabled
logger = logging.get_logger(__name__)
class LookaheadPreTrainedModel(PreTrainedModel):
_batch_generation = False
_stream_generation = False
def __init__(self, config):
super().__init__(config=config)
def _get_generation_mode(
self, generation_config: GenerationConfig, assistant_model: Optional["PreTrainedModel"]
) -> GenerationMode:
"""
Returns the generation mode triggered by a [`GenerationConfig`] instance.
"""
if generation_config.constraints is not None or generation_config.force_words_ids is not None:
generation_mode = GenerationMode.CONSTRAINED_BEAM_SEARCH
elif generation_config.num_beams == 1:
if generation_config.do_sample is False:
if (
generation_config.top_k is not None
and generation_config.top_k > 1
and generation_config.penalty_alpha is not None
and generation_config.penalty_alpha > 0
):
generation_mode = GenerationMode.CONTRASTIVE_SEARCH
elif generation_config.use_cache \
and hasattr(generation_config, 'decoding_kwargs') \
and generation_config.decoding_kwargs.get('use_lookahead', False) \
and generation_config.decoding_kwargs.get('decoding_length', 64) > 1 \
and generation_config.decoding_kwargs.get('branch_length', 12) > 0:
generation_mode = GenerationMode.LOOKAHEAD_GENERATION
else:
generation_mode = GenerationMode.GREEDY_SEARCH
else:
if generation_config.use_cache \
and hasattr(generation_config, 'decoding_kwargs') \
and generation_config.decoding_kwargs.get('use_lookahead', False) \
and generation_config.decoding_kwargs.get('decoding_length', 64) > 1 \
and generation_config.decoding_kwargs.get('branch_length', 12) > 0:
generation_mode = GenerationMode.LOOKAHEAD_GENERATION
else:
generation_mode = GenerationMode.SAMPLE
else:
if generation_config.num_beam_groups > 1:
generation_mode = GenerationMode.GROUP_BEAM_SEARCH
elif generation_config.do_sample is True:
generation_mode = GenerationMode.BEAM_SAMPLE
else:
generation_mode = GenerationMode.BEAM_SEARCH
# Assisted generation may extend some generation modes
if assistant_model is not None:
if generation_mode in ("greedy_search", "sample"):
generation_mode = GenerationMode.ASSISTED_GENERATION
else:
raise ValueError(
"You've set `assistant_model`, which triggers assisted generate. Currently, assisted generate "
"is only supported with Greedy Search and Sample."
)
return generation_mode
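    # --------------------------------------------------------------------------
    # Illustrative sketch (not part of the original file): a minimal example of
    # the `decoding_kwargs` that the mode-selection logic above looks for. The
    # keys mirror the checks in `_get_generation_mode`; the config object and
    # concrete values below are assumptions for illustration only, not a
    # documented API of this repository.
    #
    #   from transformers.generation.configuration_utils import GenerationConfig
    #
    #   generation_config = GenerationConfig(num_beams=1, do_sample=False, use_cache=True)
    #   generation_config.decoding_kwargs = {
    #       'use_lookahead': True,    # required to leave GREEDY_SEARCH
    #       'decoding_length': 64,    # must be > 1
    #       'branch_length': 12,      # must be > 0
    #   }
    #   # With these settings `_get_generation_mode(generation_config, None)`
    #   # would return GenerationMode.LOOKAHEAD_GENERATION instead of
    #   # GenerationMode.GREEDY_SEARCH.
    # --------------------------------------------------------------------------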
@torch.no_grad()
def generate(
self,
inputs: Optional[torch.Tensor] = None,
generation_config: Optional[GenerationConfig] = None,
logits_processor: Optional[LogitsProcessorList] = None,
stopping_criteria: Optional[StoppingCriteriaList] = None,
prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,
synced_gpus: Optional[bool] = None,
assistant_model: Optional["PreTrainedModel"] = None,
streamer: Optional["BaseStreamer"] = None,
**kwargs,
) -> Union[GenerateOutput, torch.LongTensor]:
r"""
Generates sequences of token ids for models with a language modeling head.
<Tip warning={true}>
Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the
model's default generation configuration. You can override any `generation_config` by passing the corresponding
parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`.
For an overview of generation strategies and code examples, check out the [following
guide](../generation_strategies).
</Tip>
Parameters:
inputs (`torch.Tensor` of varying shape depending on the modality, *optional*):
The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the
method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs`
should of in the format of `input_ids`. For encoder-decoder models *inputs* can represent any of
`input_ids`, `input_values`, `input_features`, or `pixel_values`.
generation_config (`~generation.GenerationConfig`, *optional*):
The generation configuration to be used as base parametrization for the generation call. `**kwargs`
passed to generate matching the attributes of `generation_config` will override them. If
                `generation_config` is not provided, the default will be used, which has the following loading
priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
default values, whose documentation should be checked to parameterize generation.
logits_processor (`LogitsProcessorList`, *optional*):
Custom logits processors that complement the default logits processors built from arguments and
generation config. If a logit processor is passed that is already created with the arguments or a
generation config an error is thrown. This feature is intended for advanced users.
stopping_criteria (`StoppingCriteriaList`, *optional*):
Custom stopping criteria that complement the default stopping criteria built from arguments and a
generation config. If a stopping criteria is passed that is already created with the arguments or a
generation config an error is thrown. This feature is intended for advanced users.
prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], List[int]]`, *optional*):
                If provided, this function constrains the beam search to allowed tokens only at each step. If not
provided no constraint is applied. This function takes 2 arguments: the batch ID `batch_id` and
`input_ids`. It has to return a list with the allowed tokens for the next generation step conditioned
                on the batch ID `batch_id` and the previously generated tokens `input_ids`. This argument is useful
for constrained generation conditioned on the prefix, as described in [Autoregressive Entity
Retrieval](https://arxiv.org/abs/2010.00904).
synced_gpus (`bool`, *optional*):
Whether to continue running the while loop until max_length. Unless overridden this flag will be set to
`True` under DeepSpeed ZeRO Stage 3 multiple GPUs environment to avoid hanging if one GPU finished
generating before other GPUs. Otherwise it'll be set to `False`.
assistant_model (`PreTrainedModel`, *optional*):
An assistant model that can be used to accelerate generation. The assistant model must have the exact
                same tokenizer. The acceleration is achieved when forecasting candidate tokens with the assistant model
is much faster than running generation with the model you're calling generate from. As such, the
assistant model should be much smaller.
streamer (`BaseStreamer`, *optional*):
Streamer object that will be used to stream the generated sequences. Generated tokens are passed
through `streamer.put(token_ids)` and the streamer is responsible for any further processing.
kwargs (`Dict[str, Any]`, *optional*):
Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be
forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder
specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*.
Return:
[`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True`
or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`.
If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible
[`~utils.ModelOutput`] types are:
- [`~generation.GreedySearchDecoderOnlyOutput`],
- [`~generation.SampleDecoderOnlyOutput`],
- [`~generation.BeamSearchDecoderOnlyOutput`],
- [`~generation.BeamSampleDecoderOnlyOutput`]
If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible
[`~utils.ModelOutput`] types are:
- [`~generation.GreedySearchEncoderDecoderOutput`],
- [`~generation.SampleEncoderDecoderOutput`],
- [`~generation.BeamSearchEncoderDecoderOutput`],
- [`~generation.BeamSampleEncoderDecoderOutput`]
"""
if synced_gpus is None:
# if is_deepspeed_zero3_enabled() and dist.get_world_size() > 1:
# synced_gpus = True
# else:
# synced_gpus = False
synced_gpus = False
# 1. Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call
self._validate_model_class()
# priority: `generation_config` argument > `model.generation_config` (the default generation config)
if generation_config is None:
# legacy: users may modify the model configuration to control generation -- update the generation config
# model attribute accordingly, if it was created from the model config
if self.generation_config._from_model_config:
new_generation_config = GenerationConfig.from_model_config(self.config)
if new_generation_config != self.generation_config:
# warnings.warn(
# "You have modified the pretrained model configuration to control generation. This is a"
# " deprecated strategy to control generation and will be removed soon, in a future version."
# " Please use a generation configuration file (see"
# " https://huggingface.co/docs/transformers/main_classes/text_generation )"
# )
self.generation_config = new_generation_config
generation_config = self.generation_config
generation_config = copy.deepcopy(generation_config)
model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs
generation_config.validate()
self._validate_model_kwargs(model_kwargs.copy())
if not hasattr(generation_config, 'decoding_kwargs'):
generation_config.decoding_kwargs = model_kwargs.get('decoding_kwargs', {})
# 2. Set generation parameters if not already defined
logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
if generation_config.pad_token_id is None and generation_config.eos_token_id is not None:
if model_kwargs.get("attention_mask", None) is None:
logger.warning(
"The attention mask and the pad token id were not set. As a consequence, you may observe "
"unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results."
)
eos_token_id = generation_config.eos_token_id
if isinstance(eos_token_id, list):
eos_token_id = eos_token_id[0]
logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.")
generation_config.pad_token_id = eos_token_id
# 3. Define model inputs
# inputs_tensor has to be defined
# model_input_name is defined if model-specific keyword input is passed
# otherwise model_input_name is None
# all model-specific keyword inputs are removed from `model_kwargs`
inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(
inputs, generation_config.bos_token_id, model_kwargs
)
batch_size = inputs_tensor.shape[0]
# 4. Define other model kwargs
model_kwargs["output_attentions"] = generation_config.output_attentions
model_kwargs["output_hidden_states"] = generation_config.output_hidden_states
# decoder-only models with inputs_embeds forwarding must use caching (otherwise we can't detect whether we are
# generating the first new token or not, and we only want to use the embeddings for the first new token)
if not self.config.is_encoder_decoder and model_input_name == "inputs_embeds":
model_kwargs["use_cache"] = True
else:
model_kwargs["use_cache"] = generation_config.use_cache
accepts_attention_mask = "attention_mask" in set(inspect.signature(self.forward).parameters.keys())
requires_attention_mask = "encoder_outputs" not in model_kwargs
if model_kwargs.get("attention_mask", None) is None and requires_attention_mask and accepts_attention_mask:
model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation(
inputs_tensor, generation_config.pad_token_id, generation_config.eos_token_id
)
# decoder-only models should use left-padding for generation
if not self.config.is_encoder_decoder:
# If `input_ids` was given, check if the last id in any sequence is `pad_token_id`
# Note: If using, `inputs_embeds` this check does not work, because we want to be more hands-off.
if (
generation_config.pad_token_id is not None
and len(inputs_tensor.shape) == 2
and torch.sum(inputs_tensor[:, -1] == generation_config.pad_token_id) > 0
):
logger.warning(
"A decoder-only architecture is being used, but right-padding was detected! For correct "
"generation results, please set `padding_side='left'` when initializing the tokenizer."
)
if self.config.is_encoder_decoder and "encoder_outputs" not in model_kwargs:
# if model is encoder decoder encoder_outputs are created
# and added to `model_kwargs`
model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(
inputs_tensor, model_kwargs, model_input_name
)
# 5. Prepare `input_ids` which will be used for auto-regressive generation
if self.config.is_encoder_decoder:
input_ids, model_kwargs = self._prepare_decoder_input_ids_for_generation(
batch_size=batch_size,
model_input_name=model_input_name,
model_kwargs=model_kwargs,
decoder_start_token_id=generation_config.decoder_start_token_id,
bos_token_id=generation_config.bos_token_id,
device=inputs_tensor.device,
)
else:
input_ids = inputs_tensor if model_input_name == "input_ids" else model_kwargs.pop("input_ids")
if streamer is not None:
streamer.put(input_ids.cpu())
# 6. Prepare `max_length` depending on other stopping criteria.
input_ids_length = input_ids.shape[-1]
has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
if generation_config.max_new_tokens is not None:
if not has_default_max_length:
logger.warning(
f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(="
f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
"Please refer to the documentation for more information. "
"(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)"
)
generation_config.max_length = generation_config.max_new_tokens + input_ids_length
# 7. determine generation mode
generation_mode = self._get_generation_mode(generation_config, assistant_model)
if streamer is not None and (generation_config.num_beams > 1):
raise ValueError(
"`streamer` cannot be used with beam search (yet!). Make sure that `num_beams` is set to 1."
)
if self.device.type != input_ids.device.type:
warnings.warn(
"You are calling .generate() with the `input_ids` being on a device type different"
f" than your model's device. `input_ids` is on {input_ids.device.type}, whereas the model"
f" is on {self.device.type}. You may experience unexpected behaviors or slower generation."
" Please make sure that you have put `input_ids` to the"
f" correct device by calling for example input_ids = input_ids.to('{self.device.type}') before"
" running `.generate()`.",
UserWarning,
)
# 8. prepare distribution pre_processing samplers
logits_processor = self._get_logits_processor(
generation_config=generation_config,
input_ids_seq_length=input_ids_length,
encoder_input_ids=inputs_tensor,
prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
logits_processor=logits_processor,
)
# 9. prepare stopping criteria
stopping_criteria = self._get_stopping_criteria(
generation_config=generation_config, stopping_criteria=stopping_criteria
)
decoding_kwargs = generation_config.decoding_kwargs if hasattr(generation_config, 'decoding_kwargs') else {}
decoding_kwargs['generation_mode'] = generation_mode
decoding_kwargs['do_sample'] = generation_config.do_sample
decoding_kwargs['inputs_embeds_position'] = generation_config.inputs_embeds_position if hasattr(generation_config, 'inputs_embeds_position') else 0
decoding_kwargs['max_length'] = generation_config.max_length
if generation_mode == GenerationMode.LOOKAHEAD_GENERATION:
decoding_length = decoding_kwargs.get('decoding_length', 64)
decoding_kwargs['decoding_max_length'] = generation_config.max_length + decoding_length + 1
else:
decoding_kwargs['decoding_max_length'] = generation_config.max_length
model_kwargs['decoding_kwargs'] = decoding_kwargs
# 10. go into different generation modes
if generation_mode == GenerationMode.ASSISTED_GENERATION:
if generation_config.num_return_sequences > 1:
raise ValueError(
"num_return_sequences has to be 1 when doing assisted generate, "
f"but is {generation_config.num_return_sequences}."
)
if batch_size > 1:
raise ValueError("assisted generate is only supported for batch_size = 1")
if not model_kwargs["use_cache"]:
raise ValueError("assisted generate requires `use_cache=True`")
# 11. If the assistant model is an encoder-decoder, prepare its encoder outputs
if assistant_model.config.is_encoder_decoder:
assistant_model_kwargs = copy.deepcopy(model_kwargs)
inputs_tensor, model_input_name, assistant_model_kwargs = assistant_model._prepare_model_inputs(
inputs_tensor, assistant_model.generation_config.bos_token_id, assistant_model_kwargs
)
assistant_model_kwargs = assistant_model._prepare_encoder_decoder_kwargs_for_generation(
inputs_tensor, assistant_model_kwargs, model_input_name
)
model_kwargs["assistant_encoder_outputs"] = assistant_model_kwargs["encoder_outputs"]
# 12. run assisted generate
return self.assisted_decoding(
input_ids,
assistant_model=assistant_model,
do_sample=generation_config.do_sample,
logits_processor=logits_processor,
logits_warper=self._get_logits_warper(generation_config) if generation_config.do_sample else None,
stopping_criteria=stopping_criteria,
pad_token_id=generation_config.pad_token_id,
eos_token_id=generation_config.eos_token_id,
output_scores=generation_config.output_scores,
return_dict_in_generate=generation_config.return_dict_in_generate,
synced_gpus=synced_gpus,
streamer=streamer,
**model_kwargs,
)
if generation_mode == GenerationMode.GREEDY_SEARCH:
# 11. run greedy search
return self.greedy_search(
input_ids,
logits_processor=logits_processor,
stopping_criteria=stopping_criteria,
pad_token_id=generation_config.pad_token_id,
eos_token_id=generation_config.eos_token_id,
output_scores=generation_config.output_scores,
return_dict_in_generate=generation_config.return_dict_in_generate,
synced_gpus=synced_gpus,
streamer=streamer,
**model_kwargs,
)
elif generation_mode == GenerationMode.LOOKAHEAD_GENERATION:
# 11. run greedy search
return self.lookahead_generation(
input_ids,
logits_processor=logits_processor,
stopping_criteria=stopping_criteria,
pad_token_id=generation_config.pad_token_id,
eos_token_id=generation_config.eos_token_id,
output_scores=generation_config.output_scores,
return_dict_in_generate=generation_config.return_dict_in_generate,
synced_gpus=synced_gpus,
streamer=streamer,
**model_kwargs,
)
elif generation_mode == GenerationMode.CONTRASTIVE_SEARCH:
if not model_kwargs["use_cache"]:
raise ValueError("Contrastive search requires `use_cache=True`")
return self.contrastive_search(
input_ids,
top_k=generation_config.top_k,
penalty_alpha=generation_config.penalty_alpha,
logits_processor=logits_processor,
stopping_criteria=stopping_criteria,
pad_token_id=generation_config.pad_token_id,
eos_token_id=generation_config.eos_token_id,
output_scores=generation_config.output_scores,
return_dict_in_generate=generation_config.return_dict_in_generate,
synced_gpus=synced_gpus,
streamer=streamer,
sequential=generation_config.low_memory,
**model_kwargs,
)
elif generation_mode == GenerationMode.SAMPLE:
# 11. prepare logits warper
logits_warper = self._get_logits_warper(generation_config)
# 12. expand input_ids with `num_return_sequences` additional sequences per batch
input_ids, model_kwargs = self._expand_inputs_for_generation(
input_ids=input_ids,
expand_size=generation_config.num_return_sequences,
is_encoder_decoder=self.config.is_encoder_decoder,
**model_kwargs,
)
# 13. run sample
return self.sample(
input_ids,
logits_processor=logits_processor,
logits_warper=logits_warper,
stopping_criteria=stopping_criteria,
pad_token_id=generation_config.pad_token_id,
eos_token_id=generation_config.eos_token_id,
output_scores=generation_config.output_scores,
return_dict_in_generate=generation_config.return_dict_in_generate,
synced_gpus=synced_gpus,
streamer=streamer,
**model_kwargs,
)
elif generation_mode == GenerationMode.BEAM_SEARCH:
# 11. prepare beam search scorer
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=generation_config.num_beams,
device=inputs_tensor.device,
length_penalty=generation_config.length_penalty,
do_early_stopping=generation_config.early_stopping,
num_beam_hyps_to_keep=generation_config.num_return_sequences,
max_length=generation_config.max_length,
)
# 12. interleave input_ids with `num_beams` additional sequences per batch
input_ids, model_kwargs = self._expand_inputs_for_generation(
input_ids=input_ids,
expand_size=generation_config.num_beams,
is_encoder_decoder=self.config.is_encoder_decoder,
**model_kwargs,
)
# 13. run beam search
return self.beam_search(
input_ids,
beam_scorer,
logits_processor=logits_processor,
stopping_criteria=stopping_criteria,
pad_token_id=generation_config.pad_token_id,
eos_token_id=generation_config.eos_token_id,
output_scores=generation_config.output_scores,
return_dict_in_generate=generation_config.return_dict_in_generate,
synced_gpus=synced_gpus,
**model_kwargs,
)
elif generation_mode == GenerationMode.BEAM_SAMPLE:
# 11. prepare logits warper
logits_warper = self._get_logits_warper(generation_config)
# 12. prepare beam search scorer
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=generation_config.num_beams,
device=inputs_tensor.device,
length_penalty=generation_config.length_penalty,
do_early_stopping=generation_config.early_stopping,
num_beam_hyps_to_keep=generation_config.num_return_sequences,
max_length=generation_config.max_length,
)
# 13. interleave input_ids with `num_beams` additional sequences per batch
input_ids, model_kwargs = self._expand_inputs_for_generation(
input_ids=input_ids,
expand_size=generation_config.num_beams,
is_encoder_decoder=self.config.is_encoder_decoder,
**model_kwargs,
)
# 14. run beam sample
return self.beam_sample(
input_ids,
beam_scorer,
logits_processor=logits_processor,
logits_warper=logits_warper,
stopping_criteria=stopping_criteria,
pad_token_id=generation_config.pad_token_id,
eos_token_id=generation_config.eos_token_id,
output_scores=generation_config.output_scores,
return_dict_in_generate=generation_config.return_dict_in_generate,
synced_gpus=synced_gpus,
**model_kwargs,
)
elif generation_mode == GenerationMode.GROUP_BEAM_SEARCH:
# 11. prepare beam search scorer
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=generation_config.num_beams,
device=inputs_tensor.device,
length_penalty=generation_config.length_penalty,
do_early_stopping=generation_config.early_stopping,
num_beam_hyps_to_keep=generation_config.num_return_sequences,
num_beam_groups=generation_config.num_beam_groups,
max_length=generation_config.max_length,
)
# 12. interleave input_ids with `num_beams` additional sequences per batch
input_ids, model_kwargs = self._expand_inputs_for_generation(
input_ids=input_ids,
expand_size=generation_config.num_beams,
is_encoder_decoder=self.config.is_encoder_decoder,
**model_kwargs,
)
# 13. run beam search
return self.group_beam_search(
input_ids,
beam_scorer,
logits_processor=logits_processor,
stopping_criteria=stopping_criteria,
pad_token_id=generation_config.pad_token_id,
eos_token_id=generation_config.eos_token_id,
output_scores=generation_config.output_scores,
return_dict_in_generate=generation_config.return_dict_in_generate,
synced_gpus=synced_gpus,
**model_kwargs,
)
elif generation_mode == GenerationMode.CONSTRAINED_BEAM_SEARCH:
final_constraints = []
if generation_config.constraints is not None:
final_constraints = generation_config.constraints
if generation_config.force_words_ids is not None:
def typeerror():
raise ValueError(
"`force_words_ids` has to either be a `List[List[List[int]]]` or `List[List[int]]`"
f"of positive integers, but is {generation_config.force_words_ids}."
)
if (
not isinstance(generation_config.force_words_ids, list)
or len(generation_config.force_words_ids) == 0
):
typeerror()
for word_ids in generation_config.force_words_ids:
if isinstance(word_ids[0], list):
if not isinstance(word_ids, list) or len(word_ids) == 0:
typeerror()
if any(not isinstance(token_ids, list) for token_ids in word_ids):
typeerror()
if any(
any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
for token_ids in word_ids
):
typeerror()
constraint = DisjunctiveConstraint(word_ids)
else:
if not isinstance(word_ids, list) or len(word_ids) == 0:
typeerror()
if any((not isinstance(token_id, int) or token_id < 0) for token_id in word_ids):
typeerror()
constraint = PhrasalConstraint(word_ids)
final_constraints.append(constraint)
# 11. prepare beam search scorer
constrained_beam_scorer = ConstrainedBeamSearchScorer(
constraints=final_constraints,
batch_size=batch_size,
num_beams=generation_config.num_beams,
device=inputs_tensor.device,
length_penalty=generation_config.length_penalty,
do_early_stopping=generation_config.early_stopping,
num_beam_hyps_to_keep=generation_config.num_return_sequences,
max_length=generation_config.max_length,
)
# 12. interleave input_ids with `num_beams` additional sequences per batch
input_ids, model_kwargs = self._expand_inputs_for_generation(
input_ids=input_ids,
expand_size=generation_config.num_beams,
is_encoder_decoder=self.config.is_encoder_decoder,
**model_kwargs,
)
# 13. run beam search
return self.constrained_beam_search(
input_ids,
constrained_beam_scorer=constrained_beam_scorer,
logits_processor=logits_processor,
stopping_criteria=stopping_criteria,
pad_token_id=generation_config.pad_token_id,
eos_token_id=generation_config.eos_token_id,
output_scores=generation_config.output_scores,
return_dict_in_generate=generation_config.return_dict_in_generate,
synced_gpus=synced_gpus,
**model_kwargs,
)
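    # --------------------------------------------------------------------------
    # Illustrative sketch (not part of the original file): how a caller might
    # drive lookahead generation end to end through `generate`. The model,
    # tokenizer and concrete values are assumptions for illustration; only the
    # keys consumed above (`use_lookahead`, `decoding_length`, `branch_length`,
    # `decoding_mode`) come from the code itself.
    #
    #   inputs = tokenizer(prompt, return_tensors='pt').to(model.device)
    #   outputs = model.generate(
    #       **inputs,
    #       max_new_tokens=256,
    #       decoding_kwargs={
    #           'use_lookahead': True,
    #           'decoding_length': 64,
    #           'branch_length': 12,
    #           'decoding_mode': 'hier',
    #       },
    #       return_dict_in_generate=True,
    #   )
    #   # The returned LookaheadDecoderOnlyOutput is expected to carry the
    #   # per-step statistics ('dls', 'edls', 'fts') collected in
    #   # `lookahead_generation` below.
    # --------------------------------------------------------------------------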
def lookahead_prepare_inputs_for_generation(self,
input_ids,
past_key_values=None,
attention_mask=None,
inputs_embeds=None,
**kwargs):
position_ids = kwargs.get("position_ids", None)
decoding_kwargs = kwargs.get('decoding_kwargs', {})
decoding_length = decoding_kwargs.get('decoding_length', 64)
branch_length = decoding_kwargs.get('branch_length', 12)
decoding_mode = decoding_kwargs.get('decoding_mode', 'hier')
max_length = decoding_kwargs.get('max_length', 2048)
update_branch_length = min(branch_length, max_length - input_ids.size(-1))
assert update_branch_length > 0, f'{branch_length=} {max_length=} {input_ids.size(-1)=} {update_branch_length=}'
if past_key_values is None:
if inputs_embeds is not None and input_ids is not None:
model_inputs = {"inputs_embeds": inputs_embeds, "input_ids": input_ids}
length = input_ids.size(1)
elif input_ids is not None:
model_inputs = {"input_ids": input_ids}
length = input_ids.size(1)
elif inputs_embeds is not None:
model_inputs = {"inputs_embeds": inputs_embeds}
                length = inputs_embeds.size(1)  # input_ids is None in this branch
else:
                raise ValueError('either input_ids or inputs_embeds must be provided')
update_attention_mask = attention_mask[:, :, :length, :length]
model_inputs.update(
{"past_key_values": past_key_values,
"use_cache": kwargs.get("use_cache"),
"attention_mask": update_attention_mask,
"decoding_kwargs": decoding_kwargs
})
if position_ids is not None:
model_inputs["position_ids"] = self._get_position_ids(position_ids, encoding=True, length=length)
else:
decoding_qids = input_ids[0, -2:].tolist()
# decoding_qids = decoding_kwargs['input_id_list'][0][-2:]
min_input_size = 0
min_output_size = max(decoding_length // 2, 1)
if decoding_mode in ('hier', 'par', 'one'):
decoding_mode = decoding_mode + '_mix'
fmt, mode = decoding_mode.split('_')
method_name = fmt + '_get'
decoding_ids, decoding_masks, sizes = getattr(self.lookahead_cache, method_name)(decoding_qids,
decoding_length=decoding_length,
branch_length=update_branch_length,
min_input_size=min_input_size,
min_output_size=min_output_size,
mode=mode,
idx=0)
decoding_input_ids = torch.tensor([decoding_ids], dtype=torch.long, device=input_ids.device)
prefix_length = input_ids.size(-1) - 1
fresh_length = len(decoding_ids)
ppl = prefix_length + fresh_length
assert ppl <= attention_mask.size(2), \
f'{max_length=} {update_branch_length=} {prefix_length=} {fresh_length=} {attention_mask.shape=}'
prefix_mask_tensor = attention_mask[:, :, prefix_length:ppl, :prefix_length]
decoding_mask_tensor = torch.from_numpy(decoding_masks[None, None]).to(
dtype=attention_mask.dtype, device=attention_mask.device)
decoding_attention_mask = torch.cat([prefix_mask_tensor, decoding_mask_tensor], dim=3)
decoding_kwargs.update({'decoding_qids': decoding_qids,
'decoding_ids': decoding_ids,
'decoding_masks': decoding_masks,
'sizes': sizes,
})
model_inputs = {'decoding_kwargs': decoding_kwargs}
model_inputs.update(
{
"input_ids": decoding_input_ids,
"past_key_values": past_key_values,
"use_cache": kwargs.get("use_cache"),
"attention_mask": decoding_attention_mask
}
)
if position_ids is not None:
indices = torch.sum(decoding_attention_mask, dim=3).squeeze(1)[0]
model_inputs["position_ids"] = self._get_position_ids(position_ids, indices=indices, encoding=False)
return model_inputs
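    # --------------------------------------------------------------------------
    # Illustrative sketch (not part of the original file): the decoding-step
    # attention mask built above concatenates (a) the causal prefix columns and
    # (b) the tree mask returned by the lookahead cache. With toy sizes
    # (prefix_length=4, 3 drafted tokens) the shapes work out as follows; the
    # numbers are assumptions for illustration only.
    #
    #   import numpy as np, torch
    #   prefix_length, n_draft = 4, 3
    #   full = torch.tril(torch.ones(1, 1, 16, 16, dtype=torch.long))       # full causal mask
    #   decoding_masks = np.ones((n_draft, n_draft), dtype=np.int64)        # toy tree mask from the cache
    #   prefix_part = full[:, :, prefix_length:prefix_length + n_draft, :prefix_length]
    #   draft_part = torch.from_numpy(decoding_masks[None, None]).long()
    #   step_mask = torch.cat([prefix_part, draft_part], dim=3)
    #   # step_mask.shape == (1, 1, 3, 7): every drafted token may attend to the
    #   # whole prefix, and to other drafted tokens according to the tree mask.
    # --------------------------------------------------------------------------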
def _get_position_ids(self, full_position_ids, indices=None, length=None, encoding=True):
if encoding:
return full_position_ids[..., :length]
else:
return full_position_ids[..., indices]
def _lookahead_update_model_kwargs_for_generation(
self,
outputs: ModelOutput,
model_kwargs: Dict[str, Any],
is_encoder_decoder: bool = False,
standardize_cache_format: bool = False,
logits_processor: Optional[LogitsProcessorList] = None,
input_ids: Optional[torch.Tensor] = None,
) -> Dict[str, Any]:
# update past_key_values
model_kwargs["past_key_values"] = self._extract_past_from_model_output(
outputs, standardize_cache_format=standardize_cache_format
)
decoding_kwargs = model_kwargs['decoding_kwargs']
decoding_ids = decoding_kwargs.get('decoding_ids', [])
if len(decoding_ids) <= 1:
next_token_logits = outputs.logits[:, -1:, :]
# pre-process distribution
# next_tokens_scores = logits_processor(input_ids, next_token_logits)
bs, nt, nv = next_token_logits.shape
next_tokens_scores = logits_processor(input_ids, next_token_logits.squeeze(1)).unsqueeze(1)
if decoding_kwargs.get('do_sample', False):
probs = nn.functional.softmax(next_tokens_scores, dim=-1)
next_tokens = torch.multinomial(probs.view(bs * nt, nv), num_samples=1).view(bs, nt)
else:
next_tokens = torch.argmax(next_tokens_scores, dim=-1, keepdim=False).long()
model_kwargs['next_tokens'] = next_tokens
model_kwargs['next_tokens_scores'] = next_tokens_scores
next_token_list = next_tokens.tolist()
model_kwargs['next_token_list'] = next_token_list
decoding_kwargs['input_id_list'][0].extend(next_token_list[0])
decoding_kwargs['dls'].append(1)
decoding_kwargs['edls'].append(1)
if decoding_kwargs.get('debug_lookahead', False):
decoding_qids = decoding_kwargs.get('decoding_qids', [])
print(f'size:0 query:{decoding_qids} next_token:{next_token_list[0]}')
else:
# TODO: accurate logit_processor
# next_tokens_scores = logits_processor(input_ids, outputs.logits)
bs, nt, nv = outputs.logits.shape
next_tokens_scores = logits_processor(input_ids.repeat(1, nt).view(bs * nt, -1),
outputs.logits.view(bs * nt, -1)).view(bs, nt, -1)
if decoding_kwargs.get('do_sample', False):
probs = nn.functional.softmax(next_tokens_scores, dim=-1)
bs, nt, nv = probs.shape
next_tokens = torch.multinomial(probs.view(bs * nt, nv), num_samples=1).view(bs, nt)
else:
next_tokens = torch.argmax(next_tokens_scores, dim=-1, keepdim=False).long()
next_token_list = next_tokens.tolist()[0]
decoding_ids = decoding_kwargs['decoding_ids'][1:]
decoding_mask = decoding_kwargs['decoding_masks']
sizes = decoding_kwargs['sizes']
max_match_index = 0
max_match_count = 0
max_decoding_ids_slice = None
max_next_token_slice = None
for i in range(len(decoding_ids)):
mask_indices = np.nonzero(decoding_mask[i + 1, 1:])[0]
decoding_ids_slice = [decoding_ids[j] for j in mask_indices]
next_token_slice = [next_token_list[0]] + [next_token_list[j + 1] for j in mask_indices]
c = len(decoding_ids_slice)
for j, p in enumerate(decoding_ids_slice):
if next_token_slice[j] != p:
c = j
break
if c > max_match_count:
max_match_count = c
max_match_index = i
if c >= max_match_count:
max_decoding_ids_slice = decoding_ids_slice
max_next_token_slice = next_token_slice
# if decoding_kwargs['eos'] in decoding_ids:
# max_match_count = 0
prefix_plus_count = input_ids.size(-1)
match_idx = np.nonzero(decoding_mask[max_match_index + 1, 1:])[0][:max_match_count]
if len(decoding_ids) != max_match_count:
past = model_kwargs["past_key_values"]
device = past[0][0].device
kv_idx = torch.tensor(match_idx + prefix_plus_count, dtype=torch.long, device=device)
model_kwargs["past_key_values"] = self._update_cache(past,
kv_idx,
prefix_and_next_count=prefix_plus_count,
max_match_count=max_match_count,
max_match_index=max_match_index)
next_token_list = [next_token_list[0:1] + [next_token_list[x + 1] for x in match_idx]]
next_tokens = torch.tensor(next_token_list, dtype=torch.long, device=input_ids.device)
model_kwargs['next_tokens'] = next_tokens
model_kwargs['next_token_list'] = next_token_list
decoding_kwargs['input_id_list'][0].extend(next_token_list[0])
decoding_kwargs['dls'].append(len(decoding_ids))
decoding_kwargs['edls'].append(max_match_count + 1)
if decoding_kwargs.get('debug_lookahead', False):
lengths = np.sum(decoding_mask, axis=1) - 1
l = np.concatenate([lengths[:-1][(lengths[1:] - lengths[:-1]) <= 0], lengths[-1:]], axis=0)
ls = ','.join(l.astype(np.str_))
decoding_qids = decoding_kwargs['decoding_qids']
size_str = ','.join([str(x) for x in sizes])
print(
f'decoding_length:{len(decoding_ids)+1} accept_length:{max_match_count+1} '
f'query:{decoding_qids} source:{size_str} lengths:{ls} index:{max_match_index} '
f'branch_token:{max_decoding_ids_slice} next_token:{max_next_token_slice}')
return model_kwargs
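    # --------------------------------------------------------------------------
    # Illustrative sketch (not part of the original file): the acceptance step
    # above compares each drafted branch with the tokens the model actually
    # predicted and keeps the longest matching run. A toy, pure-Python version
    # of that idea (the real code additionally uses the tree mask to select the
    # branch) might look like this; all values are made up for illustration.
    #
    #   drafted = [11, 12, 13]            # decoding_ids[1:], one linear branch
    #   predicted = [11, 12, 99, 7]       # model prediction at each position, starting from the prefix
    #   accepted = 0
    #   for draft_tok, model_tok in zip(drafted, predicted):
    #       if draft_tok != model_tok:
    #           break
    #       accepted += 1
    #   # accepted == 2: two drafted tokens are kept, plus the model's own next
    #   # token, so 'edls' records accepted + 1 = 3 emitted tokens for this step.
    # --------------------------------------------------------------------------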
def _update_cache(self, past_key_values, kv_idx, prefix_and_next_count=None, max_match_count=None,
max_match_index=None):
update_past_key_values = []
for k, v in past_key_values:
if max_match_index + 1 == max_match_count:
k = k[:, :, :prefix_and_next_count + max_match_count]
v = v[:, :, :prefix_and_next_count + max_match_count]
else:
k = torch.concat([k[:, :, :prefix_and_next_count], k[:, :, kv_idx]], 2)
v = torch.concat([v[:, :, :prefix_and_next_count], v[:, :, kv_idx]], 2)
update_past_key_values.append((k, v))
return tuple(update_past_key_values)
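    # --------------------------------------------------------------------------
    # Illustrative sketch (not part of the original file): `_update_cache` keeps
    # only the key/value positions of the prefix plus the accepted draft tokens.
    # A toy example with made-up sizes (batch=1, heads=2, head_dim=4):
    #
    #   import torch
    #   prefix_and_next_count = 5
    #   k = torch.randn(1, 2, 5 + 6, 4)                 # cache grew by 6 drafted positions
    #   v = torch.randn(1, 2, 5 + 6, 4)
    #   kv_idx = torch.tensor([5, 7])                   # absolute indices of the accepted drafts
    #   new_k = torch.cat([k[:, :, :prefix_and_next_count], k[:, :, kv_idx]], dim=2)
    #   new_v = torch.cat([v[:, :, :prefix_and_next_count], v[:, :, kv_idx]], dim=2)
    #   # new_k.shape == (1, 2, 7, 4): the rejected draft positions are dropped.
    # --------------------------------------------------------------------------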
def lookahead_generation(
self,
input_ids: torch.LongTensor,
logits_processor: Optional[LogitsProcessorList] = None,
stopping_criteria: Optional[StoppingCriteriaList] = None,
max_length: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[Union[int, List[int]]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_scores: Optional[bool] = None,
return_dict_in_generate: Optional[bool] = None,
synced_gpus: bool = False,
streamer: Optional["BaseStreamer"] = None,
**model_kwargs,
) -> Union[GreedySearchOutput, torch.LongTensor]:
r"""
        Generates sequences of token ids for models with a language modeling head using **lookahead decoding** and can be
used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
<Tip warning={true}>
In most cases, you do not need to call [`~generation.GenerationMixin.greedy_search`] directly. Use generate()
instead. For an overview of generation strategies and code examples, check the [following
guide](../generation_strategies).
</Tip>
Parameters:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
The sequence used as a prompt for the generation.
logits_processor (`LogitsProcessorList`, *optional*):
An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]
used to modify the prediction scores of the language modeling head applied at each generation step.
stopping_criteria (`StoppingCriteriaList`, *optional*):
An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`]
used to tell if the generation loop should stop.
max_length (`int`, *optional*, defaults to 20):
**DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated
tokens. The maximum length of the sequence to be generated.
pad_token_id (`int`, *optional*):
The id of the *padding* token.
eos_token_id (`Union[int, List[int]]`, *optional*):
The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
output_attentions (`bool`, *optional*, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more details.
output_hidden_states (`bool`, *optional*, defaults to `False`):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more details.
output_scores (`bool`, *optional*, defaults to `False`):
Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
return_dict_in_generate (`bool`, *optional*, defaults to `False`):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
synced_gpus (`bool`, *optional*, defaults to `False`):
Whether to continue running the while loop until max_length (needed for ZeRO stage 3)
streamer (`BaseStreamer`, *optional*):
Streamer object that will be used to stream the generated sequences. Generated tokens are passed
through `streamer.put(token_ids)` and the streamer is responsible for any further processing.
model_kwargs:
Additional model specific keyword arguments will be forwarded to the `forward` function of the model.
If model is an encoder-decoder model the kwargs should include `encoder_outputs`.
Return:
[`~generation.GreedySearchDecoderOnlyOutput`], [`~generation.GreedySearchEncoderDecoderOutput`] or
`torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a
[`~generation.GreedySearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
`return_dict_in_generate=True` or a [`~generation.GreedySearchEncoderDecoderOutput`] if
`model.config.is_encoder_decoder=True`.
Examples:
```python
>>> from transformers import (
... AutoTokenizer,
... AutoModelForCausalLM,
... LogitsProcessorList,
... MinLengthLogitsProcessor,
... StoppingCriteriaList,
... MaxLengthCriteria,
... )
>>> tokenizer = AutoTokenizer.from_pretrained("gpt2")
>>> model = AutoModelForCausalLM.from_pretrained("gpt2")
>>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token
>>> model.generation_config.pad_token_id = model.generation_config.eos_token_id
>>> input_prompt = "It might be possible to"
>>> input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids
>>> # instantiate logits processors
>>> logits_processor = LogitsProcessorList(
... [
... MinLengthLogitsProcessor(10, eos_token_id=model.generation_config.eos_token_id),
... ]
... )
>>> stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
>>> outputs = model.greedy_search(
... input_ids, logits_processor=logits_processor, stopping_criteria=stopping_criteria
... )
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
["It might be possible to get a better understanding of the nature of the problem, but it's not"]
```"""
# init values
if not hasattr(self, 'lookahead_cache'):
self.lookahead_cache = LookaheadCache()
logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
if max_length is not None:
warnings.warn(
"`max_length` is deprecated in this function, use"
" `stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)])` instead.",
UserWarning,
)
stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length)
pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
if isinstance(eos_token_id, int):
eos_token_id = [eos_token_id]
eos_token_id_tensor = torch.tensor(eos_token_id, device=input_ids.device) if eos_token_id is not None else None
output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
output_attentions = (
output_attentions if output_attentions is not None else self.generation_config.output_attentions
)
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
)
return_dict_in_generate = (
return_dict_in_generate
if return_dict_in_generate is not None
else self.generation_config.return_dict_in_generate
)
# init attention / hidden states / scores tuples
scores = () if (return_dict_in_generate and output_scores) else None
decoder_attentions = () if (return_dict_in_generate and output_attentions) else None
cross_attentions = () if (return_dict_in_generate and output_attentions) else None
decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None
# if model is an encoder-decoder, retrieve encoder attention weights and hidden states
if return_dict_in_generate and self.config.is_encoder_decoder:
encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
encoder_hidden_states = (
model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
)
decoding_kwargs = model_kwargs['decoding_kwargs']
decoding_kwargs.update({
'eos': eos_token_id[0] if eos_token_id is not None else 2,
'edls': [],
'dls': [],
'fts': []
})
decoding_length = decoding_kwargs.get('decoding_length', 64)
stop_max_length = stopping_criteria.max_length
decoding_max_length = stop_max_length + decoding_length + 1
attention_mask = model_kwargs.get('attention_mask', None)
input_device = input_ids.device
if attention_mask is None:
bs = input_ids.size(0)
full_attention_mask = torch.tril(
torch.ones((bs, 1, decoding_max_length, decoding_max_length), dtype=torch.long, device=input_device),
0)
elif len(attention_mask.shape) == 2:
# from [bs, src_len] to [bs,1,max_len,max_len]
bs, src_len = attention_mask.shape
pad_len = decoding_max_length - src_len
attention_mask = attention_mask.long()
if pad_len > 0:
pad_mask = torch.ones((bs, pad_len), dtype=torch.long, device=attention_mask.device)
attention_mask = torch.cat([attention_mask, pad_mask], 1)
full_attention_mask = torch.tril(attention_mask[:, None, None].expand(-1, -1, decoding_max_length, -1), 0)
elif len(attention_mask.shape) == 4:
bs, _, src_len, tgt_len = attention_mask.shape
attention_mask = attention_mask.long()
if src_len < decoding_max_length or tgt_len < decoding_max_length:
full_attention_mask = torch.tril(
torch.ones((bs, 1, decoding_max_length, decoding_max_length), dtype=torch.long,
device=input_device),
0)
full_attention_mask[:, :, :src_len, :tgt_len] = attention_mask
else:
full_attention_mask = attention_mask
else:
            raise ValueError(f'unsupported attention_mask.shape:{attention_mask.shape}')
model_kwargs['attention_mask'] = full_attention_mask
decoding_kwargs['max_length'] = stop_max_length
decoding_kwargs['decoding_max_length'] = decoding_max_length
# keep track of which sequences are already finished
unfinished_sequences = torch.ones(input_ids.shape[0], dtype=torch.long, device=input_ids.device)
assert input_ids.size(0) == 1
input_id_list = input_ids[0].tolist()
decoding_kwargs['input_id_list'] = [input_id_list]
branch_length = decoding_kwargs.get('branch_length', 12)
self.lookahead_cache.put(input_id_list[1:], branch_length=branch_length + 1, mode='input', idx=0)
ts = time.time()
this_peer_finished = False # used by synced_gpus only
while True:
if synced_gpus:
# Under synced_gpus the `forward` call must continue until all gpus complete their sequence.
# The following logic allows an early break if all peers finished generating their sequence
this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device)
# send 0.0 if we finished, 1.0 otherwise
dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM)
# did all peers finish? the reduced sum will be 0.0 then
if this_peer_finished_flag.item() == 0.0:
break
# prepare model inputs
model_inputs = self.lookahead_prepare_inputs_for_generation(input_ids, **model_kwargs)
decoding_kwargs = model_inputs.pop('decoding_kwargs', {})
# forward pass to get next token
outputs = self(
**model_inputs,
return_dict=True,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
if synced_gpus and this_peer_finished:
continue # don't waste resources running the code we don't need
model_kwargs['decoding_kwargs'] = decoding_kwargs
model_kwargs = self._lookahead_update_model_kwargs_for_generation(
outputs,
model_kwargs,
is_encoder_decoder=self.config.is_encoder_decoder,
input_ids=input_ids,
logits_processor=logits_processor
)
next_tokens = model_kwargs['next_tokens']
next_tokens_scores = model_kwargs['next_tokens_scores']
next_token_list = model_kwargs['next_token_list']
# finished sentences should have their next token be a padding token
if eos_token_id is not None:
if pad_token_id is None:
raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.")
next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences)
# update generated ids, model inputs, and length for next step
input_ids = torch.cat([input_ids, next_tokens], dim=-1)
if streamer is not None:
streamer.put(next_token_list)
self.lookahead_cache.stream_put(next_token_list[0], branch_length=branch_length + 1, final=False,
mode='output', idx=0)
# Store scores, attentions and hidden_states when required
if return_dict_in_generate:
if output_scores:
scores += (next_tokens_scores,)
if output_attentions:
decoder_attentions += (
(outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)
)
if self.config.is_encoder_decoder:
cross_attentions += (outputs.cross_attentions,)
if output_hidden_states:
decoder_hidden_states += (
(outputs.decoder_hidden_states,)
if self.config.is_encoder_decoder
else (outputs.hidden_states,)
)
# if eos_token was found in one sentence, set sentence to finished
if eos_token_id_tensor is not None:
# unfinished_sequences = unfinished_sequences.mul(
# next_tokens.tile(eos_token_id_tensor.shape[0], 1).ne(eos_token_id_tensor.unsqueeze(1)).prod(dim=0)
# )
unfinished_sequences = unfinished_sequences.mul(
next_tokens[:, :, None].ne(eos_token_id_tensor).prod(dim=2).prod(dim=1))
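                # Unlike the single-token check kept commented out above, this compares a
                # [bs, n] block of next_tokens against every eos id, as a lookahead step
                # may emit several tokens at once.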
# stop when each sentence is finished
if unfinished_sequences.max() == 0:
this_peer_finished = True
# stop if we exceed the maximum length
if stopping_criteria(input_ids, scores):
this_peer_finished = True
te = time.time()
model_kwargs['decoding_kwargs']['fts'].append(te - ts)
ts = te
if this_peer_finished and not synced_gpus:
self.lookahead_cache.stream_put([], branch_length=branch_length + 1, final=True,
mode='output', idx=0)
break
if streamer is not None:
streamer.end()
if return_dict_in_generate:
if self.config.is_encoder_decoder:
return GreedySearchEncoderDecoderOutput(
sequences=input_ids,
scores=scores,
encoder_attentions=encoder_attentions,
encoder_hidden_states=encoder_hidden_states,
decoder_attentions=decoder_attentions,
cross_attentions=cross_attentions,
decoder_hidden_states=decoder_hidden_states,
)
else:
kwargs = {'dls': model_kwargs['decoding_kwargs']['dls'],
'edls': model_kwargs['decoding_kwargs']['edls'],
'fts': model_kwargs['decoding_kwargs']['fts']} | return LookaheadDecoderOnlyOutput( | 2 | 2023-12-19 13:11:38+00:00 | 8k |
Its-Haze/league-rpc-linux | league_rpc_linux/__main__.py | [
{
"identifier": "gather_ingame_information",
"path": "league_rpc_linux/champion.py",
"snippet": "def gather_ingame_information() -> tuple[str, str, int, str, int, int]:\n \"\"\"\n Get the current playing champion name.\n \"\"\"\n all_game_data_url = ALL_GAME_DATA_URL\n your_summoner_name = get_summoner_name()\n\n champion_name: str | None = None\n skin_id: int | None = None\n skin_name: str | None = None\n game_mode: str | None = None # Set if the game mode was never found.. Maybe you are playing something new?\n level: int | None = None\n gold: int | None = None\n\n if response := wait_until_exists(\n url=all_game_data_url,\n custom_message=\"Did not find game data.. Will try again in 5 seconds\",\n ):\n parsed_data = response.json()\n game_mode = GAME_MODE_CONVERT_MAP.get(\n parsed_data[\"gameData\"][\"gameMode\"],\n parsed_data[\"gameData\"][\"gameMode\"],\n )\n\n if game_mode == \"TFT\":\n # If the currentGame is TFT.. gather the relevant information\n level = get_level()\n else:\n # If the gamemode is LEAGUE gather the relevant information.\n champion_name, skin_id, skin_name = gather_league_data(\n parsed_data=parsed_data, summoners_name=your_summoner_name\n )\n if game_mode == \"Arena\":\n level, gold = get_level(), get_gold()\n print(\"-\" * 50)\n if champion_name:\n print(\n f\"{Colors.yellow}Champion name found {Colors.green}({CHAMPION_NAME_CONVERT_MAP.get(champion_name, champion_name)}),{Colors.yellow} continuing..{Colors.reset}\"\n )\n if skin_name:\n print(\n f\"{Colors.yellow}Skin detected: {Colors.green}{skin_name},{Colors.yellow} continuing..{Colors.reset}\"\n )\n if game_mode:\n print(\n f\"{Colors.yellow}Game mode detected: {Colors.green}{game_mode},{Colors.yellow} continuing..{Colors.reset}\"\n )\n print(\"-\" * 50)\n\n # Returns default values if information was not found.\n return (\n (champion_name or \"\"),\n (skin_name or \"\"),\n (skin_id or 0),\n (game_mode or \"\"),\n (level or 0),\n (gold or 0),\n )"
},
{
"identifier": "get_skin_asset",
"path": "league_rpc_linux/champion.py",
"snippet": "def get_skin_asset(\n champion_name: str,\n skin_id: int,\n) -> str:\n \"\"\"\n Returns the URL for the skin/default skin of the champion.\n If a chroma has been selected, it will return the base skin for that chroma.\n Since RIOT does not have individual images for each chroma.\n \"\"\"\n\n while skin_id:\n url = f\"{BASE_SKIN_URL}{champion_name}_{skin_id}.jpg\"\n if not check_url(url):\n skin_id -= 1\n continue\n return url\n\n url = f\"{BASE_SKIN_URL}{champion_name}_0.jpg\"\n return url"
},
{
"identifier": "Colors",
"path": "league_rpc_linux/colors.py",
"snippet": "class Colors:\n \"\"\"\n Dataclass, storing the different colors that is used in the program.\n \"\"\"\n\n dred = \"\\033[31m\"\n dgreen = \"\\033[32m\"\n yellow = \"\\033[33m\"\n dblue = \"\\033[34m\"\n dmagenta = \"\\033[35m\"\n dcyan = \"\\033[36m\"\n lgrey = \"\\033[37m\"\n dgray = \"\\033[90m\"\n red = \"\\033[91m\"\n green = \"\\033[92m\"\n orange = \"\\033[93m\"\n blue = \"\\033[94m\"\n magenta = \"\\033[95m\"\n cyan = \"\\033[96m\"\n white = \"\\033[97m\"\n reset = \"\\033[0m\"\n\n @property\n def logo(self) -> str:\n \"\"\"Just prints the LEAGUE rpc logo, in your favorite Terminal Emulator.\"\"\"\n\n return rf\"\"\"\n {self.yellow} _ {self.dblue} _____ _____ _____ {self.reset}\n {self.yellow} | | {self.dblue}| __ \\| __ \\ / ____|{self.reset}\n {self.yellow} | | ___ __ _ __ _ _ _ ___ {self.dblue}| |__) | |__) | | {self.reset}\n {self.yellow} | | / _ \\/ _` |/ _` | | | |/ _ \\ {self.dblue}| _ /| ___/| | {self.reset}\n {self.yellow} | |___| __/ (_| | (_| | |_| | __/ {self.dblue}| | \\ \\| | | |____ {self.reset}\n {self.yellow} |______\\___|\\__,_|\\__, |\\__,_|\\___| {self.dblue}|_| \\_\\_| \\_____|{self.reset}\n {self.yellow} __/ | {self.reset}\n {self.yellow} |___/ {self.reset}\n \"\"\""
},
{
"identifier": "ALL_GAME_DATA_URL",
"path": "league_rpc_linux/const.py",
"snippet": "ALL_GAME_DATA_URL = \"https://127.0.0.1:2999/liveclientdata/allgamedata\""
},
{
"identifier": "CHAMPION_NAME_CONVERT_MAP",
"path": "league_rpc_linux/const.py",
"snippet": "CHAMPION_NAME_CONVERT_MAP = {\n \"AurelionSol\": \"Aurelion Sol\",\n \"Belveth\": \"Bel'Veth\",\n \"Chogath\": \"Cho'Gath\",\n \"DrMundo\": \"Dr. Mundo\",\n \"JarvanIV\": \"Jarvan IV\",\n \"Kaisa\": \"Kai'Sa\",\n \"Khazix\": \"Kha'Zix\",\n \"KogMaw\": \"Kog'Maw\",\n \"KSante\": \"K'Sante\",\n \"LeeSin\": \"Lee Sin\",\n \"MasterYi\": \"Master Yi\",\n \"MissFortune\": \"Miss Fortune\",\n \"Nunu\": \"Nunu & Willump\",\n \"RekSai\": \"Rek'Sai\",\n \"Renata\": \"Renata Glasc\",\n \"TahmKench\": \"Tahm Kench\",\n \"TwistedFate\": \"Twisted Fate\",\n \"Velkoz\": \"Vel'Koz\",\n \"MonkeyKing\": \"Wukong\",\n \"XinZhao\": \"Xin Zhao\",\n}"
},
{
"identifier": "DEFAULT_CLIENT_ID",
"path": "league_rpc_linux/const.py",
"snippet": "DEFAULT_CLIENT_ID = \"1185274747836174377\""
},
{
"identifier": "DISCORD_PROCESS_NAMES",
"path": "league_rpc_linux/const.py",
"snippet": "DISCORD_PROCESS_NAMES = [\"Discord\", \"DiscordPTB\", \"DiscordCanary\", \"electron\"]"
},
{
"identifier": "LEAGUE_OF_LEGENDS_LOGO",
"path": "league_rpc_linux/const.py",
"snippet": "LEAGUE_OF_LEGENDS_LOGO = \"https://github.com/Its-Haze/league-rpc-linux/blob/master/assets/leagueoflegends.png?raw=true\""
},
{
"identifier": "SMALL_TEXT",
"path": "league_rpc_linux/const.py",
"snippet": "SMALL_TEXT = \"github.com/Its-Haze/league-rpc-linux\""
},
{
"identifier": "get_current_ingame_time",
"path": "league_rpc_linux/gametime.py",
"snippet": "def get_current_ingame_time(default_time: int) -> int:\n \"\"\"\n Gets the current time of the game.\n \"\"\"\n url = \"https://127.0.0.1:2999/liveclientdata/gamestats\"\n if response := wait_until_exists(\n url=url,\n custom_message=\"\"\"\n Was unable to find the game time.\n Fallback (the time from which you executed this script) is now set as the 'elapsed time' of the game\n \"Contact @haze.dev on discord, or submit a ticket on Github.\n \"\"\",\n ):\n return int(response.json()[\"gameTime\"])\n return default_time"
},
{
"identifier": "get_creepscore",
"path": "league_rpc_linux/kda.py",
"snippet": "def get_creepscore() -> str:\n \"\"\"\n Get the current creepScore of your live game\n creepScore is updated every 10cs by Riot.\n \"\"\"\n response = get_current_user_stats()\n\n if isinstance(response, Response):\n parsed_data = response.json()\n creep_score = str(parsed_data[\"creepScore\"])\n return f\"{creep_score}cs\"\n\n return \"\""
},
{
"identifier": "get_gold",
"path": "league_rpc_linux/kda.py",
"snippet": "def get_gold() -> int:\n \"\"\"\n Get the current gold of your game.\n \"\"\"\n response = get_current_active_player_stats()\n\n if isinstance(response, Response):\n parsed_data = response.json()\n gold = int(parsed_data[\"currentGold\"])\n\n return gold\n return 0"
},
{
"identifier": "get_kda",
"path": "league_rpc_linux/kda.py",
"snippet": "def get_kda() -> str:\n \"\"\"\n Get the current KDA of your game.\n \"\"\"\n response = get_current_user_stats()\n\n if isinstance(response, Response):\n parsed_data = response.json()\n kills = str(parsed_data[\"kills\"])\n deaths = str(parsed_data[\"deaths\"])\n assists = str(parsed_data[\"assists\"])\n\n return f\"{kills}/{deaths}/{assists}\"\n return \"\""
},
{
"identifier": "get_level",
"path": "league_rpc_linux/kda.py",
"snippet": "def get_level() -> int:\n \"\"\"\n Get the current Level of your game.\n \"\"\"\n response = get_current_active_player_stats()\n\n if isinstance(response, Response):\n parsed_data = response.json()\n level = int(parsed_data[\"level\"])\n\n return level\n return 0"
},
{
"identifier": "start_connector",
"path": "league_rpc_linux/lcu_api/lcu_connector.py",
"snippet": "def start_connector(rpc_from_main: Presence, cli_args: Namespace) -> None:\n module_data.rpc = rpc_from_main\n module_data.cli_args = cli_args\n module_data.connector.start()"
},
{
"identifier": "wait_until_exists",
"path": "league_rpc_linux/polling.py",
"snippet": "def wait_until_exists(\n url: str,\n custom_message: str = \"\",\n expected_response_code: int = 200,\n timeout: int = 30,\n n_sleep: float | int = 5, # Not needed, but good to have.\n n_total_amount: int = 20,\n startup: int = False, # Set to True on the first time it tries to poll the local api. (onGameStart)\n) -> requests.Response | None:\n \"\"\"\n Polling on the local riot api until success is returned.\n \"\"\"\n\n for _ in range(n_total_amount):\n try:\n response = requests.get(url, timeout=timeout, verify=False)\n if response.status_code != expected_response_code:\n time.sleep(n_sleep)\n continue\n break\n except (\n NewConnectionError,\n ConnectionError,\n requests.exceptions.ConnectionError,\n ):\n # These errors occur either before the api has started..\n # Or when the game has ended\n if startup:\n # Make sure we continue to poll the api during the start of a game.\n time.sleep(n_sleep)\n continue\n\n # When game ends, we don't care about polling the api.\n return None\n else:\n print(custom_message)\n return None\n return response"
},
{
"identifier": "check_discord_process",
"path": "league_rpc_linux/processes/process.py",
"snippet": "def check_discord_process(\n process_names: list[str], client_id: str, wait_for_discord: int\n) -> pypresence.Presence:\n \"\"\"\n Checks if discord process is running.\n Connects to Discord Rich Presence if it is found.\n \"\"\"\n print(f\"{Colors.yellow}Checking if Discord is running...{Colors.reset}\")\n\n look_for_processes = f\"({Colors.green}{', '.join(process_names)}{Colors.blue})\"\n\n time.sleep(1)\n\n if wait_for_discord == -1:\n print(\n f\"{Colors.yellow}Will wait {Colors.green}indefinitely{Colors.yellow} for Discord to start... Remember, forever is a long time.. use {Colors.green}CTRL + C{Colors.yellow} if you would like to quit.{Colors.reset}\"\n )\n\n wait_time = 0\n while True:\n if not processes_exists(process_names=process_names):\n if wait_for_discord == -1:\n time.sleep(10)\n continue\n elif wait_time >= wait_for_discord:\n print(\n f\"\"\"{Colors.red}Discord not running!\n {Colors.blue}Could not find any process with the names {look_for_processes} running on your system.\n Is your Discord process named something else? Try --add-process <name>{Colors.reset}\"\"\"\n )\n\n if not wait_for_discord:\n print(\n f\"{Colors.green}Want to add waiting time for discord? Use --wait-for-discord <seconds>. (-1 = infinite, or until CTRL + C)\"\n )\n sys.exit()\n else:\n print(\n f\"{Colors.yellow}Will wait for Discord to start. Time left: {wait_for_discord - wait_time} seconds...\"\n )\n time.sleep(5)\n wait_time += 5\n continue\n break\n\n print(f\"{Colors.green}Discord is running! {Colors.dgray}(2/2){Colors.reset}\")\n\n for _ in range(5):\n time.sleep(3)\n try:\n rpc = pypresence.Presence(client_id)\n rpc.connect()\n break\n except pypresence.exceptions.InvalidID:\n print(\n f\"{Colors.red}Invalid Client ID. Make sure your Discord Application ID is correct.\"\n )\n sys.exit()\n except pypresence.exceptions.DiscordError:\n # Sometimes when starting discord, an error can occur saying that you logged out.\n # Weird but can be ignored since it usually works a second or so after.\n time.sleep(1)\n continue\n except pypresence.exceptions.DiscordNotFound:\n # Sometimes when starting discord, an error can occur saying that you logged out.\n # Weird but can be ignored since it usually works a second or so after.\n print(\n f\"{Colors.red}Pypresence (RPC) Could not find Discord installed and running on this machine.\"\n )\n sys.exit()\n except pypresence.exceptions.PipeClosed:\n # The pipe was closed. Catch this exception and re-connect your instance\n time.sleep(1)\n continue\n except ConnectionRefusedError:\n print(\n f\"\"\"\n {Colors.red}PyPresence encountered some problems, and could not connect to your Discord's RPC\n {Colors.blue}\n 1. One or more of the processes this script was looking for was found {look_for_processes}\n But Pypresence still was unable to detect a running discord-ipc\n 2. You may not have a discord ipc running. Try {Colors.reset}``{Colors.green}ls $XDG_RUNTIME_DIR | grep discord-ipc-{Colors.reset}``{Colors.blue} There should only be one result {Colors.reset}``{Colors.green}discord-ipc-0={Colors.reset}``\n {Colors.blue}3. Try restarting Discord. (Make sure the process is stopped before doing that.){Colors.reset}\n \"\"\"\n )\n # If process names were not found, but ipc exists. 
Try removing them & restarting\n if len((val := check_discord_ipc())) > 1:\n print(\n f\"\"\"\n {Colors.red}Detected multiple ipc's running.{Colors.reset}\n You seem to have more than 1 ipc running (which is unusual).\n If you know that discord is running, but pypresence keep failing to connect.\n It might be cause you have multiple ipc's running. try removing the following ipc's and {Colors.green}restart discord.{Colors.reset}\n {Colors.yellow}ipc's: {' , '.join(val)}{Colors.reset}\n run: ``{Colors.green}rm {' '.join(val)}{Colors.reset}``\n Or you just don't have discord up and running..\n \"\"\"\n )\n print(\n f\"{Colors.red}Raising Exception found by PyPresence, and exiting..{Colors.reset}\"\n )\n sys.exit()\n else:\n print(\n f\"{Colors.red}Discord process was found but RPC could not be connected.{Colors.reset}\"\n )\n sys.exit()\n return rpc"
},
{
"identifier": "check_league_client_process",
"path": "league_rpc_linux/processes/process.py",
"snippet": "def check_league_client_process(wait_for_league: int):\n \"\"\"\n Checks league client processes.\n \"\"\"\n league_processes = [\"LeagueClient.exe\", \"LeagueClientUx.exe\"]\n\n print(f\"{Colors.yellow}Checking if LeagueClient.exe is running...\")\n time.sleep(1)\n if wait_for_league == -1:\n print(\n f\"{Colors.yellow}Will wait {Colors.green}indefinitely{Colors.yellow} for League to start... Remember, forever is a long time.. use {Colors.green}CTRL + C{Colors.yellow} if you would like to quit.{Colors.reset}\"\n )\n\n wait_time = 0\n while True:\n if not processes_exists(process_names=league_processes):\n if wait_for_league == -1:\n time.sleep(10)\n continue\n elif wait_time >= wait_for_league:\n print(\n f\"{Colors.red}League Client is not running! Exiting after waiting {wait_for_league} seconds.{Colors.reset}\"\n )\n if not wait_for_league:\n print(\n f\"{Colors.green}Want to add waiting time for League? Use --wait-for-league <seconds>. (-1 = infinite, or until CTRL + C)\"\n )\n sys.exit()\n else:\n print(\n f\"{Colors.yellow}Will wait for League to start. Time left: {wait_for_league - wait_time} seconds...\"\n )\n time.sleep(5)\n wait_time += 5\n continue\n break\n\n print(f\"{Colors.green}League client is running!{Colors.dgray}(1/2){Colors.reset}\")"
},
{
"identifier": "player_state",
"path": "league_rpc_linux/processes/process.py",
"snippet": "def player_state() -> str | None:\n \"\"\"\n Returns the player state\n \"\"\"\n current_state: str | None = None\n\n if processes_exists(process_names=[\"LeagueClient.exe\", \"LeagueClientUx.exe\"]):\n if process_exists(\"League of Legends.exe\"):\n current_state = \"InGame\"\n else:\n current_state = \"InLobby\"\n return current_state"
},
{
"identifier": "discord_reconnect_attempt",
"path": "league_rpc_linux/reconnect.py",
"snippet": "def discord_reconnect_attempt(\n rpc: pypresence.Presence,\n amount_of_tries: int = 12,\n amount_of_waiting: int = 5,\n):\n \"\"\"\n Attempts to connect to discord, over a period of time. If it still fails, it will exit the program.\n \"\"\"\n for i in range(amount_of_tries):\n try:\n time.sleep(amount_of_waiting)\n print(\n f\"{Colors.yellow}({i + 1}/{amount_of_tries}). Attempting to reconnect..{Colors.reset}\"\n )\n rpc.connect()\n print(\n f\"{Colors.green}Successfully reconnected.. Proceeding as normal.{Colors.reset}\"\n )\n break\n\n except (\n pypresence.exceptions.DiscordNotFound,\n pypresence.exceptions.DiscordError,\n pypresence.exceptions.InvalidPipe,\n pypresence.exceptions.PipeClosed,\n ConnectionError,\n ):\n pass\n else:\n print(\n f\"{Colors.red}Was unable to reconnect to Discord. after trying for {amount_of_tries * amount_of_waiting} seconds.{Colors.reset}\"\n )\n sys.exit()"
}
] | import argparse
import sys
import threading
import time
import nest_asyncio
import pypresence
from league_rpc_linux.champion import gather_ingame_information, get_skin_asset
from league_rpc_linux.colors import Colors
from league_rpc_linux.const import (
ALL_GAME_DATA_URL,
CHAMPION_NAME_CONVERT_MAP,
DEFAULT_CLIENT_ID,
DISCORD_PROCESS_NAMES,
LEAGUE_OF_LEGENDS_LOGO,
SMALL_TEXT,
)
from league_rpc_linux.gametime import get_current_ingame_time
from league_rpc_linux.kda import get_creepscore, get_gold, get_kda, get_level
from league_rpc_linux.lcu_api.lcu_connector import start_connector
from league_rpc_linux.polling import wait_until_exists
from league_rpc_linux.processes.process import (
check_discord_process,
check_league_client_process,
player_state,
)
from league_rpc_linux.reconnect import discord_reconnect_attempt | 5,930 | target=start_connector,
args=(
rpc,
cli_args,
),
daemon=True,
)
lcu_process.start()
print(f"\n{Colors.green}Successfully connected to Discord RPC!{Colors.reset}")
############################################################
start_time = int(time.time())
while True:
try:
match player_state():
case "InGame":
print(
f"\n{Colors.dblue}Detected game! Will soon gather data and update discord RPC{Colors.reset}"
)
# Poll the local league api until 200 response.
wait_until_exists(
url=ALL_GAME_DATA_URL,
custom_message="Failed to reach the local league api",
startup=True,
)
(
champ_name,
skin_name,
skin_id,
gamemode,
_,
_,
) = gather_ingame_information()
if gamemode == "TFT":
# TFT RPC
while player_state() == "InGame":
rpc.update( # type:ignore
large_image="https://wallpapercave.com/wp/wp7413493.jpg",
large_text="Playing TFT",
details="Teamfight Tactics",
state=f"In Game · lvl: {get_level()}",
small_image=LEAGUE_OF_LEGENDS_LOGO,
small_text=SMALL_TEXT,
start=int(time.time())
- get_current_ingame_time(default_time=start_time),
)
time.sleep(10)
elif gamemode == "Arena":
# ARENA RPC
skin_asset = get_skin_asset(
champion_name=champ_name,
skin_id=skin_id,
)
print(
f"{Colors.green}Successfully gathered all data.{Colors.yellow}\nUpdating Discord Presence now!{Colors.reset}"
)
while player_state() == "InGame":
rpc.update( # type:ignore
large_image=skin_asset,
large_text=skin_name
if skin_name
else CHAMPION_NAME_CONVERT_MAP.get(
champ_name, champ_name
),
details=gamemode,
state=f"In Game {f'· {get_kda()} · lvl: {get_level()} · gold: {get_gold()}' if not cli_args.no_stats else ''}",
small_image=LEAGUE_OF_LEGENDS_LOGO,
small_text=SMALL_TEXT,
start=int(time.time())
- get_current_ingame_time(default_time=start_time),
)
time.sleep(10)
else:
# LEAGUE RPC
skin_asset = get_skin_asset(
champion_name=champ_name,
skin_id=skin_id,
)
print(
f"{Colors.green}Successfully gathered all data.{Colors.yellow}\nUpdating Discord Presence now!{Colors.reset}"
)
while player_state() == "InGame":
if not champ_name or not gamemode:
break
rpc.update( # type:ignore
large_image=skin_asset,
large_text=skin_name
if skin_name
else CHAMPION_NAME_CONVERT_MAP.get(
champ_name, champ_name
),
details=gamemode,
state=f"In Game {f'· {get_kda()} · {get_creepscore()}' if not cli_args.no_stats else ''}",
small_image=LEAGUE_OF_LEGENDS_LOGO,
small_text=SMALL_TEXT,
start=int(time.time())
- get_current_ingame_time(default_time=start_time),
)
time.sleep(10)
case "InLobby":
# Handled by lcu_process thread
# It will subscribe to websockets and update discord on events.
time.sleep(10)
case _:
print(
f"{Colors.red}LeagueOfLegends.exe was terminated. rpc shuting down..{Colors.reset}."
)
rpc.close()
sys.exit()
except pypresence.exceptions.PipeClosed:
# If the program crashes because pypresence failed to connect to a pipe. (Typically if Discord is closed.)
# The script will automatically try to reconnect..
# if it fails it will keep going until you either reconnect or after a long enough period of time has passed
print(
f"{Colors.red}Discord seems to be closed, will attempt to reconnect!{Colors.reset}"
)
|
# Discord Application: League of Linux
def main(cli_args: argparse.Namespace):
"""
This is the program that gets executed.
"""
############################################################
## Check Discord, RiotClient & LeagueClient processes ##
check_league_client_process(wait_for_league=cli_args.wait_for_league)
rpc = check_discord_process(
process_names=DISCORD_PROCESS_NAMES + cli_args.add_process,
client_id=cli_args.client_id,
wait_for_discord=cli_args.wait_for_discord,
)
# Start LCU_Thread
# This process will connect to the LCU API and updates the rpc based on data subscribed from the LCU API.
# In this case passing the rpc object to the process is easier than trying to return updated data from the process.
# Every In-Client update will be handled by the LCU_Thread process and will update the rpc accordingly.
lcu_process = threading.Thread(
target=start_connector,
args=(
rpc,
cli_args,
),
daemon=True,
)
lcu_process.start()
print(f"\n{Colors.green}Successfully connected to Discord RPC!{Colors.reset}")
############################################################
start_time = int(time.time())
while True:
try:
match player_state():
case "InGame":
print(
f"\n{Colors.dblue}Detected game! Will soon gather data and update discord RPC{Colors.reset}"
)
# Poll the local league api until 200 response.
wait_until_exists(
url=ALL_GAME_DATA_URL,
custom_message="Failed to reach the local league api",
startup=True,
)
(
champ_name,
skin_name,
skin_id,
gamemode,
_,
_,
) = gather_ingame_information()
if gamemode == "TFT":
# TFT RPC
while player_state() == "InGame":
rpc.update( # type:ignore
large_image="https://wallpapercave.com/wp/wp7413493.jpg",
large_text="Playing TFT",
details="Teamfight Tactics",
state=f"In Game · lvl: {get_level()}",
small_image=LEAGUE_OF_LEGENDS_LOGO,
small_text=SMALL_TEXT,
start=int(time.time())
- get_current_ingame_time(default_time=start_time),
)
time.sleep(10)
elif gamemode == "Arena":
# ARENA RPC
skin_asset = get_skin_asset(
champion_name=champ_name,
skin_id=skin_id,
)
print(
f"{Colors.green}Successfully gathered all data.{Colors.yellow}\nUpdating Discord Presence now!{Colors.reset}"
)
while player_state() == "InGame":
rpc.update( # type:ignore
large_image=skin_asset,
large_text=skin_name
if skin_name
else CHAMPION_NAME_CONVERT_MAP.get(
champ_name, champ_name
),
details=gamemode,
state=f"In Game {f'· {get_kda()} · lvl: {get_level()} · gold: {get_gold()}' if not cli_args.no_stats else ''}",
small_image=LEAGUE_OF_LEGENDS_LOGO,
small_text=SMALL_TEXT,
start=int(time.time())
- get_current_ingame_time(default_time=start_time),
)
time.sleep(10)
else:
# LEAGUE RPC
skin_asset = get_skin_asset(
champion_name=champ_name,
skin_id=skin_id,
)
print(
f"{Colors.green}Successfully gathered all data.{Colors.yellow}\nUpdating Discord Presence now!{Colors.reset}"
)
while player_state() == "InGame":
if not champ_name or not gamemode:
break
rpc.update( # type:ignore
large_image=skin_asset,
large_text=skin_name
if skin_name
else CHAMPION_NAME_CONVERT_MAP.get(
champ_name, champ_name
),
details=gamemode,
state=f"In Game {f'· {get_kda()} · {get_creepscore()}' if not cli_args.no_stats else ''}",
small_image=LEAGUE_OF_LEGENDS_LOGO,
small_text=SMALL_TEXT,
start=int(time.time())
- get_current_ingame_time(default_time=start_time),
)
time.sleep(10)
case "InLobby":
# Handled by lcu_process thread
# It will subscribe to websockets and update discord on events.
time.sleep(10)
case _:
print(
f"{Colors.red}LeagueOfLegends.exe was terminated. rpc shuting down..{Colors.reset}."
)
rpc.close()
sys.exit()
except pypresence.exceptions.PipeClosed:
# If the program crashes because pypresence failed to connect to a pipe. (Typically if Discord is closed.)
# The script will automatically try to reconnect..
# if it fails it will keep going until you either reconnect or after a long enough period of time has passed
print(
f"{Colors.red}Discord seems to be closed, will attempt to reconnect!{Colors.reset}"
) | discord_reconnect_attempt(rpc, amount_of_tries=12, amount_of_waiting=5) | 19 | 2023-12-15 22:21:53+00:00 | 8k |
huahuahuage/Bert-VITS2-Speech | api/tts.py | [
{
"identifier": "log_instance",
"path": "log.py",
"snippet": "DISABLED_LOGGER = [\"gradio.processing_utils\", \"gradio\", \"httpx\"]\r"
},
{
"identifier": "config_instance",
"path": "config.py",
"snippet": "CONFIG_PATH = \"config.json\"\r\ndef read_config(config_path:str) -> dict:\r\n def __init__(self) -> None:\r\n def get(self, key, default=None):\r\nclass ONNX_CONFIG:\r"
},
{
"identifier": "infor_onnx_instance",
"path": "onnx_infer/onnx_infer.py",
"snippet": "BERT_ENABLE = config_instance.get(\"bert_enable\", True)\r\nCHINESE_CHARACTER_MARK = config_instance.get(\"onnx_tts_models_chinese_mark\", \"中文\")\r\nONNX_PROVIDERS = [config_instance.get(\"onnx_providers\", \"CPUExecutionProvider\")]\r\nMODELS_PATH = os.path.abspath(config_instance.get(\"onnx_tts_models\", \"onnx/models\"))\r\nMODELS_BASE_NAME = os.path.basename(MODELS_PATH)\r\nMODELS_PARENT_PATH = os.path.dirname(MODELS_PATH)\r\nMODELS_PREFIX = os.path.join(MODELS_PATH, os.path.basename(MODELS_PATH))\r\nONNX_MODELS_PATH = {\r\n \"config\": f\"{MODELS_PARENT_PATH}/{MODELS_BASE_NAME}.json\",\r\n \"enc\": f\"{MODELS_PREFIX}_enc_p.onnx\",\r\n \"emb_g\": f\"{MODELS_PREFIX}_emb.onnx\",\r\n \"dp\": f\"{MODELS_PREFIX}_dp.onnx\",\r\n \"sdp\": f\"{MODELS_PREFIX}_sdp.onnx\",\r\n \"flow\": f\"{MODELS_PREFIX}_flow.onnx\",\r\n \"dec\": f\"{MODELS_PREFIX}_dec.onnx\",\r\n}\r\nclass SpeakerMap:\r\nclass ONNX_MODELS:\r\nclass ONNX_RUNTINE:\r\nclass INFER_ONNX:\r\n def __init__(self) -> None:\r\n def get_jp_speaker_name(self, speaker_name: str):\r\n def get_en_speaker_name(self, speaker_name: str):\r\n def __init__(self):\r\n def __call__(\r\n self,\r\n seq: np.int64,\r\n tone: np.int64,\r\n language_id: np.int64,\r\n bert_zh: np.float32,\r\n bert_jp: np.float32,\r\n bert_en: np.float32,\r\n speaker_id: int,\r\n seed: int = 114514,\r\n seq_noise_scale: float = 0.8,\r\n sdp_noise_scale: float = 0.6,\r\n length_scale: float = 1.0,\r\n sdp_ratio: float = 0.2,\r\n emotion: int = 0,\r\n ):\r\n def get_config(self, key: str, default=None):\r\ndef __add_blank(phone, tone, language, word2ph):\r\ndef convert_pad_shape(pad_shape):\r\ndef sequence_mask(length, max_length=None):\r\ndef generate_path(duration, mask):\r\ndef intersperse(lst, item):\r\ndef get_text(text: str, language: str, add_blank: bool = True) -> tuple:\r\n def __init__(self) -> None:\r\n def get_speaker_id(self, speaker_name: str, chinese_only: bool = True) -> int:\r\n def get_full_speaker_name(self, speaker_name: str, language_str: str = \"ZH\"):\r\n def __clamp(\r\n value: int | float, min_value: int | float = 0, max_value: int | float = 9\r\n ):\r\n def __skip_start(phones, tones, language_id, zh_bert, jp_bert, en_bert):\r\n def __skip_end(phones, tones, language_id, zh_bert, jp_bert, en_bert):\r\n def __params_specification(\r\n self,\r\n sdp_ratio: float,\r\n noise_scale: float,\r\n noise_scale_w: float,\r\n length_scale: float,\r\n emotion: int,\r\n ):\r\n def __text_to_model_inputs(\r\n self,\r\n text: str,\r\n language: str = \"ZH\",\r\n skip_start: bool = False,\r\n skip_end: bool = False,\r\n add_blank: bool = True,\r\n ):\r\n def infer(\r\n self,\r\n text: str,\r\n speaker_name: str,\r\n language: str = \"ZH\",\r\n sdp_ratio: float = 0.2,\r\n noise_scale: float = 0.8,\r\n noise_scale_w: float = 0.6,\r\n length_scale: float = 1.0,\r\n emotion: int = 7,\r\n seed: int = 114514,\r\n skip_start: bool = False,\r\n skip_end: bool = False,\r\n add_blank: bool = True,\r\n ) -> np.float32:\r\n def infer_multilang(\r\n self,\r\n text_list: list,\r\n speaker_name: str,\r\n language_list: list = [\"ZH\"],\r\n sdp_ratio: float = 0.2,\r\n noise_scale: float = 0.8,\r\n noise_scale_w: float = 0.6,\r\n length_scale: float = 1.0,\r\n emotion: int = 7,\r\n seed: int = 114514,\r\n skip_start: bool = False,\r\n skip_end: bool = False,\r\n add_blank: bool = True,\r\n ) -> np.float32:\r"
},
{
"identifier": "split_text",
"path": "api/split.py",
"snippet": "def split_text(text: str) -> Tuple[list, list]:\r\n \"\"\"\r\n 自动切割混合文本\r\n \"\"\"\r\n other_text_segments = __split_jp_en_text(text)\r\n # print(other_text_segments)\r\n text_segments = __divide_text(text, other_text_segments)\r\n\r\n if not text_segments:\r\n return None\r\n\r\n text_list = []\r\n language_list = []\r\n for text_segment in text_segments:\r\n text_list.append(text_segment[2])\r\n language_list.append(text_segment[3])\r\n return text_list, language_list\r"
},
{
"identifier": "text_split_to_sentence",
"path": "api/split.py",
"snippet": "def text_split_to_sentence(text: str) -> list:\r\n \"\"\"\r\n 将长文本按段落、句子、句内分别划分,生成三级列表\r\n \"\"\"\r\n # 首先按段落划分\r\n text_paragraph_list = __split_by_paragraph(text=text)\r\n # print(text_paragraph_list)\r\n if len(text_paragraph_list) == 0:\r\n return []\r\n third_list = []\r\n for text_paragraph in text_paragraph_list:\r\n # 按句子划分\r\n secondary_list = []\r\n text_sentence_list = __split_by_sentence(text_paragraph)\r\n for text_sentence in text_sentence_list:\r\n # print(__split_by_within_sentence(text_sentence))\r\n secondary_list.append(__split_by_within_sentence(text_sentence))\r\n third_list.append(secondary_list)\r\n return third_list\r"
},
{
"identifier": "rebuild_temp_dir",
"path": "api/utils.py",
"snippet": "def rebuild_temp_dir(dir_path: str, tips: str = \"正在清空API接口语音缓存...\"):\r\n \"\"\"\r\n 清空重建缓存文件夹\r\n \"\"\"\r\n # 删除缓存\r\n try:\r\n shutil.rmtree(dir_path)\r\n log_instance.info(tips)\r\n except OSError as e:\r\n pass\r\n # 重新建立缓存文件夹\r\n os.makedirs(dir_path, exist_ok=True)\r"
}
] | import os
import numpy as np
from uuid import uuid4
from log import log_instance
from config import config_instance
from scipy.io import wavfile
from typing import Callable, List
from dataclasses import dataclass
from onnx_infer.onnx_infer import infor_onnx_instance
from .split import split_text, text_split_to_sentence
from .utils import rebuild_temp_dir
| 3,747 | log_instance.info(
f"正在推理({str(index+1)}/{str(list_length)}):{speaker_name} -> {text}"
)
        # Decide whether automatic multilingual splitting is needed
if language.lower() == "auto":
try:
del params_dict["language"]
except KeyError:
pass
audio = __generate_multilang_audio(**params_dict)
else:
params_dict["language"] = language
audio = __generate_single_audio(**params_dict)
        # Append every synthesized speech segment to the list
within_audio_list.append(audio)
        # Insert silence data
slient_audio = __generate_slient_audio(interval_time=within_interval)
within_audio_list.append(slient_audio)
    # Drop the trailing silence segment
within_audio_list.pop()
    # Concatenate the audio segments in the list
audio_concat = np.concatenate(within_audio_list, axis=2)
return audio_concat
def __generate_multi_sentence(
text_list: List[str],
speaker_name: str,
language: str = "ZH",
sdp_ratio: float = SDP_RATIO,
noise_scale: float = NOISE,
noise_scale_w: float = NOISEW,
length_scale: float = LENGTH,
emotion: float = EMOTION,
seed: int = 114514,
within_interval: float = 0.5,
sentence_interval: float = 1.0,
) -> np.float32:
"""
    Generate speech from multiple sentences.
"""
    # Capture the local arguments
params_dict: dict = locals()
del params_dict["text_list"]
del params_dict["sentence_interval"]
sentence_audio_list = []
for whithin_text_list in text_list:
        # Synthesize the sentence list into one paragraph of audio data
params_dict["text_list"] = whithin_text_list
sentence_audio = __generate_multi_within(**params_dict)
sentence_audio_list.append(sentence_audio)
        # Insert silence data
slient_audio = __generate_slient_audio(interval_time=sentence_interval)
sentence_audio_list.append(slient_audio)
    # Drop the trailing silence segment
sentence_audio_list.pop()
audio_concat = np.concatenate(sentence_audio_list, axis=2)
return audio_concat
def generate_tts_auto(
text: str,
speaker_name: str,
language: str = "ZH",
sdp_ratio: float = 0.2,
noise_scale: float = 0.6,
noise_scale_w: float = 0.8,
length_scale: float = 1.0,
emotion: int = 7,
seed: int = 114514,
within_interval: float = 0.5,
sentence_interval: float = 1.0,
paragraph_interval: float = 2.0,
) -> np.float32:
"""
    Automatically split the text and generate speech.
"""
    # Capture the local arguments
params_dict: dict = locals()
del params_dict["text"]
del params_dict["paragraph_interval"]
    # Split the text by sentence into a three-level list
paragraph_sentences_text_list = text_split_to_sentence(text=text)
log_instance.debug(f"自动切分结果 {str(paragraph_sentences_text_list)}")
    # If the text is empty, return empty audio directly
if len(paragraph_sentences_text_list) == 0:
log_instance.warning("文本转语音推理失败:{speaker_name} -> {text} 文本内容不可为空。")
return __generate_empty_float32()
    # Collect the audio data of every sentence in each paragraph
paragraph_audio_list = []
for sentences_text_list in paragraph_sentences_text_list:
        # Synthesize the sentence list into one paragraph of audio data
params_dict["text_list"] = sentences_text_list
paragraph_audio = __generate_multi_sentence(**params_dict)
paragraph_audio_list.append(paragraph_audio)
        # Insert silence data
slient_audio = __generate_slient_audio(interval_time=paragraph_interval)
paragraph_audio_list.append(slient_audio)
    # Drop the trailing silence segment
paragraph_audio_list.pop()
audio_concat = np.concatenate(paragraph_audio_list, axis=2)
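    # Pause hierarchy (from the defaults above): 0.5 s between intra-sentence segments,
    # 1.0 s between sentences and 2.0 s between paragraphs, each inserted as a silent
    # segment and trimmed from the end of its level before concatenation.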
return audio_concat
@dataclass
class InferHander:
single: Callable = None
auto: Callable = None
class GenerateTTS:
def __init__(self) -> None:
# 重建语音缓存文件夹
|
EMOTION = 7
SDP_RATIO = 0.2
NOISE = 0.6
NOISEW = 0.8
LENGTH = 0.8
LANGUAGE = "ZH"
AUDIO_RATE = 44100
TEMP_PATH = os.path.abspath("./temp")
def change_to_wav(
file_path: str, data: np.float32, sample_rate: int = AUDIO_RATE
) -> str:
"""
    Convert the returned numpy data into an audio file.
"""
scaled_data = np.int16(data * 32767)
wavfile.write(file_path, sample_rate, scaled_data)
return file_path
def __generate_empty_float32(sample_rate: int = AUDIO_RATE) -> tuple:
"""
    Generate numpy data for empty audio.
"""
    # Return the sample rate together with half a second of silent samples
    return (
        sample_rate,
        np.concatenate([np.zeros(sample_rate // 2)]),
    )
def __generate_slient_audio(
interval_time: float = 1.5, sample_rate: int = AUDIO_RATE
) -> np.float32:
"""
    Generate silent audio data of the specified number of seconds.
"""
return np.zeros((int)(sample_rate * interval_time), dtype=np.float32).reshape(
1, 1, int(sample_rate * interval_time)
)
def __generate_single_audio(
text: str,
speaker_name: str,
language: str = "ZH",
sdp_ratio: float = SDP_RATIO,
noise_scale: float = NOISE,
noise_scale_w: float = NOISEW,
length_scale: float = LENGTH,
emotion: float = EMOTION,
seed: int = 114514,
) -> np.float32:
"""
    Generate single-language audio from the text.
"""
audio = infor_onnx_instance.infer(
text=text,
speaker_name=speaker_name,
language=language,
sdp_ratio=sdp_ratio,
noise_scale=noise_scale,
noise_scale_w=noise_scale_w,
length_scale=length_scale,
emotion=emotion,
seed=seed,
)
return audio
def __generate_multilang_audio(
text: str,
speaker_name: str,
sdp_ratio: float = SDP_RATIO,
noise_scale: float = NOISE,
noise_scale_w: float = NOISEW,
length_scale: float = LENGTH,
emotion: float = EMOTION,
seed: int = 114514,
) -> np.float32:
"""
    Automatically split the text and generate mixed multilingual audio.
"""
text_list, language_list = split_text(text)
if not language_list:
log_instance.warning("文本转语音推理失败:{speaker_name} -> {text} 文本内容不可为空。")
return __generate_empty_float32()
elif len(language_list) == 1:
audio = infor_onnx_instance.infer(
text=text_list[0],
speaker_name=speaker_name,
language=language_list[0],
sdp_ratio=sdp_ratio,
noise_scale=noise_scale,
noise_scale_w=noise_scale_w,
length_scale=length_scale,
emotion=emotion,
seed=seed,
)
else:
audio = infor_onnx_instance.infer_multilang(
text_list=text_list,
speaker_name=speaker_name,
language_list=language_list,
sdp_ratio=sdp_ratio,
noise_scale=noise_scale,
noise_scale_w=noise_scale_w,
length_scale=length_scale,
emotion=emotion,
seed=seed,
)
return audio
def __generate_multi_within(
text_list: List[str],
speaker_name: str,
language: str = "ZH",
sdp_ratio: float = SDP_RATIO,
noise_scale: float = NOISE,
noise_scale_w: float = NOISEW,
length_scale: float = LENGTH,
emotion: float = EMOTION,
seed: int = 114514,
within_interval: float = 0.5,
) -> np.float32:
"""
    Generate speech from multiple intra-sentence text segments.
"""
    # Capture the local arguments
params_dict: dict = locals()
del params_dict["text_list"]
del params_dict["within_interval"]
within_audio_list = []
list_length = len(text_list)
for index, text in enumerate(text_list):
params_dict["text"] = text
log_instance.info(
f"正在推理({str(index+1)}/{str(list_length)}):{speaker_name} -> {text}"
)
        # Decide whether automatic multilingual splitting is needed
if language.lower() == "auto":
try:
del params_dict["language"]
except KeyError:
pass
audio = __generate_multilang_audio(**params_dict)
else:
params_dict["language"] = language
audio = __generate_single_audio(**params_dict)
        # Append every synthesized speech segment to the list
within_audio_list.append(audio)
        # Insert silence data
slient_audio = __generate_slient_audio(interval_time=within_interval)
within_audio_list.append(slient_audio)
    # Drop the trailing silence segment
within_audio_list.pop()
    # Concatenate the audio segments in the list
audio_concat = np.concatenate(within_audio_list, axis=2)
return audio_concat
def __generate_multi_sentence(
text_list: List[str],
speaker_name: str,
language: str = "ZH",
sdp_ratio: float = SDP_RATIO,
noise_scale: float = NOISE,
noise_scale_w: float = NOISEW,
length_scale: float = LENGTH,
emotion: float = EMOTION,
seed: int = 114514,
within_interval: float = 0.5,
sentence_interval: float = 1.0,
) -> np.float32:
"""
    Generate speech from multiple sentences.
"""
    # Capture the local arguments
params_dict: dict = locals()
del params_dict["text_list"]
del params_dict["sentence_interval"]
sentence_audio_list = []
for whithin_text_list in text_list:
        # Synthesize the sentence list into one paragraph of audio data
params_dict["text_list"] = whithin_text_list
sentence_audio = __generate_multi_within(**params_dict)
sentence_audio_list.append(sentence_audio)
        # Insert silence data
slient_audio = __generate_slient_audio(interval_time=sentence_interval)
sentence_audio_list.append(slient_audio)
    # Drop the trailing silence segment
sentence_audio_list.pop()
audio_concat = np.concatenate(sentence_audio_list, axis=2)
return audio_concat
def generate_tts_auto(
text: str,
speaker_name: str,
language: str = "ZH",
sdp_ratio: float = 0.2,
noise_scale: float = 0.6,
noise_scale_w: float = 0.8,
length_scale: float = 1.0,
emotion: int = 7,
seed: int = 114514,
within_interval: float = 0.5,
sentence_interval: float = 1.0,
paragraph_interval: float = 2.0,
) -> np.float32:
"""
    Automatically split the text and generate speech.
"""
    # Capture the local arguments
params_dict: dict = locals()
del params_dict["text"]
del params_dict["paragraph_interval"]
    # Split the text by sentence into a three-level list
paragraph_sentences_text_list = text_split_to_sentence(text=text)
log_instance.debug(f"自动切分结果 {str(paragraph_sentences_text_list)}")
    # If the text is empty, return empty audio directly
if len(paragraph_sentences_text_list) == 0:
log_instance.warning("文本转语音推理失败:{speaker_name} -> {text} 文本内容不可为空。")
return __generate_empty_float32()
    # Collect the audio data of every sentence in each paragraph
paragraph_audio_list = []
for sentences_text_list in paragraph_sentences_text_list:
# 句子列表数据合成一个段落音频数据
params_dict["text_list"] = sentences_text_list
paragraph_audio = __generate_multi_sentence(**params_dict)
paragraph_audio_list.append(paragraph_audio)
        # Insert silence data
slient_audio = __generate_slient_audio(interval_time=paragraph_interval)
paragraph_audio_list.append(slient_audio)
    # Drop the trailing silence segment
paragraph_audio_list.pop()
audio_concat = np.concatenate(paragraph_audio_list, axis=2)
return audio_concat
@dataclass
class InferHander:
single: Callable = None
auto: Callable = None
class GenerateTTS:
def __init__(self) -> None:
# 重建语音缓存文件夹
| rebuild_temp_dir(TEMP_PATH)
| 5 | 2023-12-21 13:50:50+00:00 | 8k |
haseeb-heaven/Gemini-Vision-Pro | script.py | [
{
"identifier": "Logger",
"path": "libs/logger.py",
"snippet": "class Logger:\n _logger = None\n\n @staticmethod\n def get_logger(file_name):\n if Logger._logger is None:\n Logger._logger = Logger._setup_logger(file_name)\n return Logger._logger\n\n @staticmethod\n def _setup_logger(file_name):\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.INFO)\n\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(module)s - %(message)s')\n\n # Create a file handler\n file_handler = logging.FileHandler(file_name)\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(formatter)\n\n # Create a stream handler to print log messages on the console\n stream_handler = logging.StreamHandler()\n stream_handler.setLevel(logging.INFO)\n\n # Add both handlers to the logger\n logger.addHandler(file_handler)\n logger.addHandler(stream_handler)\n\n return logger"
},
{
"identifier": "GeminiVision",
"path": "libs/gemini_vision.py",
"snippet": "class GeminiVision:\n def __init__(self,api_key=None,temperature=0.1,top_p=1,top_k=32,max_output_tokens=4096) -> None:\n self.logger = Logger.get_logger('gemini_vision_pro.log')\n self.logger.info(f\"Initializing Gemini Vision\")\n self.model = None\n self.api_key = api_key\n self.temperature = temperature\n self.top_p = top_p\n self.top_k = top_k\n self.max_output_tokens = max_output_tokens\n \n self.logger.info(f\"temperature: {self.temperature}\")\n self.logger.info(f\"top_p: {self.top_p}\")\n self.logger.info(f\"top_k: {self.top_k}\")\n self.logger.info(f\"max_output_tokens: {self.max_output_tokens}\")\n \n if self.api_key is None:\n self.logger.error(\"API key is not initialized\")\n\n # load the key from the .env file\n load_dotenv()\n api_key = os.getenv(\"GEMINI_API_KEY\")\n if not api_key:\n self.logger.error(\"No API key found in the .env file\")\n raise ValueError(\"No API key found in the .env file\")\n \n self.logger.info(f\"Gemini Vision configured success\")\n genai.configure(api_key=api_key)\n \n self.logger.info(f\"Setting up model\")\n self.setup_model()\n self.logger.info(f\"Model setup success\")\n\n def setup_model(self):\n try:\n # Set up the model\n generation_config = {\n \"temperature\": self.temperature,\n \"top_p\": self.top_p,\n \"top_k\": self.top_k,\n \"max_output_tokens\": self.max_output_tokens,\n }\n\n self.model = genai.GenerativeModel(model_name=\"gemini-pro-vision\",generation_config=generation_config)\n except Exception as e:\n self.logger.error(f\"Error setting up model: {e}\")\n raise\n\n def generate_content(self, contents):\n self.logger.info(f\"Generating contents\")\n \n # Check model and contents for errors.\n if self.model is None:\n self.logger.error(\"Model is not initialized\")\n raise ValueError(\"Model is not initialized\")\n\n if contents is None:\n self.logger.error(\"Contents is not initialized\")\n raise ValueError(\"Contents is not initialized\")\n \n # Print out the contents list for debugging\n self.logger.info(f\"Contents: {contents}\")\n \n return self.model.generate_content(contents=contents)"
},
{
"identifier": "SpeechToText",
"path": "libs/speech.py",
"snippet": "class SpeechToText:\n \"\"\"\n A class that represents a speech-to-text converter.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initialize the recognizer and the microphone.\n \"\"\"\n self.recognizer = sr.Recognizer()\n self.microphone = sr.Microphone()\n self.logger = logging.getLogger(__name__)\n logging.basicConfig(level=logging.INFO)\n\n def listen_and_convert(self):\n \"\"\"\n Listen to the microphone and convert the speech to text.\n \"\"\"\n try:\n self.logger.info(\"Listening to the microphone...\")\n with self.microphone as source:\n audio = self.recognizer.listen(source)\n self.logger.info(\"Converting speech to text...\")\n text = self.recognizer.recognize_google(audio)\n self.logger.info(f\"Converted text: {text}\")\n return text\n except sr.UnknownValueError:\n self.logger.error(\"Google Speech Recognition could not understand the audio\")\n except sr.RequestError as e:\n self.logger.error(f\"Could not request results from Google Speech Recognition service: {str(e)}\")"
},
{
"identifier": "TextToSpeech",
"path": "libs/voice.py",
"snippet": "class TextToSpeech:\n \"\"\"\n A class that represents a text-to-speech converter.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initialize the logger.\n \"\"\"\n self.logger = logging.getLogger(__name__)\n logging.basicConfig(level=logging.INFO)\n\n def speak(self, text):\n \"\"\"\n Convert the given text to speech.\n \"\"\"\n try:\n self.logger.info(f\"Speaking the text: {text}\")\n tts = gTTS(text=text, lang='en')\n tts.save(\"speech.mp3\")\n os.system(\"mpg321 speech.mp3\")\n os.remove(\"speech.mp3\")\n except Exception as exception:\n self.logger.error(f\"An error occurred while trying to speak the text: {str(exception)}\")\n raise"
},
{
"identifier": "ImageCV2",
"path": "libs/image_cv2.py",
"snippet": "class ImageCV2:\n \n def __init__(self) -> None:\n # Set up logging\n self.logger = Logger.get_logger('gemini_vision.log')\n \n def open_webcam(self):\n cap = cv2.VideoCapture(0)\n if not cap.isOpened():\n self.logger.error(\"Cannot open webcam\")\n return None\n return cap\n\n def capture_image(self, cap):\n ret, frame = cap.read()\n self.logger.info(f\"Capturing image from webcam\")\n \n if not ret:\n self.logger.error(\"Cannot capture image\")\n return None\n\n self.logger.info(f\"Converting image PIL.Image\")\n # Convert the numpy.ndarray to a PIL.Image.Image\n image = Image.fromarray(frame)\n \n self.logger.info(f\"Converting image success\")\n return image\n \n def save_image(self, image, filename):\n self.logger.info(f\"Saving image to: {filename}\")\n \n # Convert the PIL.Image.Image back to a numpy.ndarray\n frame = np.array(image)\n \n # Save the image\n cv2.imwrite(filename, frame)\n \n def capture_image_from_webcam(self,image_name):\n self.logger.info(f\"Capturing image from webcam\")\n #time.sleep(5)\n \n cap = self.open_webcam()\n time.sleep(1)\n \n if cap is None:\n self.logger.error(\"Cannot open webcam\")\n return None\n\n image = self.capture_image(cap)\n \n # Check if frame is None\n if image is None:\n self.logger.error(\"Cannot capture image\")\n return None\n \n time.sleep(1)\n \n # Save the image\n self.save_image(image, image_name)\n self.logger.info(f\"Saved image to: {image_name}\")\n\n return image\n \n def show_webcam_feed(self):\n # Open the webcam (0 is the default webcam)\n cap = cv2.VideoCapture(0)\n\n while True:\n # Capture frame-by-frame\n ret, frame = cap.read()\n\n # Display the resulting frame\n cv2.imshow('Webcam Feed', frame)\n\n # Break the loop on 'q' key press\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n # When everything is done, release the capture and destroy the window\n cap.release()\n cv2.destroyAllWindows()\n \n def stop_webcam_feed(self,interval):\n time.sleep(interval)"
}
] | import streamlit as st
import cv2
import io
import traceback
import traceback
from streamlit_webrtc import webrtc_streamer, WebRtcMode, RTCConfiguration
from PIL import Image
from io import BytesIO
from pathlib import Path
from libs.logger import Logger
from libs.gemini_vision import GeminiVision
from libs.speech import SpeechToText
from libs.voice import TextToSpeech
from libs.image_cv2 import ImageCV2 | 4,022 | try:
return func(*args, **kwargs)
except Exception as exception:
st.session_state.logger.error(f"An error occurred in {func.__name__}: {exception}")
st.error(f"An error occurred: {exception}")
st.session_state.logger.error(traceback.format_exc())
st.stop()
return wrapper
@exception_handler
def validate_image(image_path):
if not image_path.exists():
st.session_state.logger.error(f"Could not find image: {image_path}")
raise FileNotFoundError(f"Could not find image: {image_path}")
@exception_handler
def process_image():
image_contents = [st.session_state['prompt'], st.session_state['captured_image']]
st.session_state.logger.info(f"Image data is: {st.session_state['captured_image']}")
response = st.session_state.gemini_vision.generate_content(image_contents)
if 'error' in response:
raise ValueError(f"An error occurred: {response}")
else:
if response.text:
st.session_state.tts.speak(response.text)
st.session_state.logger.info(f"Response: {response.text}")
st.session_state.response = response.text
@exception_handler
def get_prompt_from_mic():
prompt = st.session_state.stt.listen_and_convert()
return prompt
@exception_handler
def log_webrtc_context_states(webrtc_ctx):
if webrtc_ctx is not None:
# Log the state of the WebRTC context
st.session_state.logger.info(f"WebRTC context: {webrtc_ctx}")
st.session_state.logger.info(f"Is WebRTC playing: {webrtc_ctx.state.playing}")
st.session_state.logger.info(f"Is audio receiver ready: {webrtc_ctx.audio_receiver}")
st.session_state.logger.info(f"Is video receiver ready: {webrtc_ctx.video_receiver}")
else:
st.error("WebRTC context is None.")
@exception_handler
def capture_image():
st.session_state.logger.info("Attempting to capture image from webcam with ImageCV2...")
# Capture the image from the webcam
web_image = None
web_cam = ImageCV2()
web_image_file = "web_image.png"
web_image = web_cam.capture_image_from_webcam(web_image_file)
if web_image is None:
raise ValueError("Could not capture image from webcam")
# convert web_image from RGB to RGBA
web_image = web_image.convert("RGBA")
# Validate that an image is present
image_path = Path(web_image_file)
validate_image(image_path)
# Open the image
st.session_state.logger.info(f"Trying to open image: {web_image_file}")
web_image = Image.open(web_image_file)
return web_image
def display_support():
st.markdown("<div style='text-align: center;'>Share and Support</div>", unsafe_allow_html=True)
st.write("""
<div style="display: flex; flex-direction: column; align-items: center; justify-content: center;">
<ul style="list-style-type: none; margin: 0; padding: 0; display: flex;">
<li style="margin-right: 10px;"><a href="https://twitter.com/haseeb_heaven" target="_blank"><img src="https://img.icons8.com/color/32/000000/twitter--v1.png"/></a></li>
<li style="margin-right: 10px;"><a href="https://www.buymeacoffee.com/haseebheaven" target="_blank"><img src="https://img.icons8.com/color/32/000000/coffee-to-go--v1.png"/></a></li>
<li style="margin-right: 10px;"><a href="https://www.youtube.com/@HaseebHeaven/videos" target="_blank"><img src="https://img.icons8.com/color/32/000000/youtube-play.png"/></a></li>
<li><a href="https://github.com/haseeb-heaven/LangChain-Coder" target="_blank"><img src="https://img.icons8.com/color/32/000000/github--v1.png"/></a></li>
</ul>
</div>
""", unsafe_allow_html=True)
# Streamlit App
def streamlit_app():
# Google Logo and Title
st.write('<div style="display: flex; flex-direction: row; align-items: center; justify-content: center;"><a style="margin-right: 10px;" href="https://www.google.com" target="_blank"><img src="https://img.icons8.com/color/32/000000/google-logo.png"/></a><h1 style="margin-left: 10px;">Google - Gemini Vision</h1></div>', unsafe_allow_html=True)
# Display support
display_support()
# Initialize logger
if st.session_state.logger is None:
st.session_state.logger = Logger.get_logger('gemini_vision_pro.log')
# Display the Gemini Sidebar settings
with st.sidebar.title("Gemini Settings"):
st.session_state.api_key = st.sidebar.text_input("API Key", type="password")
st.session_state.temperature = st.sidebar.slider("Temperature", 0.0, 1.0, 0.3)
st.session_state.top_k = st.sidebar.number_input("Top K", value=32)
st.session_state.top_p = st.sidebar.slider("Top P", 0.0, 1.0, 1.0)
st.session_state.gemini_vision = GeminiVision(st.session_state.api_key, st.session_state.temperature, st.session_state.top_p, st.session_state.top_k)
if (st.session_state.api_key is not None and st.session_state.api_key != '') \
and (st.session_state.temperature is not None and st.session_state.temperature != '') \
and (st.session_state.top_k is not None and st.session_state.top_k != '') \
and (st.session_state.top_p is not None and st.session_state.top_p != ''):
st.toast("Settings updated successfully!", icon="👍")
else:
st.toast("Please enter all the settings.\nAPI Key is required", icon="❌")
raise ValueError("Please enter all values the settings.\nAPI Key is required")
# Initialize services once
if st.session_state.tts is None:
st.session_state.tts = TextToSpeech()
if st.session_state.stt is None:
| """
Description: This is the amazing Google Gemini Vision Pro.
This scans the image and using Gemini AI pro vision API it generates the descrption of the image.
It also uses the speech to text and text to speech to speak the prompt and display the description of the image.
It also uses the webcam to capture the image and display it.
Features:
1. Webcam detection using WebRTC, OpenCV and PIL
2. Speech to text using Google Cloud Speech to Text API
3. Text to speech using Google Cloud Text to Speech API
4. Image processing using Gemini AI Pro Vision API
5. Logging using Python logging module
6. Error handling using Python exception handling
Modules used:
1. Streamlit - Is is the Web App framework used to build the app
2. Streamlit Webrtc - It is used to capture the image from the webcam
3. OpenCV - It is used to capture the image from the webcam
4. PIL - It is image processing library used to convert the image.
5. gTTS - It is used to convert the text to speech
6. SpeechRecognition - It is used to convert the speech to text
7. google.cloud.speech - It is used to convert the speech to text
Author: HeavenHM
Date: 17-12-2023
Version: 1.0
"""
# Initialize session state
def init_session_state():
if 'api_key' not in st.session_state:
st.session_state['api_key'] = ''
if 'temperature' not in st.session_state:
st.session_state['temperature'] = 0.1
if 'top_k' not in st.session_state:
st.session_state['top_k'] = 32
if 'top_p' not in st.session_state:
st.session_state['top_p'] = 1.0
if 'captured_image' not in st.session_state:
st.session_state['captured_image'] = None
if 'prompt' not in st.session_state:
st.session_state['prompt'] = ''
if 'api_key' not in st.session_state:
st.session_state['api_key'] = ''
if 'captured_image' not in st.session_state:
st.session_state['captured_image'] = None
if 'prompt' not in st.session_state:
st.session_state['prompt'] = ''
if "logger" not in st.session_state:
st.session_state["logger"] = None
if "tts" not in st.session_state:
st.session_state["tts"] = None
if "stt" not in st.session_state:
st.session_state["stt"] = None
if "gemini_vision" not in st.session_state:
st.session_state["gemini_vision"] = None
if "webrtc_ctx" not in st.session_state:
st.session_state["webrtc_ctx"] = None
if "response" not in st.session_state:
st.session_state["response"] = None
# Exception handling decorator
def exception_handler(func):
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as exception:
st.session_state.logger.error(f"An error occurred in {func.__name__}: {exception}")
st.error(f"An error occurred: {exception}")
st.session_state.logger.error(traceback.format_exc())
st.stop()
return wrapper
@exception_handler
def validate_image(image_path):
if not image_path.exists():
st.session_state.logger.error(f"Could not find image: {image_path}")
raise FileNotFoundError(f"Could not find image: {image_path}")
@exception_handler
def process_image():
image_contents = [st.session_state['prompt'], st.session_state['captured_image']]
st.session_state.logger.info(f"Image data is: {st.session_state['captured_image']}")
response = st.session_state.gemini_vision.generate_content(image_contents)
if 'error' in response:
raise ValueError(f"An error occurred: {response}")
else:
if response.text:
st.session_state.tts.speak(response.text)
st.session_state.logger.info(f"Response: {response.text}")
st.session_state.response = response.text
@exception_handler
def get_prompt_from_mic():
prompt = st.session_state.stt.listen_and_convert()
return prompt
@exception_handler
def log_webrtc_context_states(webrtc_ctx):
if webrtc_ctx is not None:
# Log the state of the WebRTC context
st.session_state.logger.info(f"WebRTC context: {webrtc_ctx}")
st.session_state.logger.info(f"Is WebRTC playing: {webrtc_ctx.state.playing}")
st.session_state.logger.info(f"Is audio receiver ready: {webrtc_ctx.audio_receiver}")
st.session_state.logger.info(f"Is video receiver ready: {webrtc_ctx.video_receiver}")
else:
st.error("WebRTC context is None.")
@exception_handler
def capture_image():
st.session_state.logger.info("Attempting to capture image from webcam with ImageCV2...")
# Capture the image from the webcam
web_image = None
web_cam = ImageCV2()
web_image_file = "web_image.png"
web_image = web_cam.capture_image_from_webcam(web_image_file)
if web_image is None:
raise ValueError("Could not capture image from webcam")
# convert web_image from RGB to RGBA
web_image = web_image.convert("RGBA")
# Validate that an image is present
image_path = Path(web_image_file)
validate_image(image_path)
# Open the image
st.session_state.logger.info(f"Trying to open image: {web_image_file}")
web_image = Image.open(web_image_file)
return web_image
def display_support():
st.markdown("<div style='text-align: center;'>Share and Support</div>", unsafe_allow_html=True)
st.write("""
<div style="display: flex; flex-direction: column; align-items: center; justify-content: center;">
<ul style="list-style-type: none; margin: 0; padding: 0; display: flex;">
<li style="margin-right: 10px;"><a href="https://twitter.com/haseeb_heaven" target="_blank"><img src="https://img.icons8.com/color/32/000000/twitter--v1.png"/></a></li>
<li style="margin-right: 10px;"><a href="https://www.buymeacoffee.com/haseebheaven" target="_blank"><img src="https://img.icons8.com/color/32/000000/coffee-to-go--v1.png"/></a></li>
<li style="margin-right: 10px;"><a href="https://www.youtube.com/@HaseebHeaven/videos" target="_blank"><img src="https://img.icons8.com/color/32/000000/youtube-play.png"/></a></li>
<li><a href="https://github.com/haseeb-heaven/LangChain-Coder" target="_blank"><img src="https://img.icons8.com/color/32/000000/github--v1.png"/></a></li>
</ul>
</div>
""", unsafe_allow_html=True)
# Streamlit App
def streamlit_app():
# Google Logo and Title
st.write('<div style="display: flex; flex-direction: row; align-items: center; justify-content: center;"><a style="margin-right: 10px;" href="https://www.google.com" target="_blank"><img src="https://img.icons8.com/color/32/000000/google-logo.png"/></a><h1 style="margin-left: 10px;">Google - Gemini Vision</h1></div>', unsafe_allow_html=True)
# Display support
display_support()
# Initialize logger
if st.session_state.logger is None:
st.session_state.logger = Logger.get_logger('gemini_vision_pro.log')
# Display the Gemini Sidebar settings
with st.sidebar.title("Gemini Settings"):
st.session_state.api_key = st.sidebar.text_input("API Key", type="password")
st.session_state.temperature = st.sidebar.slider("Temperature", 0.0, 1.0, 0.3)
st.session_state.top_k = st.sidebar.number_input("Top K", value=32)
st.session_state.top_p = st.sidebar.slider("Top P", 0.0, 1.0, 1.0)
st.session_state.gemini_vision = GeminiVision(st.session_state.api_key, st.session_state.temperature, st.session_state.top_p, st.session_state.top_k)
if (st.session_state.api_key is not None and st.session_state.api_key != '') \
and (st.session_state.temperature is not None and st.session_state.temperature != '') \
and (st.session_state.top_k is not None and st.session_state.top_k != '') \
and (st.session_state.top_p is not None and st.session_state.top_p != ''):
st.toast("Settings updated successfully!", icon="👍")
else:
st.toast("Please enter all the settings.\nAPI Key is required", icon="❌")
raise ValueError("Please enter all values the settings.\nAPI Key is required")
# Initialize services once
if st.session_state.tts is None:
st.session_state.tts = TextToSpeech()
if st.session_state.stt is None: | st.session_state.stt = SpeechToText() | 2 | 2023-12-16 23:24:46+00:00 | 8k |
lipku/metahuman-stream | nerf_triplane/provider.py | [
{
"identifier": "get_audio_features",
"path": "nerf_triplane/utils.py",
"snippet": "def get_audio_features(features, att_mode, index):\n if att_mode == 0:\n return features[[index]]\n elif att_mode == 1:\n left = index - 8\n pad_left = 0\n if left < 0:\n pad_left = -left\n left = 0\n auds = features[left:index]\n if pad_left > 0:\n # pad may be longer than auds, so do not use zeros_like\n auds = torch.cat([torch.zeros(pad_left, *auds.shape[1:], device=auds.device, dtype=auds.dtype), auds], dim=0)\n return auds\n elif att_mode == 2:\n left = index - 4\n right = index + 4\n pad_left = 0\n pad_right = 0\n if left < 0:\n pad_left = -left\n left = 0\n if right > features.shape[0]:\n pad_right = right - features.shape[0]\n right = features.shape[0]\n auds = features[left:right]\n if pad_left > 0:\n auds = torch.cat([torch.zeros_like(auds[:pad_left]), auds], dim=0)\n if pad_right > 0:\n auds = torch.cat([auds, torch.zeros_like(auds[:pad_right])], dim=0) # [8, 16]\n return auds\n else:\n raise NotImplementedError(f'wrong att_mode: {att_mode}')"
},
{
"identifier": "get_rays",
"path": "nerf_triplane/utils.py",
"snippet": "@torch.cuda.amp.autocast(enabled=False)\ndef get_rays(poses, intrinsics, H, W, N=-1, patch_size=1, rect=None):\n ''' get rays\n Args:\n poses: [B, 4, 4], cam2world\n intrinsics: [4]\n H, W, N: int\n Returns:\n rays_o, rays_d: [B, N, 3]\n inds: [B, N]\n '''\n\n device = poses.device\n B = poses.shape[0]\n fx, fy, cx, cy = intrinsics\n\n if rect is not None:\n xmin, xmax, ymin, ymax = rect\n N = (xmax - xmin) * (ymax - ymin)\n\n i, j = custom_meshgrid(torch.linspace(0, W-1, W, device=device), torch.linspace(0, H-1, H, device=device)) # float\n i = i.t().reshape([1, H*W]).expand([B, H*W]) + 0.5\n j = j.t().reshape([1, H*W]).expand([B, H*W]) + 0.5\n\n results = {}\n\n if N > 0:\n N = min(N, H*W)\n\n if patch_size > 1:\n\n # random sample left-top cores.\n # NOTE: this impl will lead to less sampling on the image corner pixels... but I don't have other ideas.\n num_patch = N // (patch_size ** 2)\n inds_x = torch.randint(0, H - patch_size, size=[num_patch], device=device)\n inds_y = torch.randint(0, W - patch_size, size=[num_patch], device=device)\n inds = torch.stack([inds_x, inds_y], dim=-1) # [np, 2]\n\n # create meshgrid for each patch\n pi, pj = custom_meshgrid(torch.arange(patch_size, device=device), torch.arange(patch_size, device=device))\n offsets = torch.stack([pi.reshape(-1), pj.reshape(-1)], dim=-1) # [p^2, 2]\n\n inds = inds.unsqueeze(1) + offsets.unsqueeze(0) # [np, p^2, 2]\n inds = inds.view(-1, 2) # [N, 2]\n inds = inds[:, 0] * W + inds[:, 1] # [N], flatten\n\n inds = inds.expand([B, N])\n \n # only get rays in the specified rect\n elif rect is not None:\n # assert B == 1\n mask = torch.zeros(H, W, dtype=torch.bool, device=device)\n xmin, xmax, ymin, ymax = rect\n mask[xmin:xmax, ymin:ymax] = 1\n inds = torch.where(mask.view(-1))[0] # [nzn]\n inds = inds.unsqueeze(0) # [1, N]\n\n else:\n inds = torch.randint(0, H*W, size=[N], device=device) # may duplicate\n inds = inds.expand([B, N])\n\n i = torch.gather(i, -1, inds)\n j = torch.gather(j, -1, inds)\n\n\n else:\n inds = torch.arange(H*W, device=device).expand([B, H*W])\n \n results['i'] = i\n results['j'] = j\n results['inds'] = inds\n\n zs = torch.ones_like(i)\n xs = (i - cx) / fx * zs\n ys = (j - cy) / fy * zs\n directions = torch.stack((xs, ys, zs), dim=-1)\n directions = directions / torch.norm(directions, dim=-1, keepdim=True)\n \n rays_d = directions @ poses[:, :3, :3].transpose(-1, -2) # (B, N, 3)\n \n rays_o = poses[..., :3, 3] # [B, 3]\n rays_o = rays_o[..., None, :].expand_as(rays_d) # [B, N, 3]\n\n results['rays_o'] = rays_o\n results['rays_d'] = rays_d\n\n return results"
},
{
"identifier": "get_bg_coords",
"path": "nerf_triplane/utils.py",
"snippet": "@torch.cuda.amp.autocast(enabled=False)\ndef get_bg_coords(H, W, device):\n X = torch.arange(H, device=device) / (H - 1) * 2 - 1 # in [-1, 1]\n Y = torch.arange(W, device=device) / (W - 1) * 2 - 1 # in [-1, 1]\n xs, ys = custom_meshgrid(X, Y)\n bg_coords = torch.cat([xs.reshape(-1, 1), ys.reshape(-1, 1)], dim=-1).unsqueeze(0) # [1, H*W, 2], in [-1, 1]\n return bg_coords"
},
{
"identifier": "convert_poses",
"path": "nerf_triplane/utils.py",
"snippet": "@torch.cuda.amp.autocast(enabled=False)\ndef convert_poses(poses):\n # poses: [B, 4, 4]\n # return [B, 3], 4 rot, 3 trans\n out = torch.empty(poses.shape[0], 6, dtype=torch.float32, device=poses.device)\n out[:, :3] = matrix_to_euler_angles(poses[:, :3, :3])\n out[:, 3:] = poses[:, :3, 3]\n return out"
}
] | import os
import cv2
import glob
import json
import tqdm
import numpy as np
import matplotlib.pyplot as plt
import trimesh
import torch
import torch.nn.functional as F
import pandas as pd
from scipy.spatial.transform import Slerp, Rotation
from torch.utils.data import DataLoader
from .utils import get_audio_features, get_rays, get_bg_coords, convert_poses | 4,224 | aud_features = aud_features.long()
print(f'[INFO] load {self.opt.aud} aud_features: {aud_features.shape}')
self.poses = []
self.auds = []
self.eye_area = []
for f in tqdm.tqdm(frames, desc=f'Loading data'):
pose = np.array(f['transform_matrix'], dtype=np.float32) # [4, 4]
pose = nerf_matrix_to_ngp(pose, scale=self.scale, offset=self.offset)
self.poses.append(pose)
# find the corresponding audio to the image frame
if not self.opt.asr and self.opt.aud == '':
aud = aud_features[min(f['aud_id'], aud_features.shape[0] - 1)] # careful for the last frame...
self.auds.append(aud)
if self.opt.exp_eye:
if 'eye_ratio' in f:
area = f['eye_ratio']
else:
area = 0.25 # default value for opened eye
self.eye_area.append(area)
# load pre-extracted background image (should be the same size as training image...)
if self.opt.bg_img == 'white': # special
bg_img = np.ones((self.H, self.W, 3), dtype=np.float32)
elif self.opt.bg_img == 'black': # special
bg_img = np.zeros((self.H, self.W, 3), dtype=np.float32)
else: # load from file
bg_img = cv2.imread(self.opt.bg_img, cv2.IMREAD_UNCHANGED) # [H, W, 3]
if bg_img.shape[0] != self.H or bg_img.shape[1] != self.W:
bg_img = cv2.resize(bg_img, (self.W, self.H), interpolation=cv2.INTER_AREA)
bg_img = cv2.cvtColor(bg_img, cv2.COLOR_BGR2RGB)
bg_img = bg_img.astype(np.float32) / 255 # [H, W, 3/4]
self.bg_img = bg_img
self.poses = np.stack(self.poses, axis=0)
# smooth camera path...
if self.opt.smooth_path:
self.poses = smooth_camera_path(self.poses, self.opt.smooth_path_window)
self.poses = torch.from_numpy(self.poses) # [N, 4, 4]
if self.opt.asr:
# live streaming, no pre-calculated auds
self.auds = None
else:
# auds corresponding to images
if self.opt.aud == '':
self.auds = torch.stack(self.auds, dim=0) # [N, 32, 16]
            # novel auds may have a different length from the images
else:
self.auds = aud_features
self.bg_img = torch.from_numpy(self.bg_img)
if self.opt.exp_eye:
self.eye_area = np.array(self.eye_area, dtype=np.float32) # [N]
print(f'[INFO] eye_area: {self.eye_area.min()} - {self.eye_area.max()}')
if self.opt.smooth_eye:
# naive 5 window average
ori_eye = self.eye_area.copy()
for i in range(ori_eye.shape[0]):
start = max(0, i - 1)
end = min(ori_eye.shape[0], i + 2)
self.eye_area[i] = ori_eye[start:end].mean()
self.eye_area = torch.from_numpy(self.eye_area).view(-1, 1) # [N, 1]
# always preload
self.poses = self.poses.to(self.device)
if self.auds is not None:
self.auds = self.auds.to(self.device)
self.bg_img = self.bg_img.to(torch.half).to(self.device)
if self.opt.exp_eye:
self.eye_area = self.eye_area.to(self.device)
# load intrinsics
fl_x = fl_y = transform['focal_len']
cx = (transform['cx'] / downscale)
cy = (transform['cy'] / downscale)
self.intrinsics = np.array([fl_x, fl_y, cx, cy])
# directly build the coordinate meshgrid in [-1, 1]^2
self.bg_coords = get_bg_coords(self.H, self.W, self.device) # [1, H*W, 2] in [-1, 1]
def mirror_index(self, index):
size = self.poses.shape[0]
turn = index // size
res = index % size
if turn % 2 == 0:
return res
else:
return size - res - 1
def collate(self, index):
B = len(index) # a list of length 1
# assert B == 1
results = {}
# audio use the original index
if self.auds is not None:
|
# ref: https://github.com/NVlabs/instant-ngp/blob/b76004c8cf478880227401ae763be4c02f80b62f/include/neural-graphics-primitives/nerf_loader.h#L50
def nerf_matrix_to_ngp(pose, scale=0.33, offset=[0, 0, 0]):
new_pose = np.array([
[pose[1, 0], -pose[1, 1], -pose[1, 2], pose[1, 3] * scale + offset[0]],
[pose[2, 0], -pose[2, 1], -pose[2, 2], pose[2, 3] * scale + offset[1]],
[pose[0, 0], -pose[0, 1], -pose[0, 2], pose[0, 3] * scale + offset[2]],
[0, 0, 0, 1],
], dtype=np.float32)
return new_pose
def smooth_camera_path(poses, kernel_size=5):
# smooth the camera trajectory...
# poses: [N, 4, 4], numpy array
N = poses.shape[0]
K = kernel_size // 2
trans = poses[:, :3, 3].copy() # [N, 3]
rots = poses[:, :3, :3].copy() # [N, 3, 3]
for i in range(N):
start = max(0, i - K)
end = min(N, i + K + 1)
poses[i, :3, 3] = trans[start:end].mean(0)
poses[i, :3, :3] = Rotation.from_matrix(rots[start:end]).mean().as_matrix()
return poses
def polygon_area(x, y):
x_ = x - x.mean()
y_ = y - y.mean()
correction = x_[-1] * y_[0] - y_[-1]* x_[0]
main_area = np.dot(x_[:-1], y_[1:]) - np.dot(y_[:-1], x_[1:])
return 0.5 * np.abs(main_area + correction)
def visualize_poses(poses, size=0.1):
# poses: [B, 4, 4]
print(f'[INFO] visualize poses: {poses.shape}')
axes = trimesh.creation.axis(axis_length=4)
box = trimesh.primitives.Box(extents=(2, 2, 2)).as_outline()
box.colors = np.array([[128, 128, 128]] * len(box.entities))
objects = [axes, box]
for pose in poses:
# a camera is visualized with 8 line segments.
pos = pose[:3, 3]
a = pos + size * pose[:3, 0] + size * pose[:3, 1] + size * pose[:3, 2]
b = pos - size * pose[:3, 0] + size * pose[:3, 1] + size * pose[:3, 2]
c = pos - size * pose[:3, 0] - size * pose[:3, 1] + size * pose[:3, 2]
d = pos + size * pose[:3, 0] - size * pose[:3, 1] + size * pose[:3, 2]
dir = (a + b + c + d) / 4 - pos
dir = dir / (np.linalg.norm(dir) + 1e-8)
o = pos + dir * 3
segs = np.array([[pos, a], [pos, b], [pos, c], [pos, d], [a, b], [b, c], [c, d], [d, a], [pos, o]])
segs = trimesh.load_path(segs)
objects.append(segs)
trimesh.Scene(objects).show()
class NeRFDataset_Test:
def __init__(self, opt, device, downscale=1):
super().__init__()
self.opt = opt
self.device = device
self.downscale = downscale
self.scale = opt.scale # camera radius scale to make sure camera are inside the bounding box.
self.offset = opt.offset # camera offset
self.bound = opt.bound # bounding box half length, also used as the radius to random sample poses.
self.fp16 = opt.fp16
self.start_index = opt.data_range[0]
self.end_index = opt.data_range[1]
self.training = False
self.num_rays = -1
# load nerf-compatible format data.
with open(opt.pose, 'r') as f:
transform = json.load(f)
# load image size
self.H = int(transform['cy']) * 2 // downscale
self.W = int(transform['cx']) * 2 // downscale
# read images
frames = transform["frames"]
# use a slice of the dataset
        if self.end_index == -1: # abuse: -1 means use all remaining frames
self.end_index = len(frames)
frames = frames[self.start_index:self.end_index]
print(f'[INFO] load {len(frames)} frames.')
# only load pre-calculated aud features when not live-streaming
if not self.opt.asr:
aud_features = np.load(self.opt.aud)
aud_features = torch.from_numpy(aud_features)
# support both [N, 16] labels and [N, 16, K] logits
if len(aud_features.shape) == 3:
aud_features = aud_features.float().permute(0, 2, 1) # [N, 16, 29] --> [N, 29, 16]
if self.opt.emb:
print(f'[INFO] argmax to aud features {aud_features.shape} for --emb mode')
aud_features = aud_features.argmax(1) # [N, 16]
else:
assert self.opt.emb, "aud only provide labels, must use --emb"
aud_features = aud_features.long()
print(f'[INFO] load {self.opt.aud} aud_features: {aud_features.shape}')
self.poses = []
self.auds = []
self.eye_area = []
for f in tqdm.tqdm(frames, desc=f'Loading data'):
pose = np.array(f['transform_matrix'], dtype=np.float32) # [4, 4]
pose = nerf_matrix_to_ngp(pose, scale=self.scale, offset=self.offset)
self.poses.append(pose)
# find the corresponding audio to the image frame
if not self.opt.asr and self.opt.aud == '':
aud = aud_features[min(f['aud_id'], aud_features.shape[0] - 1)] # careful for the last frame...
self.auds.append(aud)
if self.opt.exp_eye:
if 'eye_ratio' in f:
area = f['eye_ratio']
else:
area = 0.25 # default value for opened eye
self.eye_area.append(area)
# load pre-extracted background image (should be the same size as training image...)
if self.opt.bg_img == 'white': # special
bg_img = np.ones((self.H, self.W, 3), dtype=np.float32)
elif self.opt.bg_img == 'black': # special
bg_img = np.zeros((self.H, self.W, 3), dtype=np.float32)
else: # load from file
bg_img = cv2.imread(self.opt.bg_img, cv2.IMREAD_UNCHANGED) # [H, W, 3]
if bg_img.shape[0] != self.H or bg_img.shape[1] != self.W:
bg_img = cv2.resize(bg_img, (self.W, self.H), interpolation=cv2.INTER_AREA)
bg_img = cv2.cvtColor(bg_img, cv2.COLOR_BGR2RGB)
bg_img = bg_img.astype(np.float32) / 255 # [H, W, 3/4]
self.bg_img = bg_img
self.poses = np.stack(self.poses, axis=0)
# smooth camera path...
if self.opt.smooth_path:
self.poses = smooth_camera_path(self.poses, self.opt.smooth_path_window)
self.poses = torch.from_numpy(self.poses) # [N, 4, 4]
if self.opt.asr:
# live streaming, no pre-calculated auds
self.auds = None
else:
# auds corresponding to images
if self.opt.aud == '':
self.auds = torch.stack(self.auds, dim=0) # [N, 32, 16]
            # novel auds may have a different length from the images
else:
self.auds = aud_features
self.bg_img = torch.from_numpy(self.bg_img)
if self.opt.exp_eye:
self.eye_area = np.array(self.eye_area, dtype=np.float32) # [N]
print(f'[INFO] eye_area: {self.eye_area.min()} - {self.eye_area.max()}')
if self.opt.smooth_eye:
# naive 5 window average
ori_eye = self.eye_area.copy()
for i in range(ori_eye.shape[0]):
start = max(0, i - 1)
end = min(ori_eye.shape[0], i + 2)
self.eye_area[i] = ori_eye[start:end].mean()
self.eye_area = torch.from_numpy(self.eye_area).view(-1, 1) # [N, 1]
# always preload
self.poses = self.poses.to(self.device)
if self.auds is not None:
self.auds = self.auds.to(self.device)
self.bg_img = self.bg_img.to(torch.half).to(self.device)
if self.opt.exp_eye:
self.eye_area = self.eye_area.to(self.device)
# load intrinsics
fl_x = fl_y = transform['focal_len']
cx = (transform['cx'] / downscale)
cy = (transform['cy'] / downscale)
self.intrinsics = np.array([fl_x, fl_y, cx, cy])
# directly build the coordinate meshgrid in [-1, 1]^2
self.bg_coords = get_bg_coords(self.H, self.W, self.device) # [1, H*W, 2] in [-1, 1]
def mirror_index(self, index):
size = self.poses.shape[0]
turn = index // size
res = index % size
if turn % 2 == 0:
return res
else:
return size - res - 1
def collate(self, index):
B = len(index) # a list of length 1
# assert B == 1
results = {}
# audio use the original index
if self.auds is not None: | auds = get_audio_features(self.auds, self.opt.att, index[0]).to(self.device) | 0 | 2023-12-19 01:32:46+00:00 | 8k |
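The get_audio_features helper carried in this row's context pads a fixed window of per-frame audio features around the current frame index. Below is a minimal standalone sketch of that padding idea; the function name, the half-window size and the tensor shapes are illustrative assumptions, not values taken from the repository.

import torch

def windowed_audio_features(features: torch.Tensor, index: int, half_window: int = 4) -> torch.Tensor:
    # Return a zero-padded window of 2 * half_window feature frames centred on `index`.
    # `features` is assumed to be [N, C, T]; out-of-range frames are filled with zeros,
    # similar in spirit to the att_mode == 2 branch shown in the context snippet above.
    left, right = index - half_window, index + half_window
    pad_left = max(0, -left)
    pad_right = max(0, right - features.shape[0])
    window = features[max(left, 0):min(right, features.shape[0])]
    if pad_left:
        window = torch.cat([torch.zeros(pad_left, *features.shape[1:], dtype=features.dtype), window], dim=0)
    if pad_right:
        window = torch.cat([window, torch.zeros(pad_right, *features.shape[1:], dtype=features.dtype)], dim=0)
    return window  # [2 * half_window, C, T]

if __name__ == "__main__":
    feats = torch.randn(10, 29, 16)                 # 10 frames of [29, 16] audio features
    print(windowed_audio_features(feats, 1).shape)  # torch.Size([8, 29, 16])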
MingtaoGuo/AnimateAnyone_unofficial | ldm/modules/diffusionmodules/openaimodel.py | [
{
"identifier": "checkpoint",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def checkpoint(func, inputs, params, flag):\n \"\"\"\n Evaluate a function without caching intermediate activations, allowing for\n reduced memory at the expense of extra compute in the backward pass.\n :param func: the function to evaluate.\n :param inputs: the argument sequence to pass to `func`.\n :param params: a sequence of parameters `func` depends on but does not\n explicitly take as arguments.\n :param flag: if False, disable gradient checkpointing.\n \"\"\"\n if flag:\n args = tuple(inputs) + tuple(params)\n return CheckpointFunction.apply(func, len(inputs), *args)\n else:\n return func(*inputs)"
},
{
"identifier": "conv_nd",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")"
},
{
"identifier": "linear",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def linear(*args, **kwargs):\n \"\"\"\n Create a linear module.\n \"\"\"\n return nn.Linear(*args, **kwargs)"
},
{
"identifier": "avg_pool_nd",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def avg_pool_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D average pooling module.\n \"\"\"\n if dims == 1:\n return nn.AvgPool1d(*args, **kwargs)\n elif dims == 2:\n return nn.AvgPool2d(*args, **kwargs)\n elif dims == 3:\n return nn.AvgPool3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")"
},
{
"identifier": "zero_module",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module"
},
{
"identifier": "normalization",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def normalization(channels):\n \"\"\"\n Make a standard normalization layer.\n :param channels: number of input channels.\n :return: an nn.Module for normalization.\n \"\"\"\n return GroupNorm32(32, channels)"
},
{
"identifier": "timestep_embedding",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):\n \"\"\"\n Create sinusoidal timestep embeddings.\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an [N x dim] Tensor of positional embeddings.\n \"\"\"\n if not repeat_only:\n half = dim // 2\n freqs = torch.exp(\n -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half\n ).to(device=timesteps.device)\n args = timesteps[:, None].float() * freqs[None]\n embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)\n if dim % 2:\n embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)\n else:\n embedding = repeat(timesteps, 'b -> b d', d=dim)\n return embedding"
},
{
"identifier": "SpatialSelfAttention",
"path": "ldm/modules/attention.py",
"snippet": "class SpatialSelfAttention(nn.Module):\n def __init__(self, in_channels):\n super().__init__()\n self.in_channels = in_channels\n\n self.norm = Normalize(in_channels)\n self.q = torch.nn.Conv2d(in_channels,\n in_channels,\n kernel_size=1,\n stride=1,\n padding=0)\n self.k = torch.nn.Conv2d(in_channels,\n in_channels,\n kernel_size=1,\n stride=1,\n padding=0)\n self.v = torch.nn.Conv2d(in_channels,\n in_channels,\n kernel_size=1,\n stride=1,\n padding=0)\n self.proj_out = torch.nn.Conv2d(in_channels,\n in_channels,\n kernel_size=1,\n stride=1,\n padding=0)\n\n def forward(self, x):\n h_ = x\n h_ = self.norm(h_)\n q = self.q(h_)\n k = self.k(h_)\n v = self.v(h_)\n\n # compute attention\n b,c,h,w = q.shape\n q = rearrange(q, 'b c h w -> b (h w) c')\n k = rearrange(k, 'b c h w -> b c (h w)')\n w_ = torch.einsum('bij,bjk->bik', q, k)\n\n w_ = w_ * (int(c)**(-0.5))\n w_ = torch.nn.functional.softmax(w_, dim=2)\n\n # attend to values\n v = rearrange(v, 'b c h w -> b c (h w)')\n w_ = rearrange(w_, 'b i j -> b j i')\n h_ = torch.einsum('bij,bjk->bik', v, w_)\n h_ = rearrange(h_, 'b c (h w) -> b c h w', h=h)\n h_ = self.proj_out(h_)\n\n return x+h_"
},
{
"identifier": "SpatialTransformer",
"path": "ldm/modules/attention.py",
"snippet": "class SpatialTransformer(nn.Module):\n \"\"\"\n Transformer block for image-like data.\n First, project the input (aka embedding)\n and reshape to b, t, d.\n Then apply standard transformer action.\n Finally, reshape to image\n NEW: use_linear for more efficiency instead of the 1x1 convs\n \"\"\"\n def __init__(self, in_channels, n_heads, d_head,\n depth=1, dropout=0., context_dim=None,\n disable_self_attn=False, use_linear=False,\n use_checkpoint=True):\n super().__init__()\n if exists(context_dim) and not isinstance(context_dim, list):\n context_dim = [context_dim]\n self.in_channels = in_channels\n inner_dim = n_heads * d_head\n self.norm = Normalize(in_channels)\n if not use_linear:\n self.proj_in = nn.Conv2d(in_channels,\n inner_dim,\n kernel_size=1,\n stride=1,\n padding=0)\n else:\n self.proj_in = nn.Linear(in_channels, inner_dim)\n\n self.transformer_blocks = nn.ModuleList(\n [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim[d],\n disable_self_attn=disable_self_attn, checkpoint=use_checkpoint)\n for d in range(depth)]\n )\n if not use_linear:\n self.proj_out = zero_module(nn.Conv2d(inner_dim,\n in_channels,\n kernel_size=1,\n stride=1,\n padding=0))\n else:\n self.proj_out = zero_module(nn.Linear(in_channels, inner_dim))\n self.use_linear = use_linear\n\n def forward(self, x, context=None):\n # note: if no context is given, cross-attention defaults to self-attention\n if not isinstance(context, list):\n context = [context]\n b, c, h, w = x.shape\n x_in = x\n x = self.norm(x)\n if not self.use_linear:\n x = self.proj_in(x)\n x = rearrange(x, 'b c h w -> b (h w) c').contiguous()\n if self.use_linear:\n x = self.proj_in(x)\n for i, block in enumerate(self.transformer_blocks):\n x = block(x, context=context[i])\n if self.use_linear:\n x = self.proj_out(x)\n x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()\n if not self.use_linear:\n x = self.proj_out(x)\n return x + x_in"
},
{
"identifier": "exists",
"path": "ldm/util.py",
"snippet": "def exists(x):\n return x is not None"
}
] | from abc import abstractmethod
from ldm.modules.diffusionmodules.util import (
checkpoint,
conv_nd,
linear,
avg_pool_nd,
zero_module,
normalization,
timestep_embedding,
)
from ldm.modules.attention import SpatialSelfAttention, SpatialTransformer
from ldm.util import exists
from omegaconf.listconfig import ListConfig
import math
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F | 3,750 | def forward(self, x):
assert x.shape[1] == self.channels
if self.dims == 3:
x = F.interpolate(
x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
)
else:
x = F.interpolate(x, scale_factor=2, mode="nearest")
if self.use_conv:
x = self.conv(x)
return x
class TransposedUpsample(nn.Module):
'Learned 2x upsampling without padding'
def __init__(self, channels, out_channels=None, ks=5):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2)
def forward(self,x):
return self.up(x)
class Downsample(nn.Module):
"""
A downsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
downsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
stride = 2 if dims != 3 else (1, 2, 2)
if use_conv:
self.op = conv_nd(
dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
)
else:
assert self.channels == self.out_channels
self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
def forward(self, x):
assert x.shape[1] == self.channels
return self.op(x)
class ResBlock(TimestepBlock):
"""
A residual block that can optionally change the number of channels.
:param channels: the number of input channels.
:param emb_channels: the number of timestep embedding channels.
:param dropout: the rate of dropout.
:param out_channels: if specified, the number of out channels.
:param use_conv: if True and out_channels is specified, use a spatial
convolution instead of a smaller 1x1 convolution to change the
channels in the skip connection.
:param dims: determines if the signal is 1D, 2D, or 3D.
:param use_checkpoint: if True, use gradient checkpointing on this module.
:param up: if True, use this block for upsampling.
:param down: if True, use this block for downsampling.
"""
def __init__(
self,
channels,
emb_channels,
dropout,
out_channels=None,
use_conv=False,
use_scale_shift_norm=False,
dims=2,
use_checkpoint=False,
up=False,
down=False,
):
super().__init__()
self.channels = channels
self.emb_channels = emb_channels
self.dropout = dropout
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.use_checkpoint = use_checkpoint
self.use_scale_shift_norm = use_scale_shift_norm
self.in_layers = nn.Sequential(
normalization(channels),
nn.SiLU(),
conv_nd(dims, channels, self.out_channels, 3, padding=1),
)
self.updown = up or down
if up:
self.h_upd = Upsample(channels, False, dims)
self.x_upd = Upsample(channels, False, dims)
elif down:
self.h_upd = Downsample(channels, False, dims)
self.x_upd = Downsample(channels, False, dims)
else:
self.h_upd = self.x_upd = nn.Identity()
self.emb_layers = nn.Sequential(
nn.SiLU(),
linear(
emb_channels,
2 * self.out_channels if use_scale_shift_norm else self.out_channels,
),
)
self.out_layers = nn.Sequential(
normalization(self.out_channels),
nn.SiLU(),
nn.Dropout(p=dropout),
|
# dummy replace
def convert_module_to_f16(x):
pass
def convert_module_to_f32(x):
pass
## go
class AttentionPool2d(nn.Module):
"""
Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
"""
def __init__(
self,
spacial_dim: int,
embed_dim: int,
num_heads_channels: int,
output_dim: int = None,
):
super().__init__()
self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5)
self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
self.num_heads = embed_dim // num_heads_channels
self.attention = QKVAttention(self.num_heads)
def forward(self, x):
b, c, *_spatial = x.shape
x = x.reshape(b, c, -1) # NC(HW)
x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1)
x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1)
x = self.qkv_proj(x)
x = self.attention(x)
x = self.c_proj(x)
return x[:, :, 0]
class TimestepBlock(nn.Module):
"""
Any module where forward() takes timestep embeddings as a second argument.
"""
@abstractmethod
def forward(self, x, emb):
"""
Apply the module to `x` given `emb` timestep embeddings.
"""
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
"""
A sequential module that passes timestep embeddings to the children that
support it as an extra input.
"""
def forward(self, x, emb, context=None):
for layer in self:
if isinstance(layer, TimestepBlock):
x = layer(x, emb)
elif isinstance(layer, ResBlockNoTime):
x = layer(x, emb)
elif isinstance(layer, SpatialTransformer):
x = layer(x, context)
else:
x = layer(x)
return x
class Upsample(nn.Module):
"""
An upsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
upsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
if use_conv:
self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding)
def forward(self, x):
assert x.shape[1] == self.channels
if self.dims == 3:
x = F.interpolate(
x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
)
else:
x = F.interpolate(x, scale_factor=2, mode="nearest")
if self.use_conv:
x = self.conv(x)
return x
class TransposedUpsample(nn.Module):
'Learned 2x upsampling without padding'
def __init__(self, channels, out_channels=None, ks=5):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2)
def forward(self,x):
return self.up(x)
class Downsample(nn.Module):
"""
A downsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
downsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
stride = 2 if dims != 3 else (1, 2, 2)
if use_conv:
self.op = conv_nd(
dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
)
else:
assert self.channels == self.out_channels
self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
def forward(self, x):
assert x.shape[1] == self.channels
return self.op(x)
class ResBlock(TimestepBlock):
"""
A residual block that can optionally change the number of channels.
:param channels: the number of input channels.
:param emb_channels: the number of timestep embedding channels.
:param dropout: the rate of dropout.
:param out_channels: if specified, the number of out channels.
:param use_conv: if True and out_channels is specified, use a spatial
convolution instead of a smaller 1x1 convolution to change the
channels in the skip connection.
:param dims: determines if the signal is 1D, 2D, or 3D.
:param use_checkpoint: if True, use gradient checkpointing on this module.
:param up: if True, use this block for upsampling.
:param down: if True, use this block for downsampling.
"""
def __init__(
self,
channels,
emb_channels,
dropout,
out_channels=None,
use_conv=False,
use_scale_shift_norm=False,
dims=2,
use_checkpoint=False,
up=False,
down=False,
):
super().__init__()
self.channels = channels
self.emb_channels = emb_channels
self.dropout = dropout
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.use_checkpoint = use_checkpoint
self.use_scale_shift_norm = use_scale_shift_norm
self.in_layers = nn.Sequential(
normalization(channels),
nn.SiLU(),
conv_nd(dims, channels, self.out_channels, 3, padding=1),
)
self.updown = up or down
if up:
self.h_upd = Upsample(channels, False, dims)
self.x_upd = Upsample(channels, False, dims)
elif down:
self.h_upd = Downsample(channels, False, dims)
self.x_upd = Downsample(channels, False, dims)
else:
self.h_upd = self.x_upd = nn.Identity()
self.emb_layers = nn.Sequential(
nn.SiLU(),
linear(
emb_channels,
2 * self.out_channels if use_scale_shift_norm else self.out_channels,
),
)
self.out_layers = nn.Sequential(
normalization(self.out_channels),
nn.SiLU(),
nn.Dropout(p=dropout), | zero_module( | 4 | 2023-12-16 03:31:33+00:00 | 8k |
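The timestep_embedding snippet in this row's context explains the standard sinusoidal embedding used for diffusion timesteps. The short sketch below restates that computation as a self-contained function; the function name and the example dimensions are assumptions for illustration only.

import math
import torch

def sinusoidal_timestep_embedding(timesteps: torch.Tensor, dim: int, max_period: int = 10000) -> torch.Tensor:
    # Map a batch of scalar timesteps [N] to [N, dim] cos/sin embeddings.
    half = dim // 2
    freqs = torch.exp(-math.log(max_period) * torch.arange(half, dtype=torch.float32) / half)
    args = timesteps[:, None].float() * freqs[None]            # [N, half]
    emb = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
    if dim % 2:                                                 # pad one zero column for odd dims
        emb = torch.cat([emb, torch.zeros_like(emb[:, :1])], dim=-1)
    return emb

if __name__ == "__main__":
    t = torch.tensor([0, 10, 500])
    print(sinusoidal_timestep_embedding(t, 256).shape)          # torch.Size([3, 256])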
yasserben/CLOUDS | demo.py | [
{
"identifier": "add_maskformer2_config",
"path": "clouds/config.py",
"snippet": "def add_maskformer2_config(cfg):\n \"\"\"\n Add config for MASK_FORMER.\n \"\"\"\n # NOTE: configs from original maskformer\n # data config\n # select the dataset mapper\n cfg.INPUT.DATASET_MAPPER_NAME = \"mask_former_semantic\"\n # Color augmentation\n cfg.INPUT.COLOR_AUG_SSD = False\n # We retry random cropping until no single category in semantic segmentation GT occupies more\n # than `SINGLE_CATEGORY_MAX_AREA` part of the crop.\n cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0\n # Pad image and segmentation GT in dataset mapper.\n cfg.INPUT.SIZE_DIVISIBILITY = -1\n\n # solver config\n # weight decay on embedding\n cfg.SOLVER.WEIGHT_DECAY_EMBED = 0.0\n # optimizer\n cfg.SOLVER.OPTIMIZER = \"ADAMW\"\n cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1\n\n # mask_former model config\n cfg.MODEL.MASK_FORMER = CN()\n\n # loss\n cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION = True\n cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT = 0.1\n cfg.MODEL.MASK_FORMER.CLASS_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.DICE_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.MASK_WEIGHT = 20.0\n\n # transformer config\n cfg.MODEL.MASK_FORMER.NHEADS = 8\n cfg.MODEL.MASK_FORMER.DROPOUT = 0.1\n cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD = 2048\n cfg.MODEL.MASK_FORMER.ENC_LAYERS = 0\n cfg.MODEL.MASK_FORMER.DEC_LAYERS = 6\n cfg.MODEL.MASK_FORMER.PRE_NORM = False\n\n cfg.MODEL.MASK_FORMER.HIDDEN_DIM = 256\n cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES = 100\n\n cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE = \"res5\"\n cfg.MODEL.MASK_FORMER.ENFORCE_INPUT_PROJ = False\n\n # mask_former inference config\n cfg.MODEL.MASK_FORMER.TEST = CN()\n cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON = True\n cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON = False\n cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON = False\n cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE = False\n\n # Sometimes `backbone.size_divisibility` is set to 0 for some backbone (e.g. 
ResNet)\n # you can use this config to override\n cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY = 32\n\n # pixel decoder config\n cfg.MODEL.SEM_SEG_HEAD.MASK_DIM = 256\n # adding transformer in pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS = 0\n # pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME = \"BasePixelDecoder\"\n\n # swin transformer backbone\n cfg.MODEL.SWIN = CN()\n cfg.MODEL.SWIN.PRETRAIN_IMG_SIZE = 224\n cfg.MODEL.SWIN.PATCH_SIZE = 4\n cfg.MODEL.SWIN.EMBED_DIM = 96\n cfg.MODEL.SWIN.DEPTHS = [2, 2, 6, 2]\n cfg.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24]\n cfg.MODEL.SWIN.WINDOW_SIZE = 7\n cfg.MODEL.SWIN.MLP_RATIO = 4.0\n cfg.MODEL.SWIN.QKV_BIAS = True\n cfg.MODEL.SWIN.QK_SCALE = None\n cfg.MODEL.SWIN.DROP_RATE = 0.0\n cfg.MODEL.SWIN.ATTN_DROP_RATE = 0.0\n cfg.MODEL.SWIN.DROP_PATH_RATE = 0.3\n cfg.MODEL.SWIN.APE = False\n cfg.MODEL.SWIN.PATCH_NORM = True\n cfg.MODEL.SWIN.OUT_FEATURES = [\"res2\", \"res3\", \"res4\", \"res5\"]\n cfg.MODEL.SWIN.USE_CHECKPOINT = False\n\n # NOTE: maskformer2 extra configs\n # transformer module\n cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME = (\n \"MultiScaleMaskedTransformerDecoder\"\n )\n\n # LSJ aug\n cfg.INPUT.IMAGE_SIZE = 1024\n cfg.INPUT.MIN_SCALE = 0.1\n cfg.INPUT.MAX_SCALE = 2.0\n\n # MSDeformAttn encoder configs\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES = [\n \"res3\",\n \"res4\",\n \"res5\",\n ]\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_POINTS = 4\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_HEADS = 8\n\n # point loss configs\n # Number of points sampled during training for a mask point head.\n cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS = 112 * 112\n # Oversampling parameter for PointRend point sampling during training. Parameter `k` in the\n # original paper.\n cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO = 3.0\n # Importance sampling parameter for PointRend point sampling during training. Parametr `beta` in\n # the original paper.\n cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO = 0.75\n\n # Resizing disabled for Synthia\n cfg.INPUT.RESIZE = CN()\n cfg.INPUT.RESIZE.ENABLED = True\n cfg.INPUT.RESIZE.SIZE_TRAIN = (1280, 720)\n\n # Saving Pseudo Labels during test time\n cfg.MODEL.SAVE_PSEUDO_LABELS = False\n\n # for the Dataset repeat factor\n # cfg.DATASETS.TRAIN_REPEAT_FACTOR = [(\"sd_v99\",5.0), (\"cityscapes_train\",1.0)]"
},
{
"identifier": "add_clouds_config",
"path": "clouds/config.py",
"snippet": "def add_clouds_config(cfg):\n # CLOUDS model config\n cfg.MODEL.CLOUDS = CN()\n cfg.MODEL.CLOUDS.CLIP_MODEL_NAME = \"convnext_large_d_320\"\n cfg.MODEL.CLOUDS.CLIP_PRETRAINED_WEIGHTS = \"laion2b_s29b_b131k_ft_soup\"\n cfg.MODEL.CLOUDS.EMBED_DIM = 768\n cfg.MODEL.CLOUDS.GEOMETRIC_ENSEMBLE_ALPHA = 0.4\n cfg.MODEL.CLOUDS.GEOMETRIC_ENSEMBLE_BETA = 0.8\n cfg.MODEL.CLOUDS.ENSEMBLE_ON_VALID_MASK = False\n cfg.MODEL.CLOUDS.GEOMETRIC_ENSEMBLE = False\n cfg.MODEL.CLOUDS.GEOMETRIC_ENSEMBLE_EMA = False\n cfg.MODEL.CLOUDS.SAM = CN()\n cfg.MODEL.CLOUDS.SAM.ENABLED = False\n cfg.MODEL.CLOUDS.SAM.MOBILE = True\n cfg.MODEL.CLOUDS.SAM.MINIBATCH = False\n cfg.MODEL.CLOUDS.SAM.SIZE_THRESHOLD = 5000\n cfg.MODEL.CLOUDS.SAM.EROSION = False\n cfg.MODEL.CLOUDS.SAM.EROSION_SIZE = 3\n cfg.MODEL.CLOUDS.SAM.NUM_POINTS = 5\n cfg.MODEL.CLOUDS.SAM.SELECTION_MODE = \"random\"\n cfg.MODEL.CLOUDS.SAM.RM_INTERSECTION = True\n cfg.MODEL.CLOUDS.SAM.REFINEMENT = False\n cfg.MODEL.CLOUDS.SAM.ALPHA_EMA = 0.999\n cfg.MODEL.CLOUDS.OVERWRITING = True\n cfg.MODEL.CLOUDS.ITERATION_UPDATE = 100"
},
{
"identifier": "VisualizationDemo",
"path": "predictor.py",
"snippet": "class VisualizationDemo(object):\n def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False):\n \"\"\"\n Args:\n cfg (CfgNode):\n instance_mode (ColorMode):\n parallel (bool): whether to run the model in different processes from visualization.\n Useful since the visualization logic can be slow.\n \"\"\"\n\n coco_metadata = MetadataCatalog.get(\n \"openvocab_coco_2017_val_panoptic_with_sem_seg\"\n )\n ade20k_metadata = MetadataCatalog.get(\"openvocab_ade20k_panoptic_val\")\n lvis_classes = (\n open(\"clouds/data/datasets/lvis_1203_with_prompt_eng.txt\", \"r\")\n .read()\n .splitlines()\n )\n lvis_classes = [x[x.find(\":\") + 1 :] for x in lvis_classes]\n lvis_colors = list(\n itertools.islice(\n itertools.cycle(coco_metadata.stuff_colors), len(lvis_classes)\n )\n )\n # rerrange to thing_classes, stuff_classes\n coco_thing_classes = coco_metadata.thing_classes\n coco_stuff_classes = [\n x for x in coco_metadata.stuff_classes if x not in coco_thing_classes\n ]\n coco_thing_colors = coco_metadata.thing_colors\n coco_stuff_colors = [\n x for x in coco_metadata.stuff_colors if x not in coco_thing_colors\n ]\n ade20k_thing_classes = ade20k_metadata.thing_classes\n ade20k_stuff_classes = [\n x for x in ade20k_metadata.stuff_classes if x not in ade20k_thing_classes\n ]\n ade20k_thing_colors = ade20k_metadata.thing_colors\n ade20k_stuff_colors = [\n x for x in ade20k_metadata.stuff_colors if x not in ade20k_thing_colors\n ]\n\n user_classes = []\n user_colors = [\n random_color(rgb=True, maximum=1) for _ in range(len(user_classes))\n ]\n\n stuff_classes = coco_stuff_classes + ade20k_stuff_classes\n stuff_colors = coco_stuff_colors + ade20k_stuff_colors\n thing_classes = (\n user_classes + coco_thing_classes + ade20k_thing_classes + lvis_classes\n )\n thing_colors = (\n user_colors + coco_thing_colors + ade20k_thing_colors + lvis_colors\n )\n\n thing_dataset_id_to_contiguous_id = {x: x for x in range(len(thing_classes))}\n DatasetCatalog.register(\"openvocab_dataset\", lambda x: [])\n self.metadata = MetadataCatalog.get(\"openvocab_dataset\").set(\n stuff_classes=thing_classes + stuff_classes,\n stuff_colors=thing_colors + stuff_colors,\n thing_dataset_id_to_contiguous_id=thing_dataset_id_to_contiguous_id,\n )\n # print(\"self.metadata:\", self.metadata)\n self.cpu_device = torch.device(\"cpu\")\n self.instance_mode = instance_mode\n\n self.parallel = parallel\n if parallel:\n num_gpu = torch.cuda.device_count()\n self.predictor = AsyncPredictor(cfg, num_gpus=num_gpu)\n else:\n self.predictor = DefaultPredictor(cfg)\n self.predictor.set_metadata(self.metadata)\n\n def run_on_image(self, image):\n \"\"\"\n Args:\n image (np.ndarray): an image of shape (H, W, C) (in BGR order).\n This is the format used by OpenCV.\n Returns:\n predictions (dict): the output of the model.\n vis_output (VisImage): the visualized image output.\n \"\"\"\n vis_output = None\n predictions = self.predictor(image)\n # Convert image from OpenCV BGR format to Matplotlib RGB format.\n image = image[:, :, ::-1]\n visualizer = OpenVocabVisualizer(\n image, self.metadata, instance_mode=self.instance_mode\n )\n if \"panoptic_seg\" in predictions:\n panoptic_seg, segments_info = predictions[\"panoptic_seg\"]\n vis_output = visualizer.draw_panoptic_seg(\n panoptic_seg.to(self.cpu_device), segments_info\n )\n else:\n if \"sem_seg\" in predictions:\n vis_output = visualizer.draw_sem_seg(\n predictions[\"sem_seg\"].argmax(dim=0).to(self.cpu_device)\n )\n if \"instances\" in predictions:\n instances = 
predictions[\"instances\"].to(self.cpu_device)\n vis_output = visualizer.draw_instance_predictions(predictions=instances)\n\n return predictions, vis_output\n\n def _frame_from_video(self, video):\n while video.isOpened():\n success, frame = video.read()\n if success:\n yield frame\n else:\n break"
}
] | import argparse
import glob
import multiprocessing as mp
import os
import sys
import tempfile
import time
import warnings
import cv2
import numpy as np
import tqdm
from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.projects.deeplab import add_deeplab_config
from detectron2.utils.logger import setup_logger
from clouds import add_maskformer2_config, add_clouds_config
from predictor import VisualizationDemo | 3,771 | """
Copyright 2023 Telecom Paris, Yasser BENIGMIM. All rights reserved.
Licensed under the Apache License, Version 2.0
Reference: https://github.com/facebookresearch/Mask2Former/blob/main/demo/demo.py
"""
# fmt: off
sys.path.insert(1, os.path.join(sys.path[0], '..'))
# fmt: on
# constants
WINDOW_NAME = "clouds demo"
def setup_cfg(args):
# load config from file and command-line arguments
cfg = get_cfg()
add_deeplab_config(cfg)
add_maskformer2_config(cfg)
add_clouds_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
return cfg
def get_parser():
parser = argparse.ArgumentParser(description="clouds demo for builtin configs")
parser.add_argument(
"--config-file",
default="../configs/coco/segmentation/clouds/clouds_convnext_large_eval_cityscapes.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument(
"--webcam", action="store_true", help="Take inputs from webcam."
)
parser.add_argument("--video-input", help="Path to video file.")
parser.add_argument(
"--input",
nargs="+",
help="A list of space separated input images; "
"or a single glob pattern such as 'directory/*.jpg'",
)
parser.add_argument(
"--output",
help="A file or directory to save output visualizations. "
"If not given, will show output in an OpenCV window.",
)
parser.add_argument(
"--confidence-threshold",
type=float,
default=0.5,
help="Minimum score for instance predictions to be shown",
)
parser.add_argument(
"--opts",
help="Modify config options using the command-line 'KEY VALUE' pairs",
default=[],
nargs=argparse.REMAINDER,
)
return parser
def test_opencv_video_format(codec, file_ext):
with tempfile.TemporaryDirectory(prefix="video_format_test") as dir:
filename = os.path.join(dir, "test_file" + file_ext)
writer = cv2.VideoWriter(
filename=filename,
fourcc=cv2.VideoWriter_fourcc(*codec),
fps=float(30),
frameSize=(10, 10),
isColor=True,
)
[writer.write(np.zeros((10, 10, 3), np.uint8)) for _ in range(30)]
writer.release()
if os.path.isfile(filename):
return True
return False
if __name__ == "__main__":
mp.set_start_method("spawn", force=True)
args = get_parser().parse_args()
setup_logger(name="fvcore")
logger = setup_logger()
logger.info("Arguments: " + str(args))
cfg = setup_cfg(args)
| """
Copyright 2023 Telecom Paris, Yasser BENIGMIM. All rights reserved.
Licensed under the Apache License, Version 2.0
Reference: https://github.com/facebookresearch/Mask2Former/blob/main/demo/demo.py
"""
# fmt: off
sys.path.insert(1, os.path.join(sys.path[0], '..'))
# fmt: on
# constants
WINDOW_NAME = "clouds demo"
def setup_cfg(args):
# load config from file and command-line arguments
cfg = get_cfg()
add_deeplab_config(cfg)
add_maskformer2_config(cfg)
add_clouds_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
return cfg
def get_parser():
parser = argparse.ArgumentParser(description="clouds demo for builtin configs")
parser.add_argument(
"--config-file",
default="../configs/coco/segmentation/clouds/clouds_convnext_large_eval_cityscapes.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument(
"--webcam", action="store_true", help="Take inputs from webcam."
)
parser.add_argument("--video-input", help="Path to video file.")
parser.add_argument(
"--input",
nargs="+",
help="A list of space separated input images; "
"or a single glob pattern such as 'directory/*.jpg'",
)
parser.add_argument(
"--output",
help="A file or directory to save output visualizations. "
"If not given, will show output in an OpenCV window.",
)
parser.add_argument(
"--confidence-threshold",
type=float,
default=0.5,
help="Minimum score for instance predictions to be shown",
)
parser.add_argument(
"--opts",
help="Modify config options using the command-line 'KEY VALUE' pairs",
default=[],
nargs=argparse.REMAINDER,
)
return parser
def test_opencv_video_format(codec, file_ext):
with tempfile.TemporaryDirectory(prefix="video_format_test") as dir:
filename = os.path.join(dir, "test_file" + file_ext)
writer = cv2.VideoWriter(
filename=filename,
fourcc=cv2.VideoWriter_fourcc(*codec),
fps=float(30),
frameSize=(10, 10),
isColor=True,
)
[writer.write(np.zeros((10, 10, 3), np.uint8)) for _ in range(30)]
writer.release()
if os.path.isfile(filename):
return True
return False
if __name__ == "__main__":
mp.set_start_method("spawn", force=True)
args = get_parser().parse_args()
setup_logger(name="fvcore")
logger = setup_logger()
logger.info("Arguments: " + str(args))
cfg = setup_cfg(args)
| demo = VisualizationDemo(cfg) | 2 | 2023-12-15 15:40:58+00:00 | 8k |
mattcar15/bambu-connect | bambu_connect/BambuClient.py | [
{
"identifier": "CameraClient",
"path": "bambu_connect/CameraClient.py",
"snippet": "class CameraClient:\n def __init__(self, hostname, access_code, port=6000):\n self.hostname = hostname\n self.port = port\n self.username = \"bblp\"\n self.auth_packet = self.__create_auth_packet__(self.username, access_code)\n self.streaming = False\n self.stream_thread = None\n\n def __create_auth_packet__(self, username, access_code):\n d = bytearray()\n d += struct.pack(\"IIL\", 0x40, 0x3000, 0x0)\n d += username.encode(\"ascii\").ljust(32, b\"\\x00\")\n d += access_code.encode(\"ascii\").ljust(32, b\"\\x00\")\n return d\n\n def __find_jpeg__(self, buf, start_marker, end_marker):\n start = buf.find(start_marker)\n end = buf.find(end_marker, start + len(start_marker))\n if start != -1 and end != -1:\n return buf[start : end + len(end_marker)], buf[end + len(end_marker) :]\n return None, buf\n\n def capture_frame(self):\n ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n\n jpeg_start = bytearray.fromhex(\"ff d8 ff e0\")\n jpeg_end = bytearray.fromhex(\"ff d9\")\n read_chunk_size = 1024\n\n with socket.create_connection((self.hostname, self.port)) as sock:\n with ctx.wrap_socket(sock, server_hostname=self.hostname) as ssock:\n ssock.write(self.auth_packet)\n buf = bytearray()\n while True:\n dr = ssock.recv(read_chunk_size)\n if not dr:\n break\n buf += dr\n img, buf = self.__find_jpeg__(buf, jpeg_start, jpeg_end)\n if img:\n return bytes(img)\n\n def capture_stream(self, img_callback):\n ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n\n jpeg_start = bytearray.fromhex(\"ff d8 ff e0\")\n jpeg_end = bytearray.fromhex(\"ff d9\")\n read_chunk_size = 1024\n\n with socket.create_connection((self.hostname, self.port)) as sock:\n with ctx.wrap_socket(sock, server_hostname=self.hostname) as ssock:\n ssock.write(self.auth_packet)\n buf = bytearray()\n while self.streaming:\n dr = ssock.recv(read_chunk_size)\n if not dr:\n break\n buf += dr\n img, buf = self.__find_jpeg__(buf, jpeg_start, jpeg_end)\n if img:\n img_callback(bytes(img))\n\n def start_stream(self, img_callback):\n if self.streaming:\n print(\"Stream already running.\")\n return\n\n self.streaming = True\n self.stream_thread = threading.Thread(\n target=self.capture_stream, args=(img_callback,)\n )\n self.stream_thread.start()\n\n def stop_stream(self):\n if not self.streaming:\n print(\"Stream is not running.\")\n return\n\n self.streaming = False\n self.stream_thread.join()"
},
{
"identifier": "WatchClient",
"path": "bambu_connect/WatchClient.py",
"snippet": "class WatchClient:\n def __init__(self, hostname: str, access_code: str, serial: str):\n self.hostname = hostname\n self.access_code = access_code\n self.serial = serial\n self.client = self.__setup_mqtt_client()\n self.values = {}\n self.printerStatus = None\n self.message_callback = None\n\n def __setup_mqtt_client(self) -> mqtt.Client:\n client = mqtt.Client()\n client.username_pw_set(\"bblp\", self.access_code)\n client.tls_set(tls_version=ssl.PROTOCOL_TLS, cert_reqs=ssl.CERT_NONE)\n client.tls_insecure_set(True)\n client.on_connect = self.on_connect\n client.on_message = self.on_message\n return client\n\n def on_connect(self, client: mqtt.Client, userdata: Any, flags: Any, rc: int):\n client.subscribe(f\"device/{self.serial}/report\")\n if self.on_connect_callback:\n self.on_connect_callback()\n\n def start(\n self,\n message_callback: Optional[Callable[[PrinterStatus], None]] = None,\n on_connect_callback: Optional[Callable[[], None]] = None,\n ):\n self.message_callback = message_callback\n self.on_connect_callback = on_connect_callback\n self.client.connect(self.hostname, 8883, 60)\n self.client.loop_start()\n\n def stop(self):\n self.client.loop_stop()\n self.client.disconnect()\n\n def on_message(self, client, userdata, msg):\n doc = json.loads(msg.payload)\n try:\n if not doc:\n return\n\n self.values = dict(self.values, **doc[\"print\"])\n self.printerStatus = PrinterStatus(**self.values)\n\n if self.message_callback:\n self.message_callback(self.printerStatus)\n except KeyError:\n pass"
},
{
"identifier": "ExecuteClient",
"path": "bambu_connect/ExecuteClient.py",
"snippet": "class ExecuteClient:\n def __init__(self, hostname: str, access_code: str, serial: str):\n self.hostname = hostname\n self.access_code = access_code\n self.serial = serial\n self.client = self.__setup_mqtt_client()\n\n def __setup_mqtt_client(self):\n client = mqtt.Client()\n client.username_pw_set(\"bblp\", self.access_code)\n client.tls_set(tls_version=ssl.PROTOCOL_TLS, cert_reqs=ssl.CERT_NONE)\n client.tls_insecure_set(True)\n client.connect(self.hostname, 8883, 60)\n return client\n\n def disconnect(self):\n self.client.disconnect()\n\n def send_command(self, payload):\n self.client.loop_start()\n self.client.publish(f\"device/{self.serial}/request\", payload)\n self.client.loop_stop()\n\n def send_gcode(self, gcode):\n payload = f'{{\"print\": {{\"command\": \"gcode_line\", \"sequence_id\": 2006, \"param\": \"{gcode} \\n\"}}, \"user_id\":\"1234567890\"}}'\n self.send_command(payload)\n\n # this dumps all the printer stats, for minor print updates the printer will send them automatically.\n def dump_info(self):\n payload = f'{{\"pushing\": {{ \"sequence_id\": 1, \"command\": \"pushall\"}}, \"user_id\":\"1234567890\"}}'\n self.send_command(payload)\n\n # when using this, choose the send to printer option in bambu or cura slicer. Provide the file name (no path)\n def start_print(self, file):\n payload = json.dumps(\n {\n \"print\": {\n \"sequence_id\": 13,\n \"command\": \"project_file\",\n \"param\": \"Metadata/plate_1.gcode\",\n \"subtask_name\": f\"{file}\",\n \"url\": f\"ftp://{file}\",\n \"bed_type\": \"auto\",\n \"timelapse\": False,\n \"bed_leveling\": True,\n \"flow_cali\": False,\n \"vibration_cali\": True,\n \"layer_inspect\": False,\n \"use_ams\": False,\n \"profile_id\": \"0\",\n \"project_id\": \"0\",\n \"subtask_id\": \"0\",\n \"task_id\": \"0\",\n }\n }\n )\n self.send_command(payload)"
},
{
"identifier": "FileClient",
"path": "bambu_connect/FileClient.py",
"snippet": "class FileClient:\n def __init__(self, hostname: str, access_code: str, serial: str):\n self.hostname = hostname\n self.access_code = access_code\n self.serial = serial\n\n def get_files(self, directory=\"/\", extension=\".3mf\"):\n command = [\n \"curl\",\n \"--ftp-pasv\",\n \"--insecure\",\n f\"ftps://{self.hostname}{directory}\",\n \"--user\",\n f\"bblp:{self.access_code}\",\n ]\n result = subprocess.run(command, capture_output=True, text=True)\n\n filtered_files = []\n for line in result.stdout.split(\"\\n\"):\n if line.strip():\n parts = re.split(r\"\\s+\", line, maxsplit=8)\n filename = parts[-1]\n\n if filename.endswith(extension):\n filtered_files.append(filename)\n\n return filtered_files\n\n def download_file(self, remote_path: str, local_path: str, verbose=True):\n if not os.path.exists(local_path):\n os.makedirs(local_path)\n\n local_file_path = os.path.join(local_path, os.path.basename(remote_path))\n command = [\n \"curl\",\n \"-o\",\n local_file_path,\n \"--ftp-pasv\",\n \"--insecure\",\n f\"ftps://{self.hostname}{remote_path}\",\n \"--user\",\n f\"bblp:{self.access_code}\",\n ]\n \n if verbose:\n result = subprocess.run(command)\n else:\n result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n if result.returncode != 0:\n if verbose:\n print(result.stderr.decode())\n return False\n\n return True"
},
{
"identifier": "PrinterStatus",
"path": "bambu_connect/utils/models.py",
"snippet": "class PrinterStatus:\n upload: Optional[Upload] = None\n nozzle_temper: Optional[float] = None\n nozzle_target_temper: Optional[float] = None\n bed_temper: Optional[float] = None\n bed_target_temper: Optional[float] = None\n chamber_temper: Optional[float] = None\n mc_print_stage: Optional[str] = None\n heatbreak_fan_speed: Optional[str] = None\n cooling_fan_speed: Optional[str] = None\n big_fan1_speed: Optional[str] = None\n big_fan2_speed: Optional[str] = None\n mc_percent: Optional[int] = None\n mc_remaining_time: Optional[int] = None\n ams_status: Optional[int] = None\n ams_rfid_status: Optional[int] = None\n hw_switch_state: Optional[int] = None\n spd_mag: Optional[int] = None\n spd_lvl: Optional[int] = None\n print_error: Optional[int] = None\n lifecycle: Optional[str] = None\n wifi_signal: Optional[str] = None\n gcode_state: Optional[str] = None\n gcode_file_prepare_percent: Optional[str] = None\n queue_number: Optional[int] = None\n queue_total: Optional[int] = None\n queue_est: Optional[int] = None\n queue_sts: Optional[int] = None\n project_id: Optional[str] = None\n profile_id: Optional[str] = None\n task_id: Optional[str] = None\n subtask_id: Optional[str] = None\n subtask_name: Optional[str] = None\n gcode_file: Optional[str] = None\n stg: Optional[List[Any]] = None\n stg_cur: Optional[int] = None\n print_type: Optional[str] = None\n home_flag: Optional[int] = None\n mc_print_line_number: Optional[str] = None\n mc_print_sub_stage: Optional[int] = None\n sdcard: Optional[bool] = None\n force_upgrade: Optional[bool] = None\n mess_production_state: Optional[str] = None\n layer_num: Optional[int] = None\n total_layer_num: Optional[int] = None\n s_obj: Optional[List[Any]] = None\n fan_gear: Optional[int] = None\n hms: Optional[List[Any]] = None\n online: Optional[Online] = None\n ams: Optional[AMS] = None\n ipcam: Optional[IPCam] = None\n vt_tray: Optional[VTTray] = None\n lights_report: Optional[List[LightsReport]] = None\n upgrade_state: Optional[UpgradeState] = None\n command: Optional[str] = None\n msg: Optional[int] = None\n sequence_id: Optional[str] = None\n\n def __init__(self, **data):\n self.upload = Upload(**data[\"upload\"]) if \"upload\" in data else None\n self.nozzle_temper = data.get(\"nozzle_temper\")\n self.nozzle_target_temper = data.get(\"nozzle_target_temper\")\n self.bed_temper = data.get(\"bed_temper\")\n self.bed_target_temper = data.get(\"bed_target_temper\")\n self.chamber_temper = data.get(\"chamber_temper\")\n self.mc_print_stage = data.get(\"mc_print_stage\")\n self.heatbreak_fan_speed = data.get(\"heatbreak_fan_speed\")\n self.cooling_fan_speed = data.get(\"cooling_fan_speed\")\n self.big_fan1_speed = data.get(\"big_fan1_speed\")\n self.big_fan2_speed = data.get(\"big_fan2_speed\")\n self.mc_percent = data.get(\"mc_percent\")\n self.mc_remaining_time = data.get(\"mc_remaining_time\")\n self.ams_status = data.get(\"ams_status\")\n self.ams_rfid_status = data.get(\"ams_rfid_status\")\n self.hw_switch_state = data.get(\"hw_switch_state\")\n self.spd_mag = data.get(\"spd_mag\")\n self.spd_lvl = data.get(\"spd_lvl\")\n self.print_error = data.get(\"print_error\")\n self.lifecycle = data.get(\"lifecycle\")\n self.wifi_signal = data.get(\"wifi_signal\")\n self.gcode_state = data.get(\"gcode_state\")\n self.gcode_file_prepare_percent = data.get(\"gcode_file_prepare_percent\")\n self.queue_number = data.get(\"queue_number\")\n self.queue_total = data.get(\"queue_total\")\n self.queue_est = data.get(\"queue_est\")\n self.queue_sts = 
data.get(\"queue_sts\")\n self.project_id = data.get(\"project_id\")\n self.profile_id = data.get(\"profile_id\")\n self.task_id = data.get(\"task_id\")\n self.subtask_id = data.get(\"subtask_id\")\n self.subtask_name = data.get(\"subtask_name\")\n self.gcode_file = data.get(\"gcode_file\")\n self.stg = data.get(\"stg\", [])\n self.stg_cur = data.get(\"stg_cur\")\n self.print_type = data.get(\"print_type\")\n self.home_flag = data.get(\"home_flag\")\n self.mc_print_line_number = data.get(\"mc_print_line_number\")\n self.mc_print_sub_stage = data.get(\"mc_print_sub_stage\")\n self.sdcard = data.get(\"sdcard\", False)\n self.force_upgrade = data.get(\"force_upgrade\", False)\n self.mess_production_state = data.get(\"mess_production_state\")\n self.layer_num = data.get(\"layer_num\")\n self.total_layer_num = data.get(\"total_layer_num\")\n self.s_obj = data.get(\"s_obj\", [])\n self.fan_gear = data.get(\"fan_gear\")\n self.hms = data.get(\"hms\", [])\n self.online = Online(**data[\"online\"]) if \"online\" in data else None\n self.ams = AMS(**data[\"ams\"]) if \"ams\" in data else None\n self.ipcam = IPCam(**data[\"ipcam\"]) if \"ipcam\" in data else None\n self.vt_tray = VTTray(**data[\"vt_tray\"]) if \"vt_tray\" in data else None\n self.lights_report = [LightsReport(**lr) for lr in data.get(\"lights_report\", [])]\n self.upgrade_state = UpgradeState(**data[\"upgrade_state\"]) if \"upgrade_state\" in data else None\n self.command = data.get(\"command\")\n self.msg = data.get(\"msg\")\n self.sequence_id = data.get(\"sequence_id\")"
}
] | from .CameraClient import CameraClient
from .WatchClient import WatchClient
from .ExecuteClient import ExecuteClient
from .FileClient import FileClient
from .utils.models import PrinterStatus
from typing import Callable, Dict, Optional, Any | 3,916 |
class BambuClient:
def __init__(self, hostname: str, access_code: str, serial: str):
self.cameraClient = CameraClient(hostname, access_code)
self.watchClient = WatchClient(hostname, access_code, serial)
self.executeClient = ExecuteClient(hostname, access_code, serial)
|
class BambuClient:
def __init__(self, hostname: str, access_code: str, serial: str):
self.cameraClient = CameraClient(hostname, access_code)
self.watchClient = WatchClient(hostname, access_code, serial)
self.executeClient = ExecuteClient(hostname, access_code, serial) | self.fileClient = FileClient(hostname, access_code, serial) | 3 | 2023-12-16 05:31:56+00:00 | 8k |
linyq2117/TagCLIP | classify.py | [
{
"identifier": "scoremap2bbox",
"path": "utils.py",
"snippet": "def scoremap2bbox(scoremap, threshold, multi_contour_eval=False):\n height, width = scoremap.shape\n scoremap_image = np.expand_dims((scoremap * 255).astype(np.uint8), 2)\n _, thr_gray_heatmap = cv2.threshold(\n src=scoremap_image,\n thresh=int(threshold * np.max(scoremap_image)),\n maxval=255,\n type=cv2.THRESH_BINARY)\n contours = cv2.findContours(\n image=thr_gray_heatmap,\n mode=cv2.RETR_EXTERNAL,\n method=cv2.CHAIN_APPROX_SIMPLE)[_CONTOUR_INDEX]\n\n if len(contours) == 0:\n return np.asarray([[0, 0, 0, 0]]), 1\n\n if not multi_contour_eval:\n contours = [max(contours, key=cv2.contourArea)]\n\n estimated_boxes = []\n for contour in contours:\n x, y, w, h = cv2.boundingRect(contour)\n x0, y0, x1, y1 = x, y, x + w, y + h\n x1 = min(x1, width - 1)\n y1 = min(y1, height - 1)\n estimated_boxes.append([x0, y0, x1, y1])\n\n return np.asarray(estimated_boxes), len(contours)"
},
{
"identifier": "parse_xml_to_dict",
"path": "utils.py",
"snippet": "def parse_xml_to_dict(xml):\n \"\"\"\n Args:\n xml: xml tree obtained by parsing XML file contents using lxml.etree\n\n Returns:\n Python dictionary holding XML contents.\n \"\"\"\n\n if len(xml) == 0:\n return {xml.tag: xml.text}\n\n result = {}\n for child in xml:\n child_result = parse_xml_to_dict(child)\n if child.tag != 'object':\n result[child.tag] = child_result[child.tag]\n else:\n if child.tag not in result:\n result[child.tag] = []\n result[child.tag].append(child_result[child.tag])\n return {xml.tag: result}"
},
{
"identifier": "_convert_image_to_rgb",
"path": "utils.py",
"snippet": "def _convert_image_to_rgb(image):\n return image.convert(\"RGB\")"
},
{
"identifier": "compute_AP",
"path": "utils.py",
"snippet": "def compute_AP(predictions, labels):\n num_class = predictions.size(1)\n ap = torch.zeros(num_class).to(predictions.device)\n empty_class = 0\n for idx_cls in range(num_class):\n prediction = predictions[:, idx_cls]\n label = labels[:, idx_cls]\n #mask = label.abs() == 1\n if (label > 0).sum() == 0:\n empty_class += 1\n continue\n binary_label = torch.clamp(label, min=0, max=1)\n sorted_pred, sort_idx = prediction.sort(descending=True)\n sorted_label = binary_label[sort_idx]\n tmp = (sorted_label == 1).float()\n tp = tmp.cumsum(0)\n fp = (sorted_label != 1).float().cumsum(0)\n num_pos = binary_label.sum()\n rec = tp/num_pos\n prec = tp/(tp+fp)\n ap_cls = (tmp*prec).sum()/num_pos\n ap[idx_cls].copy_(ap_cls)\n return ap"
},
{
"identifier": "compute_F1",
"path": "utils.py",
"snippet": "def compute_F1(predictions, labels, mode_F1, k_val, use_relative=False):\n if k_val >= 1:\n idx = predictions.topk(dim=1, k=k_val)[1]\n predictions.fill_(0)\n predictions.scatter_(dim=1, index=idx, src=torch.ones(predictions.size(0), k_val, dtype=predictions.dtype).to(predictions.device))\n else:\n if use_relative:\n ma = predictions.max(dim=1)[0]\n mi = predictions.min(dim=1)[0]\n step = ma - mi\n thres = mi + k_val * step\n \n for i in range(predictions.shape[0]):\n predictions[i][predictions[i] > thres[i]] = 1\n predictions[i][predictions[i] <= thres[i]] = 0\n else:\n predictions[predictions > k_val] = 1\n predictions[predictions <= k_val] = 0\n \n if mode_F1 == 'overall':\n predictions = predictions.bool()\n labels = labels.bool()\n TPs = ( predictions & labels).sum()\n FPs = ( predictions & ~labels).sum()\n FNs = (~predictions & labels).sum()\n eps = 1.e-9\n Ps = TPs / (TPs + FPs + eps)\n Rs = TPs / (TPs + FNs + eps)\n p = Ps.mean()\n r = Rs.mean()\n f1 = 2*p*r/(p+r)\n \n \n elif mode_F1 == 'category':\n # calculate P and R\n predictions = predictions.bool()\n labels = labels.bool()\n TPs = ( predictions & labels).sum(axis=0)\n FPs = ( predictions & ~labels).sum(axis=0)\n FNs = (~predictions & labels).sum(axis=0)\n eps = 1.e-9\n Ps = TPs / (TPs + FPs + eps)\n Rs = TPs / (TPs + FNs + eps)\n p = Ps.mean()\n r = Rs.mean()\n f1 = 2*p*r/(p+r)\n \n elif mode_F1 == 'sample':\n # calculate P and R\n predictions = predictions.bool()\n labels = labels.bool()\n TPs = ( predictions & labels).sum(axis=1)\n FPs = ( predictions & ~labels).sum(axis=1)\n FNs = (~predictions & labels).sum(axis=1)\n eps = 1.e-9\n Ps = TPs / (TPs + FPs + eps)\n Rs = TPs / (TPs + FNs + eps)\n p = Ps.mean()\n r = Rs.mean()\n f1 = 2*p*r/(p+r)\n\n return f1, p, r"
},
{
"identifier": "_transform_resize",
"path": "utils.py",
"snippet": "def _transform_resize(h, w):\n return Compose([\n #Resize(n_px, interpolation=BICUBIC),\n Resize((h,w), interpolation=BICUBIC),\n #CenterCrop(n_px),\n #RandomHorizontalFlip(1.0),\n _convert_image_to_rgb,\n ToTensor(),\n Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),\n ])"
},
{
"identifier": "class_names_voc",
"path": "clip_text.py",
"snippet": "BACKGROUND_CATEGORY_VOC = ['ground','land','grass','tree','building','wall','sky','lake','water','river','sea','railway','railroad','keyboard','helmet',\n 'cloud','house','mountain','ocean','road','rock','street','valley','bridge','sign',\n ]\nBACKGROUND_CATEGORY_COCO = ['ground','land','grass','tree','building','wall','sky','lake','water','river','sea','railway','railroad','helmet',\n 'cloud','house','mountain','ocean','road','rock','street','valley','bridge',\n ]"
}
] | import clip
import torch
import cv2
import numpy as np
import pickle
import os
import math
import torch.nn.functional as F
import os
import argparse
import warnings
from PIL import Image
from tqdm import tqdm
from lxml import etree
from utils import scoremap2bbox, parse_xml_to_dict, _convert_image_to_rgb, compute_AP, compute_F1, _transform_resize
from clip_text import class_names_voc, BACKGROUND_CATEGORY_VOC, class_names_coco, BACKGROUND_CATEGORY_COCO, class_names_coco_stuff182_dict, coco_stuff_182_to_27 | 4,339 | if len(array_img.shape) == 2:
array_img = np.stack([array_img, array_img, array_img], axis=2)
pil_img = Image.fromarray(np.uint8(array_img))
if model_type == 'clip':
patch_size = 16
preprocess = _transform_resize(int(np.ceil(int(ori_height) / patch_size) * patch_size), int(np.ceil(int(ori_width) / patch_size) * patch_size))
image = preprocess(pil_img).unsqueeze(0).to(device)
with torch.no_grad():
# Extract image features
h, w = image.shape[-2], image.shape[-1]
image_features, attn_weight_list = model.encode_image_tagclip(image, h, w, attn_mask=1)
image_features = image_features / image_features.norm(dim=-1, keepdim=True)
attn_weight = [aw[:, 1:, 1:] for aw in attn_weight_list]
attn_vote = torch.stack(attn_weight, dim=0).squeeze()
thres0 = attn_vote.reshape(attn_vote.shape[0], -1)
thres0 = torch.mean(thres0, dim=-1).reshape(attn_vote.shape[0], 1, 1)
thres0 = thres0.repeat(1, attn_vote.shape[1], attn_vote.shape[2])
if args.dataset == 'cocostuff':
attn_weight = torch.stack(attn_weight, dim=0)[:-1]
else:
attn_weight = torch.stack(attn_weight, dim=0)[8:-1]
attn_cnt = attn_vote > thres0
attn_cnt = attn_cnt.float()
attn_cnt = torch.sum(attn_cnt, dim=0)
attn_cnt = attn_cnt >= 4
attn_weight = torch.mean(attn_weight, dim=0)[0]
attn_weight = attn_weight * attn_cnt.float()
logit_scale = model.logit_scale.exp()
logits = logit_scale * image_features @ text_features.t()#torch.Size([1, 197, 81])
logits = logits[:, 1:, :]
logits = logits.softmax(dim=-1)
logits_coarse = logits.squeeze()
logits = torch.matmul(attn_weight, logits)
logits = logits.squeeze()
logits = mask_attn(logits_coarse, logits, h, w, attn_weight)
logits_max = torch.max(logits, dim=0)[0]
logits_max = logits_max[:NUM_CLASSES]
logits_max = cwr(logits, logits_max, h, w, image, text_features)
logits_max = logits_max.cpu().numpy()
pred_label_id.append(logits_max)
else:
raise NotImplementedError()
gt_one_hot = np.zeros((len(gt_label_id), NUM_CLASSES))
for i in range(len(gt_label_id)):
gt_ids = gt_label_id[i]
for gt_id in gt_ids:
gt_one_hot[i][gt_id] = 1
predictions = torch.tensor(pred_label_id)
labels = torch.tensor(gt_one_hot)
# compute AP
ap = compute_AP(predictions, labels)
print('================================================')
print('mAP: %.6f' % torch.mean(ap))
# compute F1, P, R with specific relative threshold
ma = predictions.max(dim=1)[0]
mi = predictions.min(dim=1)[0]
step = ma - mi
if args.dataset == 'cocostuff':
thres_abs = 0.1
else:
thres_abs = 0.5
F1, P, R = compute_F1(predictions.clone(), labels.clone(), 'overall', thres_abs, use_relative=True)
print('F1: %.6f, Precision: %.6f, Recall: %.6f' % (torch.mean(F1), torch.mean(P), torch.mean(R)))
print('================================================\n')
#save class labels
if args.save_file:
save_path = './output/{}_val_tagclip.txt'.format(args.dataset)
print('>>>writing to {}'.format(save_path))
thres_rel = mi + thres_abs * step
with open(save_path, 'w') as f:
for im_idx, im in enumerate(image_list):
line = im.replace('.jpg','')
for index, value in enumerate(pred_label_id[im_idx]):
if value > thres_rel[im_idx]:
line += " {}".format(index)
if line == im.replace('.jpg',''):
line += " {}".format(np.argmax(pred_label_id[im_idx]))
line += "\n"
f.writelines(line)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='')
parser.add_argument('--dataset', type=str, default='voc2007', choices=['voc2007', 'voc2012', 'coco2014', 'coco2017', 'cocostuff'])
parser.add_argument('--img_root', type=str, default='./datasets/VOC2007/JPEGImages')
parser.add_argument('--split_file', type=str, default='./datasets/VOC2007/ImageSets/Main/test.txt')
parser.add_argument('--model_path', type=str, default='ViT-B/16')
parser.add_argument('--save_file', action="store_true")
args = parser.parse_args()
if args.dataset in ['voc2007', 'voc2012']:
class_names = class_names_voc + BACKGROUND_CATEGORY_VOC
NUM_CLASSES = len(class_names_voc)
elif args.dataset in ['coco2014', 'coco2017']:
class_names = class_names_coco + BACKGROUND_CATEGORY_COCO
NUM_CLASSES = len(class_names_coco)
else:
coco_stuff_182_to_171 = {}
cnt = 0
|
warnings.filterwarnings("ignore")
def mask_attn(logits_coarse, logits, h, w, attn_weight):
patch_size = 16
candidate_cls_list = []
logits_refined = logits.clone()
logits_max = torch.max(logits, dim=0)[0]
for tempid,tempv in enumerate(logits_max):
if tempv > 0:
candidate_cls_list.append(tempid)
for ccls in candidate_cls_list:
temp_logits = logits[:,ccls]
temp_logits = temp_logits - temp_logits.min()
temp_logits = temp_logits / temp_logits.max()
mask = temp_logits
mask = mask.reshape(h // patch_size, w // patch_size)
box, cnt = scoremap2bbox(mask.detach().cpu().numpy(), threshold=temp_logits.mean(), multi_contour_eval=True)
aff_mask = torch.zeros((mask.shape[0],mask.shape[1])).to(device)
for i_ in range(cnt):
x0_, y0_, x1_, y1_ = box[i_]
aff_mask[y0_:y1_, x0_:x1_] = 1
aff_mask = aff_mask.view(1,mask.shape[0] * mask.shape[1])
trans_mat = attn_weight * aff_mask
logits_refined_ccls = torch.matmul(trans_mat, logits_coarse[:,ccls:ccls+1])
logits_refined[:, ccls] = logits_refined_ccls.squeeze()
return logits_refined
def cwr(logits, logits_max, h, w, image, text_features):
patch_size = 16
input_size = 224
stride = input_size // patch_size
candidate_cls_list = []
ma = logits.max()
mi = logits.min()
step = ma - mi
if args.dataset == 'cocostuff':
thres_abs = 0.1
else:
thres_abs = 0.5
thres = mi + thres_abs*step
for tempid,tempv in enumerate(logits_max):
if tempv > thres:
candidate_cls_list.append(tempid)
for ccls in candidate_cls_list:
temp_logits = logits[:,ccls]
temp_logits = temp_logits - temp_logits.min()
temp_logits = temp_logits / temp_logits.max()
mask = temp_logits > 0.5
mask = mask.reshape(h // patch_size, w // patch_size)
horizontal_indicies = np.where(np.any(mask.cpu().numpy(), axis=0))[0]
vertical_indicies = np.where(np.any(mask.cpu().numpy(), axis=1))[0]
if horizontal_indicies.shape[0]:
x1, x2 = horizontal_indicies[[0, -1]]
y1, y2 = vertical_indicies[[0, -1]]
x2 += 1
y2 += 1
else:
x1, x2, y1, y2 = 0, 0, 0, 0
y1 = max(y1, 0)
x1 = max(x1, 0)
y2 = min(y2, mask.shape[-2] - 1)
x2 = min(x2, mask.shape[-1] - 1)
if x1 == x2 or y1 == y2:
return logits_max
mask = mask[y1:y2, x1:x2]
mask = mask.float()
mask = mask[None, None, :, :]
mask = F.interpolate(mask, size=(stride, stride), mode="nearest")
mask = mask.squeeze()
mask = mask.reshape(-1).bool()
image_cut = image[:, :, int(y1*patch_size):int(y2*patch_size), int(x1*patch_size):int(x2*patch_size)]
image_cut = F.interpolate(image_cut, size=(input_size, input_size), mode="bilinear", align_corners=False)
cls_attn = 1 - torch.ones((stride*stride+1, stride*stride+1))
for j in range(1, cls_attn.shape[1]):
if not mask[j - 1]:
cls_attn[0, j] = -1000
image_features = model.encode_image_tagclip(image_cut, input_size, input_size, attn_mask=cls_attn)[0]
image_features = image_features / image_features.norm(dim=-1, keepdim=True)
logit_scale = model.logit_scale.exp()
cur_logits = logit_scale * image_features @ text_features.t()
cur_logits = cur_logits[:, 0, :]
cur_logits = cur_logits.softmax(dim=-1).squeeze()
cur_logits_norm = cur_logits[ccls]
logits_max[ccls] = 0.5 * logits_max[ccls] + (1 - 0.5) * cur_logits_norm
return logits_max
def classify():
pred_label_id = []
gt_label_id = []
with torch.no_grad():
text_features = clip.encode_text_with_prompt_ensemble(model, class_names, device)
text_features = text_features / text_features.norm(dim=-1, keepdim=True)
for im_idx, im in enumerate(tqdm(image_list)):
image_path = os.path.join(args.img_root, im)
label_id_list = all_label_list[im_idx]
label_id_list = [int(lid) for lid in label_id_list]
if args.dataset == 'cocostuff':
label_id_list = [coco_stuff_182_to_171[int(lid)] for lid in label_id_list]
gt_label_id.append(label_id_list)
pil_img = Image.open(image_path)
array_img = np.array(pil_img)
ori_height, ori_width = array_img.shape[:2]
if len(array_img.shape) == 2:
array_img = np.stack([array_img, array_img, array_img], axis=2)
pil_img = Image.fromarray(np.uint8(array_img))
if model_type == 'clip':
patch_size = 16
preprocess = _transform_resize(int(np.ceil(int(ori_height) / patch_size) * patch_size), int(np.ceil(int(ori_width) / patch_size) * patch_size))
image = preprocess(pil_img).unsqueeze(0).to(device)
with torch.no_grad():
# Extract image features
h, w = image.shape[-2], image.shape[-1]
image_features, attn_weight_list = model.encode_image_tagclip(image, h, w, attn_mask=1)
image_features = image_features / image_features.norm(dim=-1, keepdim=True)
attn_weight = [aw[:, 1:, 1:] for aw in attn_weight_list]
attn_vote = torch.stack(attn_weight, dim=0).squeeze()
thres0 = attn_vote.reshape(attn_vote.shape[0], -1)
thres0 = torch.mean(thres0, dim=-1).reshape(attn_vote.shape[0], 1, 1)
thres0 = thres0.repeat(1, attn_vote.shape[1], attn_vote.shape[2])
if args.dataset == 'cocostuff':
attn_weight = torch.stack(attn_weight, dim=0)[:-1]
else:
attn_weight = torch.stack(attn_weight, dim=0)[8:-1]
attn_cnt = attn_vote > thres0
attn_cnt = attn_cnt.float()
attn_cnt = torch.sum(attn_cnt, dim=0)
attn_cnt = attn_cnt >= 4
attn_weight = torch.mean(attn_weight, dim=0)[0]
attn_weight = attn_weight * attn_cnt.float()
logit_scale = model.logit_scale.exp()
logits = logit_scale * image_features @ text_features.t()#torch.Size([1, 197, 81])
logits = logits[:, 1:, :]
logits = logits.softmax(dim=-1)
logits_coarse = logits.squeeze()
logits = torch.matmul(attn_weight, logits)
logits = logits.squeeze()
logits = mask_attn(logits_coarse, logits, h, w, attn_weight)
logits_max = torch.max(logits, dim=0)[0]
logits_max = logits_max[:NUM_CLASSES]
logits_max = cwr(logits, logits_max, h, w, image, text_features)
logits_max = logits_max.cpu().numpy()
pred_label_id.append(logits_max)
else:
raise NotImplementedError()
gt_one_hot = np.zeros((len(gt_label_id), NUM_CLASSES))
for i in range(len(gt_label_id)):
gt_ids = gt_label_id[i]
for gt_id in gt_ids:
gt_one_hot[i][gt_id] = 1
predictions = torch.tensor(pred_label_id)
labels = torch.tensor(gt_one_hot)
# compute AP
ap = compute_AP(predictions, labels)
print('================================================')
print('mAP: %.6f' % torch.mean(ap))
# compute F1, P, R with specific relative threshold
ma = predictions.max(dim=1)[0]
mi = predictions.min(dim=1)[0]
step = ma - mi
if args.dataset == 'cocostuff':
thres_abs = 0.1
else:
thres_abs = 0.5
F1, P, R = compute_F1(predictions.clone(), labels.clone(), 'overall', thres_abs, use_relative=True)
print('F1: %.6f, Precision: %.6f, Recall: %.6f' % (torch.mean(F1), torch.mean(P), torch.mean(R)))
print('================================================\n')
#save class labels
if args.save_file:
save_path = './output/{}_val_tagclip.txt'.format(args.dataset)
print('>>>writing to {}'.format(save_path))
thres_rel = mi + thres_abs * step
with open(save_path, 'w') as f:
for im_idx, im in enumerate(image_list):
line = im.replace('.jpg','')
for index, value in enumerate(pred_label_id[im_idx]):
if value > thres_rel[im_idx]:
line += " {}".format(index)
if line == im.replace('.jpg',''):
line += " {}".format(np.argmax(pred_label_id[im_idx]))
line += "\n"
f.writelines(line)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='')
parser.add_argument('--dataset', type=str, default='voc2007', choices=['voc2007', 'voc2012', 'coco2014', 'coco2017', 'cocostuff'])
parser.add_argument('--img_root', type=str, default='./datasets/VOC2007/JPEGImages')
parser.add_argument('--split_file', type=str, default='./datasets/VOC2007/ImageSets/Main/test.txt')
parser.add_argument('--model_path', type=str, default='ViT-B/16')
parser.add_argument('--save_file', action="store_true")
args = parser.parse_args()
if args.dataset in ['voc2007', 'voc2012']:
class_names = class_names_voc + BACKGROUND_CATEGORY_VOC
NUM_CLASSES = len(class_names_voc)
elif args.dataset in ['coco2014', 'coco2017']:
class_names = class_names_coco + BACKGROUND_CATEGORY_COCO
NUM_CLASSES = len(class_names_coco)
else:
coco_stuff_182_to_171 = {}
cnt = 0 | for label_id in coco_stuff_182_to_27: | 6 | 2023-12-21 03:20:47+00:00 | 8k |
video-db/videodb-python | videodb/client.py | [
{
"identifier": "ApiPath",
"path": "videodb/_constants.py",
"snippet": "class ApiPath:\n collection = \"collection\"\n upload = \"upload\"\n video = \"video\"\n stream = \"stream\"\n thumbnail = \"thumbnail\"\n upload_url = \"upload_url\"\n transcription = \"transcription\"\n index = \"index\"\n search = \"search\"\n compile = \"compile\"\n workflow = \"workflow\""
},
{
"identifier": "Collection",
"path": "videodb/collection.py",
"snippet": "class Collection:\n def __init__(self, _connection, id: str, name: str = None, description: str = None):\n self._connection = _connection\n self.id = id\n self.name = name\n self.description = description\n\n def get_videos(self) -> list[Video]:\n videos_data = self._connection.get(path=f\"{ApiPath.video}\")\n return [Video(self._connection, **video) for video in videos_data.get(\"videos\")]\n\n def get_video(self, video_id: str) -> Video:\n video_data = self._connection.get(path=f\"{ApiPath.video}/{video_id}\")\n return Video(self._connection, **video_data)\n\n def delete_video(self, video_id: str) -> None:\n \"\"\"Delete the video\n\n :param str video_id: The id of the video to be deleted\n :raises InvalidRequestError: If the delete fails\n :return: None if the delete is successful\n :rtype: None\n \"\"\"\n return self._connection.delete(path=f\"{ApiPath.video}/{video_id}\")\n\n def search(\n self,\n query: str,\n type: Optional[str] = SearchType.semantic,\n result_threshold: Optional[int] = None,\n score_threshold: Optional[int] = None,\n dynamic_score_percentage: Optional[int] = None,\n ) -> SearchResult:\n search = SearchFactory(self._connection).get_search(type)\n return search.search_inside_collection(\n self.id,\n query,\n result_threshold,\n score_threshold,\n dynamic_score_percentage,\n )\n\n def upload(\n self,\n file_path: str = None,\n url: Optional[str] = None,\n name: Optional[str] = None,\n description: Optional[str] = None,\n callback_url: Optional[str] = None,\n ) -> Video:\n upload_data = upload(\n self._connection,\n file_path,\n url,\n name,\n description,\n callback_url,\n )\n return Video(self._connection, **upload_data) if upload_data else None"
},
{
"identifier": "HttpClient",
"path": "videodb/_utils/_http_client.py",
"snippet": "class HttpClient:\n \"\"\"Http client for making requests\"\"\"\n\n def __init__(\n self,\n api_key: str,\n base_url: str,\n max_retries: Optional[int] = HttpClientDefaultValues.max_retries,\n ) -> None:\n \"\"\"Create a new http client instance\n\n :param str api_key: The api key to use for authentication\n :param str base_url: The base url to use for the api\n :param int max_retries: (optional) The maximum number of retries to make for a request\n \"\"\"\n self.session = requests.Session()\n\n retries = Retry(\n total=max_retries,\n backoff_factor=HttpClientDefaultValues.backoff_factor,\n status_forcelist=HttpClientDefaultValues.status_forcelist,\n )\n adapter = HTTPAdapter(max_retries=retries)\n self.session.mount(\"http://\", adapter)\n self.session.mount(\"https://\", adapter)\n self.session.headers.update(\n {\"x-access-token\": api_key, \"Content-Type\": \"application/json\"}\n )\n self.base_url = base_url\n logger.debug(f\"Initialized http client with base url: {self.base_url}\")\n\n def _make_request(\n self,\n method: Callable[..., requests.Response],\n path: str,\n base_url: Optional[str] = None,\n headers: Optional[dict] = None,\n **kwargs,\n ):\n \"\"\"Make a request to the api\n\n :param Callable method: The method to use for the request\n :param str path: The path to make the request to\n :param str base_url: (optional) The base url to use for the request\n :param dict headers: (optional) The headers to use for the request\n :param kwargs: The keyword arguments to pass to the request method\n :return: json response from the request\n \"\"\"\n try:\n url = f\"{base_url or self.base_url}/{path}\"\n timeout = kwargs.pop(\"timeout\", HttpClientDefaultValues.timeout)\n request_headers = {**self.session.headers, **(headers or {})}\n response = method(url, headers=request_headers, timeout=timeout, **kwargs)\n response.raise_for_status()\n return self._parse_response(response)\n\n except requests.exceptions.RequestException as e:\n self._handle_request_error(e)\n\n def _handle_request_error(self, e: requests.exceptions.RequestException) -> None:\n \"\"\"Handle request errors\"\"\"\n\n if isinstance(e, requests.exceptions.HTTPError):\n try:\n error_message = e.response.json().get(\"message\", \"Unknown error\")\n except ValueError:\n error_message = e.response.text\n\n if e.response.status_code == 401:\n raise AuthenticationError(\n f\"Error: {error_message}\", e.response\n ) from None\n else:\n raise InvalidRequestError(\n f\"Invalid request: {error_message}\", e.response\n ) from None\n\n elif isinstance(e, requests.exceptions.RetryError):\n raise InvalidRequestError(\n \"Invalid request: Max retries exceeded\", e.response\n ) from None\n\n elif isinstance(e, requests.exceptions.Timeout):\n raise InvalidRequestError(\n \"Invalid request: Request timed out\", e.response\n ) from None\n\n elif isinstance(e, requests.exceptions.ConnectionError):\n raise InvalidRequestError(\n \"Invalid request: Connection error\", e.response\n ) from None\n\n else:\n raise InvalidRequestError(\n f\"Invalid request: {str(e)}\", e.response\n ) from None\n\n @backoff.on_exception(backoff.expo, Exception, max_time=500, logger=None)\n def _get_output(self, url: str):\n \"\"\"Get the output from an async request\"\"\"\n response_json = self.session.get(url).json()\n if (\n response_json.get(\"status\") == Status.in_progress\n or response_json.get(\"status\") == Status.processing\n ):\n logger.debug(\"Waiting for processing to complete\")\n raise Exception(\"Stuck on processing status\") from 
None\n return response_json.get(\"response\") or response_json\n\n def _parse_response(self, response: requests.Response):\n \"\"\"Parse the response from the api\"\"\"\n try:\n response_json = response.json()\n if (\n response_json.get(\"status\") == Status.processing\n and response_json.get(\"request_type\", \"sync\") == \"async\"\n ):\n return None\n elif (\n response_json.get(\"status\") == Status.processing\n and response_json.get(\"request_type\", \"sync\") == \"sync\"\n ):\n response_json = self._get_output(\n response_json.get(\"data\").get(\"output_url\")\n )\n if response_json.get(\"success\"):\n return response_json.get(\"data\")\n else:\n raise InvalidRequestError(\n f\"Invalid request: {response_json.get('message')}\", response\n ) from None\n\n elif response_json.get(\"success\"):\n return response_json.get(\"data\")\n\n else:\n raise InvalidRequestError(\n f\"Invalid request: {response_json.get('message')}\", response\n ) from None\n\n except ValueError:\n raise InvalidRequestError(\n f\"Invalid request: {response.text}\", response\n ) from None\n\n def get(self, path: str, **kwargs) -> requests.Response:\n \"\"\"Make a get request\"\"\"\n return self._make_request(self.session.get, path, **kwargs)\n\n def post(self, path: str, data=None, **kwargs) -> requests.Response:\n \"\"\"Make a post request\"\"\"\n return self._make_request(self.session.post, path, json=data, **kwargs)\n\n def put(self, path: str, data=None, **kwargs) -> requests.Response:\n \"\"\"Make a put request\"\"\"\n return self._make_request(self.session.put, path, json=data, **kwargs)\n\n def delete(self, path: str, **kwargs) -> requests.Response:\n \"\"\"Make a delete request\"\"\"\n return self._make_request(self.session.delete, path, **kwargs)\n\n def patch(self, path: str, data=None, **kwargs) -> requests.Response:\n \"\"\"Make a patch request\"\"\"\n return self._make_request(self.session.patch, path, json=data, **kwargs)"
},
{
"identifier": "Video",
"path": "videodb/video.py",
"snippet": "class Video:\n def __init__(self, _connection, id: str, collection_id: str, **kwargs) -> None:\n self._connection = _connection\n self.id = id\n self.collection_id = collection_id\n self.stream_url = kwargs.get(\"stream_url\", None)\n self.player_url = kwargs.get(\"player_url\", None)\n self.name = kwargs.get(\"name\", None)\n self.description = kwargs.get(\"description\", None)\n self.thumbnail_url = kwargs.get(\"thumbnail_url\", None)\n self.length = float(kwargs.get(\"length\", 0.0))\n self.transcript = kwargs.get(\"transcript\", None)\n self.transcript_text = kwargs.get(\"transcript_text\", None)\n\n def __repr__(self) -> str:\n return (\n f\"Video(\"\n f\"id={self.id}, \"\n f\"collection_id={self.collection_id}, \"\n f\"stream_url={self.stream_url}, \"\n f\"player_url={self.player_url}, \"\n f\"name={self.name}, \"\n f\"description={self.description}, \"\n f\"thumbnail_url={self.thumbnail_url}, \"\n f\"length={self.length})\"\n )\n\n def __getitem__(self, key):\n return self.__dict__[key]\n\n def search(\n self,\n query: str,\n search_type: Optional[str] = SearchType.semantic,\n result_threshold: Optional[int] = None,\n score_threshold: Optional[int] = None,\n dynamic_score_percentage: Optional[int] = None,\n ) -> SearchResult:\n search = SearchFactory(self._connection).get_search(search_type)\n return search.search_inside_video(\n self.id,\n query,\n result_threshold,\n score_threshold,\n dynamic_score_percentage,\n )\n\n def delete(self) -> None:\n \"\"\"Delete the video\n\n :raises InvalidRequestError: If the delete fails\n :return: None if the delete is successful\n :rtype: None\n \"\"\"\n self._connection.delete(path=f\"{ApiPath.video}/{self.id}\")\n\n def generate_stream(self, timeline: Optional[list[tuple[int, int]]] = None) -> str:\n \"\"\"Generate the stream url of the video\n\n :param list timeline: The timeline of the video to be streamed. 
Defaults to None.\n :raises InvalidRequestError: If the get_stream fails\n :return: The stream url of the video\n :rtype: str\n \"\"\"\n if not timeline and self.stream_url:\n return self.stream_url\n\n stream_data = self._connection.post(\n path=f\"{ApiPath.video}/{self.id}/{ApiPath.stream}\",\n data={\n \"timeline\": timeline,\n \"length\": self.length,\n },\n )\n return stream_data.get(\"stream_url\", None)\n\n def generate_thumbnail(self):\n if self.thumbnail_url:\n return self.thumbnail_url\n thumbnail_data = self._connection.get(\n path=f\"{ApiPath.video}/{self.id}/{ApiPath.thumbnail}\"\n )\n self.thumbnail_url = thumbnail_data.get(\"thumbnail_url\")\n return self.thumbnail_url\n\n def _fetch_transcript(self, force: bool = False) -> None:\n if self.transcript and not force:\n return\n transcript_data = self._connection.get(\n path=f\"{ApiPath.video}/{self.id}/{ApiPath.transcription}\",\n params={\"force\": \"true\" if force else \"false\"},\n )\n self.transcript = transcript_data.get(\"word_timestamps\", [])\n self.transcript_text = transcript_data.get(\"text\", \"\")\n\n def get_transcript(self, force: bool = False) -> list[dict]:\n self._fetch_transcript(force)\n return self.transcript\n\n def get_transcript_text(self, force: bool = False) -> str:\n self._fetch_transcript(force)\n return self.transcript_text\n\n def index_spoken_words(self) -> None:\n \"\"\"Semantic indexing of spoken words in the video\n\n :raises InvalidRequestError: If the video is already indexed\n :return: None if the indexing is successful\n :rtype: None\n \"\"\"\n self._fetch_transcript()\n self._connection.post(\n path=f\"{ApiPath.video}/{self.id}/{ApiPath.index}\",\n data={\n \"index_type\": IndexType.semantic,\n },\n )\n\n def add_subtitle(self) -> str:\n subtitle_data = self._connection.post(\n path=f\"{ApiPath.video}/{self.id}/{ApiPath.workflow}\",\n data={\n \"type\": Workflows.add_subtitles,\n },\n )\n return subtitle_data.get(\"stream_url\", None)\n\n def insert_video(self, video, timestamp: float) -> str:\n \"\"\"Insert a video into another video\n\n :param Video video: The video to be inserted\n :param float timestamp: The timestamp where the video should be inserted\n :raises InvalidRequestError: If the insert fails\n :return: The stream url of the inserted video\n :rtype: str\n \"\"\"\n if timestamp > float(self.length):\n timestamp = float(self.length)\n\n pre_shot = Shot(self._connection, self.id, timestamp, \"\", 0, timestamp)\n inserted_shot = Shot(\n self._connection, video.id, video.length, \"\", 0, video.length\n )\n post_shot = Shot(\n self._connection,\n self.id,\n self.length - timestamp,\n \"\",\n timestamp,\n self.length,\n )\n all_shots = [pre_shot, inserted_shot, post_shot]\n\n compile_data = self._connection.post(\n path=f\"{ApiPath.compile}\",\n data=[\n {\n \"video_id\": shot.video_id,\n \"collection_id\": self.collection_id,\n \"shots\": [(float(shot.start), float(shot.end))],\n }\n for shot in all_shots\n ],\n )\n return compile_data.get(\"stream_url\", None)\n\n def play(self) -> str:\n \"\"\"Open the player url in the browser/iframe and return the stream url\n\n :return: The stream url\n :rtype: str\n \"\"\"\n return play_stream(self.stream_url)"
},
{
"identifier": "upload",
"path": "videodb/_upload.py",
"snippet": "def upload(\n _connection,\n file_path: str = None,\n url: str = None,\n name: Optional[str] = None,\n description: Optional[str] = None,\n callback_url: Optional[str] = None,\n) -> dict:\n if not file_path and not url:\n raise VideodbError(\"Either file_path or url is required\")\n if file_path and url:\n raise VideodbError(\"Only one of file_path or url is allowed\")\n\n if file_path:\n try:\n name = file_path.split(\"/\")[-1].split(\".\")[0] if not name else name\n upload_url_data = _connection.get(\n path=f\"{ApiPath.collection}/{_connection.collection_id}/{ApiPath.upload_url}\",\n params={\"name\": name},\n )\n upload_url = upload_url_data.get(\"upload_url\")\n with open(file_path, \"rb\") as file:\n files = {\"file\": (name, file)}\n response = requests.post(upload_url, files=files)\n response.raise_for_status()\n url = upload_url\n\n except FileNotFoundError as e:\n raise VideodbError(\"File not found\", cause=e)\n\n except HTTPError as e:\n raise VideodbError(\"Error while uploading file\", cause=e)\n\n upload_data = _connection.post(\n path=f\"{ApiPath.collection}/{_connection.collection_id}/{ApiPath.upload}\",\n data={\n \"url\": url,\n \"name\": name,\n \"description\": description,\n \"callback_url\": callback_url,\n },\n )\n return upload_data"
}
] | import logging
from typing import (
Optional,
)
from videodb._constants import (
ApiPath,
)
from videodb.collection import Collection
from videodb._utils._http_client import HttpClient
from videodb.video import Video
from videodb._upload import (
upload,
) | 4,129 |
logger = logging.getLogger(__name__)
class Connection(HttpClient):
def __init__(self, api_key: str, base_url: str) -> None:
self.api_key = api_key
self.base_url = base_url
self.collection_id = "default"
super().__init__(api_key, base_url)
def get_collection(self, collection_id: Optional[str] = "default") -> Collection:
|
logger = logging.getLogger(__name__)
class Connection(HttpClient):
def __init__(self, api_key: str, base_url: str) -> None:
self.api_key = api_key
self.base_url = base_url
self.collection_id = "default"
super().__init__(api_key, base_url)
def get_collection(self, collection_id: Optional[str] = "default") -> Collection: | collection_data = self.get(path=f"{ApiPath.collection}/{collection_id}") | 0 | 2023-12-18 15:20:04+00:00 | 8k |
IDEA-CCNL/Real-Gemini | real_gemini/agent.py | [
{
"identifier": "GPT4VTool",
"path": "real_gemini/tools/gpt4v_tool.py",
"snippet": "class GPT4VTool(object):\n _name_ = \"GPT-4-Vision\"\n _description_ = \"这个工具是GPT for vision的调用接口。用于图像到文本的理解。本工具的输入是一段文本指令和一张或者多张图片,请注意,工具的输入由一个JSON字符串组成,json包括两个key,question和image_input。question表示文本指令,image_input表示图片路径或存放图片的目录。例如:{{\\\"question\\\": QUESTION, \\\"image_input\\\": IMAGE_PATH_OR_DIR}}。A wrapper around OpenAI GPT4V API. Useful for image-to-text understanding when you need to generate text from some images and a text description. The input of this tool is a text prompt and one or more images. Please note, the input of the tool consists of a JSON string, the json includes two keys, question and image_input. The question represents text instructions, and image_input represents the image path or the directory where the images are stored. For example: {{\\\"question\\\": QUESTION, \\\"image_input\\\": IMAGE_PATH_OR_DIR}}.\"\n _return_direct_ = False\n\n def __init__(self):\n self._gpt4v = ChatOpenAI(\n model=\"gpt-4-vision-preview\",\n max_tokens=256)\n self.max_dialog_turn = 3\n self.history = ChatMessageHistory()\n self.history.add_message(\n SystemMessage(\n content=[\n {\"type\": \"text\", \"text\": _OPEN_AI_SYSTEM_PROMPT}\n ]\n )\n )\n \n def inference(self, input_str: str):\n input_dict = json.loads(input_str)\n image_path = input_dict[\"image_input\"]\n if os.path.isdir(image_path):\n image_paths = [\n os.path.join(image_path, path) for path in os.listdir(image_path)]\n else:\n image_paths = [image_path]\n base64_images = []\n for image_path in image_paths:\n base64_image = image2base64(load_image(image_path))\n base64_images.append(f\"data:image/jpeg;base64,{base64_image}\")\n\n human_contents = []\n human_contents.append({\"type\": \"text\", \"text\": input_dict[\"question\"]})\n for base64_image in base64_images:\n human_contents.append({\n \"type\": \"image_url\",\n \"image_url\": {\"url\": base64_image}\n }) # images\n self.history.add_message(HumanMessage(content=human_contents))\n\n response_msg = self._gpt4v.invoke(self.history.messages)\n # 历史只保留一张图\n self.history.messages.pop()\n human_contents = []\n human_contents.append({\"type\": \"text\", \"text\": input_dict[\"question\"]})\n human_contents.append({\"type\": \"image_url\", \"image_url\": {\"url\": base64_images[-1]}})\n self.history.add_message(HumanMessage(content=human_contents))\n self.history.add_message(response_msg)\n # 只保留self.max_dialog_turn轮对话\n if len(self.history.messages) > 1 + 2 * self.max_dialog_turn:\n self.history.messages = [self.history.messages[0]] + self.history.messages[-2 * self.max_dialog_turn: ]\n # print(self.history.messages)\n return response_msg.content"
},
{
"identifier": "TaiyiGeneralTool",
"path": "real_gemini/tools/image_generation_tool.py",
"snippet": "class TaiyiGeneralTool(object):\n _name_ = \"taiyi general image generation\"\n _description_ = \"Taiyi General的API,用于从文本生成图像。当你需要从文本描述生成图像时非常有用。输入应该是文本,即图像描述。A wrapper around Taiyi General API for text to image generation. Useful for when you need to generate images from a text description. Input should be text, i.e, an image description.\"\n _return_direct_ = True\n\n def __init__(self):\n self.prompter = ChatOpenAI(\n model=\"gpt-3.5-turbo\",\n max_tokens=256)\n self.host = os.getenv(\"IMAGE_GENERATION_SERVER_HOST\")\n self.port = os.getenv(\"IMAGE_GENERATION_SERVER_PORT\")\n \n def _upgrade_prompt(self, prompt):\n messages = []\n messages.append(\n SystemMessage(\n content=[\n {\"type\": \"text\", \"text\": \"我正在使用一个Stable Diffusion的AI图像生成工具,我想让你充当我的prompt优化生成器。在我想生成的主题后,请帮我添加各种关键词,使得我的主题的描述更加详细,添加的关键词包括:主体、背景效果、风格、拍摄方式。例如,如果我输入“跑车”,你将生成关键词,如:“跑车,高清,4k,真实细致的跑车摄影,速度动态模糊,赛车场,城市环境,风景道路,戏剧性的天空”\"}\n ]\n )\n )\n messages.append(HumanMessage(content=prompt))\n\n response_msg = self.prompter.invoke(messages)\n new_prompt = response_msg.content\n return new_prompt\n\n def inference(self, inputs):\n url = f\"http://{self.host}:{self.port}/taiyi_xl_general_base64/\"\n headers = {\"Content-Type\": \"application/json\"}\n new_prompt = self._upgrade_prompt(inputs)\n print(\"new prompt:\", new_prompt)\n data = {\"prompt\": new_prompt}\n response = requests.post(url, headers=headers, data=json.dumps(data))\n response = response.json()\n b64_image = response[\"image_base64\"]\n \n # write to file\n save_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\n save_dir = os.path.join(save_dir, \"test\", \"outputs\")\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n md5 = hashlib.md5()\n md5.update(inputs.encode('utf-8'))\n filename = os.path.join(save_dir, md5.hexdigest() + \".png\")\n save_or_show_image(b64_image, filename)\n \n print(\"image filename:\", filename)\n\n result = {\"text\": \"好的,我用太乙为你生成了一张图片。\", \"image\": filename}\n return json.dumps(result, ensure_ascii=False)"
},
{
"identifier": "Text2MusicTool",
"path": "real_gemini/tools/music_tool.py",
"snippet": "class Text2MusicTool(object):\n _name_ = \"Text2Music\"\n _description_ = \"这个工具是从文本生成音乐的调用接口,它可以根据一段文字,生成符合这段文字内容的音乐风格。本工具的输入是一段文本指令。This tool is an API that generates music from text. It can create music that matches the style of the given text content. The input for this tool is a text command.\"\n _return_direct_ = True\n\n def __init__(self):\n self.translator = ChatOpenAI(\n model=\"gpt-3.5-turbo\",\n max_tokens=256)\n self.host = os.getenv(\"MUSIC_SERVER_HOST\")\n self.port = os.getenv(\"MUSIC_SERVER_PORT\")\n \n def inference(self, input_str: str):\n messages = []\n messages.append(\n SystemMessage(\n content=[\n {\"type\": \"text\", \"text\": \"你是一个翻译专家,请将我输入的中文翻译成英文。\"}\n ]\n )\n )\n messages.append(HumanMessage(content=input_str))\n\n response_msg = self.translator.invoke(messages)\n input_str_en = response_msg.content\n # print(input_str_en)\n\n url = f\"http://{self.host}:{self.port}/text_to_music\"\n data = {\"text\": input_str_en}\n music_response = requests.post(url, data=data)\n music_response = music_response.json()\n\n # write to file\n save_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\n save_dir = os.path.join(save_dir, \"test\", \"outputs\")\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n md5 = hashlib.md5()\n md5.update(input_str_en.encode('utf-8'))\n filename = os.path.join(save_dir, md5.hexdigest() + \".wav\")\n \n raw_data = music_response[\"audio\"]\n sampling_rate = music_response[\"sampling_rate\"]\n scipy.io.wavfile.write(\n filename,\n rate=sampling_rate,\n data=np.frombuffer(base64.b64decode(raw_data), np.float32),\n )\n print(\"music filename:\", filename)\n\n result = {\"text\": \"好的,为你生成了一段音乐。\", \"audio\": filename}\n return json.dumps(result, ensure_ascii=False)"
},
{
"identifier": "Image2PoseTool",
"path": "real_gemini/tools/controlnet_tool.py",
"snippet": "class Image2PoseTool(object):\n _name_=\"Pose-Detection-On-Image\"\n _description_=\"\"\"当你想要检测图片中的人体姿态时很有用。比如:生成这张图片中的人体姿态,或者从这张图片中生成人体姿态。输入到这个工具的应该是一个字符串,表示图片的路径或者图片所在的文件夹路径。useful when you want to detect the human pose of the image. like: generate human poses of this image, or generate a pose image from this image. The input to this tool should be a string, representing the image_dir or image_paths.\"\"\"\n _return_direct_ = False\n \n def __init__(self):\n self.host = \"0.0.0.0\"\n self.port = 6679\n\n\n def inference(self, image_input: str):\n url = f\"http://{self.host}:{self.port}/image_to_pose\"\n data = {\"image_input\": image_input}\n pose_response = requests.post(url, data=data)\n return pose_response.text"
},
{
"identifier": "SegmentingTool",
"path": "real_gemini/tools/sam_tool.py",
"snippet": "class SegmentingTool(object):\n _name_=\"Segment the Image\"\n _description_=\"当你想要对图片中的所有物体进行分割,但是不想对某个物体进行分割时很有用。比如:分割这张图片中的所有物体,或者在这张图片上生成分割结果,或者对这张图片进行分割,或者分割这张图片中的所有物体。输入到这个工具的应该是一个字符串,表示图片的路径或者图片所在的文件夹路径。useful when you want to segment all the part of the image, but not segment a certain object.like: segment all the object in this image, or generate segmentations on this image, or segment the image, or perform segmentation on this image, or segment all the object in this image. The input to this tool should be a string, representing the image_dir or image_paths.\"\n _return_direct_ = False\n \n def __init__(self):\n # self.host = \"localhost\"\n self.host = \"0.0.0.0\"\n self.port = 6680\n\n def inference(self,image_input: str):\n url = f\"http://{self.host}:{self.port}/segmenting\"\n data = {\"image_input\": image_input}\n seg_response = requests.post(url, data=data)\n return seg_response.text"
},
{
"identifier": "Text2BoxTool",
"path": "real_gemini/tools/dino_tool.py",
"snippet": "class Text2BoxTool(object):\n _name_=\"Detect the Give Object\"\n _description_=\"\"\"当你只想检测或者找出图片中的某个物体时很有用。输入到这个工具的应该是一个被逗号分隔成两部分的字符串,分别表示图片的路径或者图片所在的文件夹路径和要检测的物体。useful when you only want to detect or find out given objects in the picture. The input to this tool should be a comma separated string of two, representing the image_dir or image_paths and the object to be detected, respectively.\"\"\"\n _return_direct_ = False\n \n def __init__(self):\n self.host = \"0.0.0.0\"\n self.port = 6681\n \n def inference(self, inputs):\n url = f\"http://{self.host}:{self.port}/text_to_box\"\n data = {\"inputs\": inputs}\n box_response = requests.post(url, data=data)\n return box_response.text"
},
{
"identifier": "ImageRemoveTool",
"path": "real_gemini/tools/imageediting_tool.py",
"snippet": "class ImageRemoveTool(object):\n _name_ = \"Remove Something From The Image\"\n _description_ = \"当你想要从图片中移除某个物体或者某个物体的某个部分时很有用。输入到这个工具的应该是一个被逗号分隔成两部分的字符串,分别表示图片的路径或者图片所在的文件夹路径和要移除的物体。useful when you want to remove the object or something from the photofrom its description or location. The input to this tool should be a comma separated string of two, representing the image_dir or image_paths and the object need to be removed.\"\n _return_direct_ = False\n\n def __init__(self):\n self.host = \"0.0.0.0\"\n # self.host = \"localhost\"\n self.port = 6682\n \n def inference(self, inputs):\n url = f\"http://{self.host}:{self.port}/image_remove\"\n data = {\"input\": inputs}\n update_response = requests.post(url, data=data)\n return update_response.text"
},
{
"identifier": "ImageReplaceTool",
"path": "real_gemini/tools/imageediting_tool.py",
"snippet": "class ImageReplaceTool(object):\n _name_=\"Replace Something From The Image\"\n _description_=\"当你想要用另一个物体替换图片中的某个物体或者某个物体的某个部分时很有用。输入到这个工具的应该是一个被逗号分隔成三部份的字符串,分别表示图片的路径或者图片所在的文件夹路径,要替换的物体以及要替换成的物体。\\nuseful when you want to replace an object from the object description or location with another object from its description.\\nThe input to this tool should be a comma separated string of three, representing the image_dir or image_paths, the object to be replaced, the object to be replaced with.\"\n _return_direct_ = False\n\n def __init__(self):\n self.host = \"0.0.0.0\"\n # self.host = \"localhost\"\n self.port = 6682\n\n def inference(self, inputs):\n url = f\"http://{self.host}:{self.port}/image_replace\"\n data = {\"input\": inputs}\n update_response = requests.post(url, data=data)\n return update_response.text"
},
{
"identifier": "WeatherTool",
"path": "real_gemini/tools/weather_tool.py",
"snippet": "class WeatherTool(object):\n _name_ = \"WeatherAPI\"\n _description_ = \"这个工具是查询当前和未来天气的调用接口,它可以根据一段文字,这个文字包含一个城市,这个接口可以查询这个城市的天气,注意,本工具的输入是一个字符串。This tool is a weather query API that can retrieve the current and future weather based on a given text, which includes a city name. The API is capable of querying the weather for the specified city. Please note that the input for this tool is a string.\"\n _return_direct_ = False\n\n def __init__(self):\n self.gaode_api_key = os.getenv(\"GAODE_API_KEY\")\n \n def inference(self, input_str: str):\n city_name = input_str\n district_name = input_str\n params = self._get_params(city_name, district_name)\n return self._process_response(self._results(params))\n\n def _get_params(self, city_name: str, district_name: str) -> Dict[str, str]:\n \"\"\"Get parameters for GaoDeAPI.\"\"\"\n adcode = self._get_adcode(city_name, district_name)\n params = {\n \"api_key\": self.gaode_api_key,\n \"adcode\": adcode\n }\n print(params)\n return params\n\n def _results(self, params: dict) -> dict:\n \"\"\"Run query through GaoDeAPI and return the raw result.\"\"\"\n # # with HiddenPrints():\n response = requests.get(\"https://restapi.amap.com/v3/weather/weatherInfo?\", {\n \"key\": self.gaode_api_key,\n \"city\": params[\"adcode\"],\n \"extensions\": \"all\",\n \"output\": \"JSON\"\n })\n res = json.loads(response.content)\n return res\n\n def _process_response(self, res: dict) -> str:\n \"\"\"Process response from GaoDeAPI.\"\"\"\n if res[\"status\"] == '0':\n return \"输入的城市信息可能有误或未提供城市信息\"\n if res[\"forecasts\"] is None or len(res[\"forecasts\"]) == 0:\n return \"输入的城市信息可能有误或未提供城市信息\"\n res[\"currentTime\"] = datetime.datetime.now()\n return json.dumps(res[\"forecasts\"], ensure_ascii=False)\n\n def _get_adcode(self, city_name: str, district_name: str) -> str:\n \"\"\"Obtain the regional code of a city based on its name and district/county name.\"\"\"\n # 读取Excel文件\n work_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\n df = pd.read_excel(\n os.path.join(work_dir, \"test/AMap_adcode_citycode.xlsx\"), sheet_name=\"sheet1\"\n )\n # print(df)\n # 将所有NaN值转换成0\n df = df.dropna()\n if district_name is not None and district_name != '':\n # 根据'city_name'列检索数据\n result = df[df['中文名'].str.contains(district_name)]\n json_data = result.to_json(orient='records', force_ascii=False)\n # 解析 JSON 数据\n json_array = json.loads(json_data)\n\n # 如果区域名称为空,用城市名称去查\n if (district_name is None or district_name == '') and city_name != '':\n # 根据'city_name'列检索数据\n result = df[df['中文名'].str.contains(city_name)]\n json_data = result.to_json(orient='records', force_ascii=False)\n # 解析 JSON 数据\n json_array = json.loads(json_data)\n\n # 如果没数据直接返回空\n if len(json_array) == 0:\n # 根据'citycode'列检索数据\n result = df[df['中文名'].str.contains(city_name)]\n json_data = result.to_json(orient='records', force_ascii=False)\n # 解析 JSON 数据\n json_array = json.loads(json_data)\n\n # 如果只有一条直接返回\n if len(json_array) == 1:\n return json_array[0]['adcode']\n\n # 如果有多条再根据district_name进行检索\n if len(json_array) > 1:\n for obj in json_array:\n if district_name is not None and district_name != '' and district_name in obj['中文名']:\n return obj['adcode']\n if city_name in obj['district_name']:\n return obj['adcode']\n return \"输入的城市信息可能有误或未提供城市信息\""
},
{
"identifier": "ConvoOutputParser",
"path": "real_gemini/utils/output_parser.py",
"snippet": "class ConvoOutputParser(AgentOutputParser):\n \"\"\"Output parser for the conversational agent.\"\"\"\n\n ai_prefix: str = \"AI\"\n \"\"\"Prefix to use before AI output.\"\"\"\n\n def get_format_instructions(self) -> str:\n return FORMAT_INSTRUCTIONS\n\n def parse(self, text: str) -> Union[AgentAction, AgentFinish]:\n if f\"{self.ai_prefix}:\" in text:\n return AgentFinish(\n {\"output\": text.split(f\"{self.ai_prefix}:\")[-1].strip()}, text\n )\n if \"###\" in text:\n return AgentFinish(\n {\"output\": \"###\"}, text\n )\n regex = r\"Action: (.*?)[\\n]*Action Input: ([\\s\\S]*)\"\n match = re.search(regex, text, re.DOTALL)\n if not match:\n raise OutputParserException(f\"Could not parse LLM output: `{text}`\")\n action = match.group(1)\n action_input = match.group(2)\n return AgentAction(action.strip(), action_input.strip(\" \").strip('\"'), text)\n\n @property\n def _type(self) -> str:\n return \"conversational\""
},
{
"identifier": "PREFIX",
"path": "real_gemini/utils/agent_prompt.py",
"snippet": "PREFIX = \"\"\"Assistant is a large language model trained by IDEA-CCNL.\n\nPlease note, the scenario that the assistant is facing is: a user is interacting with the assistant through a camera for Q&A. The system will first convert the user's voice into text and then input it into the assistant. At the same time, the system will save keyframe images to a directory for image understanding. The assistant needs to conduct multimodal Q&A and tool invocation based on the images and text.\n\nAssistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text or images, audio, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n\nOverall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific multimodal question or just want to have a conversation about a particular topic, Assistant is here to assist.\n\n\n\nTOOLS:\n------\n\nAssistant has access to the following tools:\"\"\""
},
{
"identifier": "FORMAT_INSTRUCTIONS",
"path": "real_gemini/utils/agent_prompt.py",
"snippet": "FORMAT_INSTRUCTIONS = \"\"\"To use a tool, please use the following format:\n\n```\nThought: Do I need to use a tool? Yes\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n```\n\nWhen you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:\n\n```\nThought: Do I need to use a tool? No, and {ai_prefix}: [your response here]\n```\nMAKE SURE your response MUST in Chinese.\n\nSince the text you input is obtained from the ASR service, there may be some recognition noise. If your input is some meaningless text or incoherent sentences, please do not call the tool and directly return \"###\".\n由于你输入的文字是由ASR服务得到的,所以可能存在一些识别噪音。假如你的输入是一些没有意义的文字或者不通顺的句子时,请不要调用工具,并直接返回\\\"###\\\"。\n\nYour input is an text instruction and key frame images captured by an AI multimodal assistant. Please answer my questions based on these images. Please note that the images and questions may not be related, and you need to make your own judgment.\n\"\"\""
},
{
"identifier": "SUFFIX",
"path": "real_gemini/utils/agent_prompt.py",
"snippet": "SUFFIX = \"\"\"Begin!\n\nPrevious conversation history:\n{chat_history}\n\nNew input: {input}\n{agent_scratchpad}\"\"\""
}
] | import os
import re
import json
from langchain.chat_models import ChatOpenAI
from langchain.agents.tools import Tool
from langchain.agents import initialize_agent, load_tools, AgentType
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationBufferMemory
from .tools.gpt4v_tool import GPT4VTool
from .tools.image_generation_tool import TaiyiGeneralTool
from .tools.music_tool import Text2MusicTool
from .tools.controlnet_tool import Image2PoseTool
from .tools.sam_tool import SegmentingTool
from .tools.dino_tool import Text2BoxTool
from .tools.imageediting_tool import ImageRemoveTool, ImageReplaceTool
from .tools.weather_tool import WeatherTool
from .utils.output_parser import ConvoOutputParser
from .utils.agent_prompt import PREFIX, FORMAT_INSTRUCTIONS, SUFFIX | 5,879 | #encoding=utf8
REGISTERED_TOOL_CLASSES = [
GPT4VTool,
TaiyiGeneralTool,
Text2MusicTool,
| #encoding=utf8
REGISTERED_TOOL_CLASSES = [
GPT4VTool,
TaiyiGeneralTool,
Text2MusicTool, | SegmentingTool, | 4 | 2023-12-15 04:09:37+00:00 | 8k |
aiim-research/GRETEL | src/evaluation/evaluator_manager_do.py | [
{
"identifier": "DatasetFactory",
"path": "src/dataset/dataset_factory.py",
"snippet": "class DatasetFactory(Factory):\n\n def get_dataset(self, dataset_snippet):\n return self._get_object(dataset_snippet)\n \n def get_datasets(self, config_list):\n return [self.get_dataset(obj) for obj in config_list]"
},
{
"identifier": "EvaluationMetricFactory",
"path": "src/evaluation/evaluation_metric_factory.py",
"snippet": "class EvaluationMetricFactory:\n\n def __init__(self,config_dict) -> None:\n self._config_dict = config_dict\n self._evaluation_metric_id_counter = 0\n\n def get_evaluation_metric_by_name(self, metric_dict) -> EvaluationMetric:\n metric_name = metric_dict['name']\n metric_parameters = metric_dict['parameters']\n\n if(metric_name == 'graph_edit_distance'):\n return self.get_graph_edit_distance_metric(config_dict=metric_dict)\n\n elif metric_name == 'oracle_calls':\n return self.get_oracle_calls_metric(config_dict=metric_dict)\n\n elif metric_name == 'sparsity':\n return self.get_sparsity_metric(config_dict=metric_dict)\n\n elif metric_name == 'correctness':\n return self.get_correctness_metric(config_dict=metric_dict)\n\n elif metric_name == 'fidelity':\n return self.get_fidelity_metric(config_dict=metric_dict)\n \n elif metric_name == 'fidelity_node':\n return self.get_fidelity_node_metric(config_dict=metric_dict)\n\n elif metric_name == 'oracle_accuracy':\n return self.get_oracle_accuracy_metric(config_dict=metric_dict)\n\n elif metric_name == 'smiles_levenshtein':\n return self.get_smiles_levenshtein_metric(config_dict=metric_dict)\n \n elif metric_name == 'oracle_accuracy_node':\n return self.get_oracle_accuracy_node_metric(config_dict=metric_dict)\n \n elif metric_name == 'dumper':\n return self.get_dumper_metric(config_dict=metric_dict)\n \n elif metric_name == 'runtime':\n return self.get_runtime_metric(config_dict=metric_dict)\n\n else:\n raise ValueError('''The provided evaluation metric name does not match any evaluation\n metric provided by the factory''')\n \n def get_runtime_metric(self, config_dict=None) -> EvaluationMetric:\n result = RuntimeMetric(config_dict)\n return result\n\n def get_dumper_metric(self, config_dict=None) -> EvaluationMetric:\n result = InstancesDumper(self._config_dict,config_dict)\n return result\n\n def get_correctness_metric(self, config_dict=None) -> EvaluationMetric:\n result = CorrectnessMetric(config_dict)\n return result\n\n def get_oracle_calls_metric(self, config_dict=None) -> EvaluationMetric:\n result = OracleCallsMetric(config_dict)\n return result\n\n def get_graph_edit_distance_metric(self, node_insertion_cost=1.0, node_deletion_cost=1.0, \n edge_insertion_cost=1.0, edge_deletion_cost=1.0, undirected=True, config_dict=None) -> EvaluationMetric:\n \n result = GraphEditDistanceMetric(node_insertion_cost, node_deletion_cost, edge_insertion_cost, \n edge_deletion_cost, undirected, config_dict)\n\n return result\n\n\n def get_sparsity_metric(self, config_dict=None) -> EvaluationMetric:\n result = SparsityMetric(config_dict)\n return result\n\n\n def get_fidelity_metric(self, config_dict=None) -> EvaluationMetric:\n result = FidelityMetric(config_dict)\n return result\n\n \n def get_fidelity_node_metric(self, config_dict=None) -> EvaluationMetric:\n result = FidelityNodeMetric(config_dict)\n return result\n\n def get_oracle_accuracy_metric(self, config_dict=None) -> EvaluationMetric:\n result = OracleAccuracyMetric(config_dict)\n return result\n\n def get_oracle_accuracy_node_metric(self, config_dict=None) -> EvaluationMetric:\n result = OracleAccuracyNodeMetric(config_dict)\n return result\n \n def get_smiles_levenshtein_metric(self, config_dict=None) -> EvaluationMetric:\n result = SmilesLevenshteinMetric(config_dict)\n return result"
},
{
"identifier": "Evaluator",
"path": "src/evaluation/evaluator_base.py",
"snippet": "class Evaluator(ABC):\n _logger = GLogger.getLogger()\n\n def __init__(self,scope, data, oracle: Oracle, explainer: Explainer, evaluation_metrics, results_store_path, run_number=0) -> None:\n super().__init__()\n self._scope = scope\n self._name = 'Evaluator_for_' + explainer.name + '_using_' + oracle.name\n self._data = data\n self._oracle = oracle\n self._oracle.reset_call_count()\n self._explainer = explainer\n self._results_store_path = results_store_path\n self._evaluation_metrics = evaluation_metrics\n self._run_number = run_number\n self._explanations = []\n \n \n \n\n # Building the config file to write into disk\n evaluator_config = {'dataset': clean_cfg(data.local_config), 'oracle': clean_cfg(oracle.local_config), 'explainer': clean_cfg(explainer.local_config), 'metrics': []}\n evaluator_config['scope']=self._scope\n evaluator_config['run_id']=self._run_number\n evaluator_config['fold_id']=self._explainer.fold_id\n evaluator_config['experiment']=data.context.conf[\"experiment\"]\n evaluator_config['store_paths']=data.context.conf[\"store_paths\"]\n evaluator_config['orgin_config_paths'] = data.context.config_file\n \n \n for metric in evaluation_metrics:\n evaluator_config['metrics'].append(metric._config_dict)\n # creatig the results dictionary with the basic info\n self._results = {}\n self._complete = {'config':evaluator_config, \"results\":self._results}\n\n \n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, new_name):\n self._name = new_name\n\n @property\n def dataset(self):\n return self._data\n\n @dataset.setter\n def dataset(self, new_dataset):\n self._data = new_dataset\n\n @property\n def explanations(self):\n return self._explanations\n\n @explanations.setter\n def explanations(self, new_explanations_list):\n self._explanations = new_explanations_list\n\n\n def get_instance_explanation_pairs(self):\n # Check if the explanations were generated already\n if len(self.explanations) < 1:\n return None\n\n # iterates over the original instances and the explanations\n n_ins = len(self.dataset.instances)\n result = []\n for i in range(0, n_ins):\n result.append((self.dataset.instances[i], self.explanations[i]))\n\n return result\n\n\n def get_instance_and_counterfactual_classifications(self):\n # Check if the explanations were generated already\n if len(self.explanations) < 1:\n return None\n\n # iterates over the original instances and the explanations\n n_ins = len(self.dataset.instances)\n result = []\n for i in range(0, n_ins):\n label_inst = self._oracle.predict(self.dataset.instances[i])\n label_cf = self._oracle.predict(self.explanations[i])\n self._oracle._call_counter -= 2 \n\n result.append({'instance_id': self.dataset.instances[i].id,\n 'ground_truth_label': self.dataset.instances[i].graph_label,\n 'instance_label': label_inst,\n 'counterfactual_label': label_cf})\n\n return result\n\n\n def evaluate(self):\n for m in self._evaluation_metrics:\n self._results[Context.get_fullname(m)] = []\n\n # If the explainer was trained then evaluate only on the test set, else evaluate on the entire dataset\n fold_id = self._explainer.fold_id\n if fold_id > -1 :\n test_indices = self.dataset.splits[fold_id]['test'] \n test_set = [i for i in self.dataset.instances if i.id in test_indices]\n else:\n test_set = self.dataset.instances \n\n for inst in test_set:\n self._logger.info(\"Evaluating instance with id %s\", str(inst.id))\n\n for metric in self._evaluation_metrics:\n if(metric._special):\n val, counterfactual = 
metric.evaluate(inst, None, self._oracle,self._explainer,self._data)\n self._results[Context.get_fullname(metric)].append({\"id\":str(inst.id),\"value\":val})\n self._explanations.append(counterfactual)\n\n self._real_evaluate(inst, counterfactual,self._oracle,self._explainer,self._data)\n self._logger.info('evaluated instance with id %s', str(inst.id))\n\n self._logger.info(self._results)\n \n self.write_results(fold_id)\n\n\n def _real_evaluate(self, instance, counterfactual, oracle = None, explainer=None, dataset=None):\n is_alt = False\n if (oracle is None):\n is_alt = True\n oracle = self._oracle\n\n for metric in self._evaluation_metrics:\n if(not metric._special): \n m_result = metric.evaluate(instance, counterfactual, oracle, explainer,dataset)\n self._results[Context.get_fullname(metric)].append({\"id\":str(instance.id),\"value\":m_result})\n\n\n def write_results(self,fold_id):\n hash_info = {\"scope\":self._scope,\n \"dataset\":self._data.name,\n \"oracle\":self._oracle.name,\n \"explainer\":self._explainer.name\n }\n \n self._complete['hash_ids']=hash_info\n\n output_path = os.path.join(self._results_store_path, self._scope)\n if not os.path.exists(output_path):\n os.mkdir(output_path)\n\n \n output_path = os.path.join(output_path, self._data.name)\n if not os.path.exists(output_path):\n os.mkdir(output_path)\n\n output_path = os.path.join(output_path, self._oracle.name)\n if not os.path.exists(output_path):\n os.mkdir(output_path)\n\n output_path = os.path.join(output_path, self._explainer.name)\n if not os.path.exists(output_path):\n os.mkdir(output_path)\n\n results_uri = os.path.join(output_path, 'results_' + str(fold_id) + '_'+ str(self._run_number)+'.json')\n\n with open(results_uri, 'w') as results_writer:\n results_writer.write(jsonpickle.encode(self._complete))"
},
{
"identifier": "ExplainerFactory",
"path": "src/explainer/explainer_factory.py",
"snippet": "class ExplainerFactory(Factory):\n\n def get_explainer(self, explainer_snippet, dataset, oracle):\n inject_dataset(explainer_snippet, dataset)\n inject_oracle(explainer_snippet, oracle) \n return self._get_object(explainer_snippet)\n \n def get_explainers(self, config_list, dataset, oracle):\n return [self.get_explainer(obj, dataset, oracle) for obj in config_list]"
},
{
"identifier": "EmbedderFactory",
"path": "src/oracle/embedder_factory.py",
"snippet": "class EmbedderFactory(Factory):\n\n def get_embedder(self, embedder_snippet, dataset):\n inject_dataset(embedder_snippet, dataset) \n return self._get_object(embedder_snippet)"
},
{
"identifier": "OracleFactory",
"path": "src/oracle/oracle_factory.py",
"snippet": "class OracleFactory(Factory): \n def get_oracle(self, oracle_snippet, dataset):\n inject_dataset(oracle_snippet, dataset)\n return self._get_object(oracle_snippet)\n \n def get_oracles(self, config_list, dataset):\n return [self.get_oracle(obj, dataset) for obj in config_list]\n \n \"\"\"def get_oracle_by_name(self, oracle_dict, dataset: Dataset, emb_factory: EmbedderFactory) -> Oracle:\n\n oracle_name = oracle_dict['name']\n oracle_parameters = oracle_dict['parameters']\n\n # Check if the oracle is a KNN classifier\n if oracle_name == 'knn':\n if not 'k' in oracle_parameters:\n raise ValueError('''The parameter \"k\" is required for knn''')\n if not 'embedder' in oracle_parameters:\n raise ValueError('''knn oracle requires an embedder''')\n\n emb = emb_factory.get_embedder_by_name(oracle_parameters['embedder'], dataset)\n\n return self.get_knn(dataset, emb, oracle_parameters['k'], -1, oracle_dict)\n\n # Check if the oracle is an SVM classifier\n elif oracle_name == 'svm':\n if not 'embedder' in oracle_parameters:\n raise ValueError('''svm oracle requires an embedder''')\n\n emb = emb_factory.get_embedder_by_name(oracle_parameters['embedder'], dataset)\n\n return self.get_svm(dataset, emb, -1, oracle_dict)\n\n # Check if the oracle is an ASD Custom Classifier\n elif oracle_name == 'asd_custom_oracle':\n return self.get_asd_custom_oracle(oracle_dict)\n\n # Check if the oracle is an ASD Custom Classifier\n elif oracle_name == 'gcn-tf':\n return self.get_gcn_tf(dataset, -1, oracle_dict)\n\n elif oracle_name == 'gcn_synthetic_pt':\n return self.get_pt_syn_oracle(dataset, -1, oracle_dict)\n \n # Check if the oracle is a Triangles-Squares Custom Classifier\n elif oracle_name == 'trisqr_custom_oracle':\n return self.get_trisqr_custom_oracle(oracle_dict)\n\n # Check if the oracle is a Tree-Cycles Custom Classifier\n elif oracle_name == 'tree_cycles_custom_oracle':\n return self.get_tree_cycles_custom_oracle(oracle_dict) \n \n elif oracle_name == 'cf2':\n if not 'converter' in oracle_parameters:\n raise ValueError('''The parameter \"converter\" is required for cf2''')\n \n converter_name = oracle_parameters['converter'].get('name')\n if not converter_name:\n raise ValueError('''The parameter \"name\" for the converter is required for cf2''')\n \n converter = None\n feature_dim = oracle_parameters.get('feature_dim', 36)\n weight_dim = oracle_parameters.get('weight_dim', 28)\n if converter_name == 'tree_cycles':\n converter = CF2TreeCycleConverter(feature_dim=feature_dim)\n else:\n converter = DefaultFeatureAndWeightConverter(feature_dim=feature_dim,\n weight_dim=weight_dim)\n lr = oracle_parameters.get('lr', 1e-3)\n batch_size_ratio = oracle_parameters.get('batch_size_ratio', .1)\n weight_decay = oracle_parameters.get('weight_decay', 5e-4)\n epochs = oracle_parameters.get('epochs', 100)\n fold_id = oracle_parameters.get('fold_id', 0)\n threshold = oracle_parameters.get('threshold', .5)\n \n return self.get_cf2(dataset, converter, feature_dim, weight_dim, lr,\n weight_decay, epochs, batch_size_ratio,\n threshold, fold_id, oracle_dict)\n # If the oracle name does not match any oracle in the factory\n else:\n raise ValueError('''The provided oracle name does not match any oracle provided by the factory''')\n\n def get_cf2(self, dataset: Dataset, converter: ConverterAB, in_dim: int, h_dim: int, lr: float,\n weight_decay: float, epochs: int, batch_size_ratio: float, threshold: float, \n fold_id: int, config_dict=None) -> Oracle:\n clf = CF2Oracle(id=self._oracle_id_counter,\n 
oracle_store_path=self._oracle_store_path,\n converter=converter,\n in_dim=in_dim,\n h_dim=h_dim,\n lr=lr,\n weight_decay=weight_decay,\n epochs=epochs,\n threshold=threshold,\n batch_size_ratio=batch_size_ratio,\n fold_id=fold_id,\n config_dict=config_dict)\n self._oracle_id_counter += 1\n clf.fit(dataset, split_i=fold_id)\n return clf\n\n def get_knn(self, data: Dataset, embedder: Embedder, k, split_index=-1, config_dict=None) -> Oracle:\n embedder.fit(data)\n clf = KnnOracle(id=self._oracle_id_counter,oracle_store_path=self._oracle_store_path, emb=embedder, k=k, config_dict=config_dict)\n self._oracle_id_counter +=1\n clf.fit(dataset=data, split_i=split_index)\n return clf\n\n def get_svm(self, data: Dataset, embedder: Embedder, split_index=-1, config_dict=None) -> Oracle:\n embedder.fit(data)\n clf = SvmOracle(id=self._oracle_id_counter, oracle_store_path=self._oracle_store_path, emb=embedder, config_dict=config_dict)\n self._oracle_id_counter +=1\n clf.fit(dataset=data, split_i=split_index)\n return clf\n\n def get_asd_custom_oracle(self, config_dict=None) -> Oracle:\n clf = ASDCustomOracle(id=self._oracle_id_counter, oracle_store_path=self._oracle_store_path, config_dict=config_dict)\n self._oracle_id_counter +=1\n return clf\n\n def get_gcn_tf(self, data: Dataset, split_index=-1, config_dict=None) -> Oracle:\n clf = TfGCNOracle(id=self._oracle_id_counter, oracle_store_path=self._oracle_store_path, config_dict=config_dict)\n self._oracle_id_counter +=1\n clf.fit(data, split_index)\n return clf\n \n def get_pt_syn_oracle(self, data: Dataset, split_index=-1, config_dict=None) -> Oracle:\n clf = SynNodeOracle(id=self._oracle_id_counter, oracle_store_path=self._oracle_store_path, config_dict=config_dict)\n self._oracle_id_counter +=1\n clf.fit(data, split_index)\n return clf\n \n def get_trisqr_custom_oracle(self, config_dict=None) -> Oracle:\n clf = TrianglesSquaresCustomOracle(id=self._oracle_id_counter, oracle_store_path=self._oracle_store_path, config_dict=config_dict)\n self._oracle_id_counter +=1\n return clf\n\n def get_tree_cycles_custom_oracle(self, config_dict=None) -> Oracle:\n clf = TreeCyclesCustomOracle(id=self._oracle_id_counter, oracle_store_path=self._oracle_store_path, config_dict=config_dict)\n self._oracle_id_counter +=1\n return clf\"\"\""
},
{
"identifier": "Context",
"path": "src/utils/context.py",
"snippet": "class Context(object):\n __create_key = object()\n __global = None\n\n def __init__(self, create_key, config_file):\n ###################################################\n self.factories = {\n \"datasets\": None,\n \"embedders\": None,\n \"oracles\": None,\n \"explainers\": None,\n \"metrics\": None\n }\n self.lock_release_tout : None\n ###################################################\n assert(create_key == Context.__create_key), \\\n \"Context objects must be created using Context.get_context\"\n \n # Check that the path to the config file exists\n if not os.path.exists(config_file):\n raise ValueError(f'''The provided config file does not exist. PATH: {config_file}''')\n\n self.config_file = config_file\n # Read the config dictionary inside the config path with the composer\n '''with open(self.config_file, 'r') as config_reader:\n self.conf = propagate(compose(jsonpickle.decode(config_reader.read()))) #First read config, then apply the compose and finally it propagate some homogeneous config params\n '''\n self.conf = propagate(compose(JsoncParser.parse_file(self.config_file)))\n\n self._scope = self.conf['experiment'].get('scope','default_scope')\n self.conf['experiment']['parameters']=self.conf['experiment'].get('parameters',{})\n self.conf['experiment']['parameters']['lock_release_tout']=self.conf['experiment']['parameters'].get('lock_release_tout',24*5) #Expressed in hours\n self.lock_release_tout = self.conf['experiment']['parameters']['lock_release_tout']\n\n self.raw_conf = copy.deepcopy(self.conf) #TODO: I think it is will be useless remove that in the future.\n\n self.__create_storages()\n \n @property\n def logger(self):\n GLogger._path = os.path.join(self.log_store_path, self._scope)\n return GLogger.getLogger()\n \n @classmethod\n def get_context(cls,config_file=None):\n if(Context.__global == None):\n if config_file is None:\n raise ValueError(f'''The configuration file must be passed to the method as PATH the first time. Now you did not pass as parameter.''')\n Context.__global = Context(Context.__create_key,config_file) \n return Context.__global\n \n @classmethod\n def get_by_pkvs(cls, conf, parent, key, value, son):\n for obj in conf[parent]:\n if(obj[key] == value):\n return obj[son]\n \n def get_path(self, obj):\n fullname = self.get_fullname(obj).split('.')\n qualifier = fullname[1] + '_store_path'\n store_dir = 'data'if not self._get_store_path(qualifier) else self._get_store_path(qualifier)\n\n # change this path when the dataset factories are finished\n if 'dataset' in obj.__dict__.keys():\n directory = os.path.join(store_dir, str(obj.dataset))\n else:\n directory = store_dir\n lock = Lock(directory+'.lck',lifetime=timedelta(hours=self.lock_release_tout))\n with lock:\n if not os.path.exists(directory):\n os.makedirs(directory)\n return os.path.join(directory, obj.name)\n \n @classmethod\n def get_fullname(cls, o):\n klass = o.__class__\n module = klass.__module__\n if module == 'builtins':\n return klass.__qualname__ # avoid outputs like 'builtins.str'\n return module + '.' 
+ klass.__qualname__\n \n def get_name(self, inst, dictionary=None, alias=None): \n cls = inst.__class__.__name__ if not alias else alias\n dictionary= clean_cfg(inst.local_config) if dictionary is None else clean_cfg(dictionary)\n md5_hash = hashlib.md5() \n\n def flatten_dict(d, parent_key='', sep='_'):\n items = []\n for k, v in d.items():\n new_key = f'{parent_key}{sep}{k}' if parent_key else k\n if isinstance(v, dict):\n items.extend(flatten_dict(v, new_key, sep=sep).items())\n else:\n items.append((new_key, v))\n return dict(items)\n\n if dictionary is not None:\n payload = f'{cls}_' + '_'.join([f'{key}={value}' for key, value in flatten_dict(dictionary).items()])\n md5_hash.update(payload.encode('utf-8')) \n return cls+'-'+md5_hash.hexdigest()\n else:\n return cls\n \n def _get_store_path(self,value):\n return Context.get_by_pkvs(self.conf, \"store_paths\", \"name\",value,\"address\")\n \n def __create_storages(self):\n for store_path in self.conf['store_paths']:\n if not os.path.exists(store_path['address']):\n os.makedirs(store_path['address'])\n\n @property\n def dataset_store_path(self):\n return self._get_store_path(inspect.stack()[0][3])\n \n @property\n def embedder_store_path(self):\n return self._get_store_path(inspect.stack()[0][3])\n \n @property\n def oracle_store_path(self):\n return self._get_store_path(inspect.stack()[0][3])\n \n @property\n def explainer_store_path(self):\n return self._get_store_path(inspect.stack()[0][3])\n \n @property\n def output_store_path(self):\n return self._get_store_path(inspect.stack()[0][3])\n \n @property\n def log_store_path(self):\n return self._get_store_path(inspect.stack()[0][3])"
}
] | import random
from src.dataset.dataset_factory import DatasetFactory
from src.evaluation.evaluation_metric_factory import EvaluationMetricFactory
from src.evaluation.evaluator_base import Evaluator
from src.explainer.explainer_factory import ExplainerFactory
from src.oracle.embedder_factory import EmbedderFactory
from src.oracle.oracle_factory import OracleFactory
from src.utils.context import Context | 5,879 |
class EvaluatorManager:
def __init__(self, context: Context) -> None:
self.context = context
self._output_store_path = self.context.output_store_path
self._evaluators = []
#NOTE: Move the Factories creation outside?
self.context.factories['datasets'] = DatasetFactory(context)
self.context.factories['embedders'] = EmbedderFactory(context)
|
class EvaluatorManager:
def __init__(self, context: Context) -> None:
self.context = context
self._output_store_path = self.context.output_store_path
self._evaluators = []
#NOTE: Move the Factories creation outside?
self.context.factories['datasets'] = DatasetFactory(context)
self.context.factories['embedders'] = EmbedderFactory(context) | self.context.factories['oracles'] = OracleFactory(context) | 5 | 2023-12-15 16:34:16+00:00 | 8k |
modelscope/scepter | scepter/modules/annotator/openpose.py | [
{
"identifier": "BaseAnnotator",
"path": "scepter/modules/annotator/base_annotator.py",
"snippet": "class BaseAnnotator(BaseModel, metaclass=ABCMeta):\n para_dict = {}\n\n def __init__(self, cfg, logger=None):\n super().__init__(cfg, logger=logger)\n\n @torch.no_grad()\n @torch.inference_mode\n def forward(self, *args, **kwargs):\n raise NotImplementedError\n\n @staticmethod\n def get_config_template():\n return dict_to_yaml('ANNOTATORS',\n __class__.__name__,\n BaseAnnotator.para_dict,\n set_name=True)"
},
{
"identifier": "ANNOTATORS",
"path": "scepter/modules/annotator/registry.py",
"snippet": "ANNOTATORS = Registry('ANNOTATORS', build_func=build_annotator)"
},
{
"identifier": "dict_to_yaml",
"path": "scepter/modules/utils/config.py",
"snippet": "def dict_to_yaml(module_name, name, json_config, set_name=False):\n '''\n { \"ENV\" :\n { \"description\" : \"\",\n \"A\" : {\n \"value\": 1.0,\n \"description\": \"\"\n }\n }\n }\n convert std dict to yaml\n :param module_name:\n :param json_config:\n :return:\n '''\n def convert_yaml_style(level=1,\n name='ENV',\n description='ENV PARA',\n default='',\n type_name='',\n is_sys=False):\n new_line = ''\n new_line += '{}# {} DESCRIPTION: {} TYPE: {} default: {}\\n'.format(\n '\\t' * (level - 1), name.upper(), description, type_name,\n f'\\'{default}\\'' if isinstance(default, str) else default)\n if is_sys:\n if name == '-':\n new_line += '{}{}\\n'.format('\\t' * (level - 1), name.upper())\n else:\n new_line += '{}{}:\\n'.format('\\t' * (level - 1), name.upper())\n else:\n # if isinstance(default, str):\n # default = f'\\'{default}\\''\n if default is None:\n new_line += '{}# {}: {}\\n'.format('\\t' * (level - 1),\n name.upper(), default)\n else:\n new_line += '{}{}: {}\\n'.format('\\t' * (level - 1),\n name.upper(), default)\n return new_line\n\n def parse_dict(json_config,\n level_num,\n parent_key,\n set_name=False,\n name='',\n parent_type='dict'):\n yaml_str = ''\n # print(level_num, json_config)\n if isinstance(json_config, dict):\n if 'value' in json_config:\n value = json_config['value']\n if isinstance(value, dict):\n assert len(value) < 1\n value = None\n description = json_config.get('description', '')\n yaml_str += convert_yaml_style(level=level_num - 1,\n name=parent_key,\n description=description,\n default=value,\n type_name=type(value).__name__)\n return True, yaml_str\n else:\n if len(json_config) < 1:\n yaml_str += convert_yaml_style(level=level_num,\n name='NAME',\n description='',\n default='',\n type_name='')\n level_num += 1\n for k, v in json_config.items():\n if k == 'description':\n continue\n if isinstance(v, dict):\n is_final, new_yaml_str = parse_dict(v,\n level_num,\n k,\n parent_type='dict')\n if not is_final and parent_type == 'dict':\n description = v.get('description', '')\n yaml_str += convert_yaml_style(\n level=level_num - 1,\n name=k,\n description=description,\n default='',\n type_name='',\n is_sys=True)\n if not is_final and parent_type == 'list':\n yaml_str += convert_yaml_style(level=level_num,\n name='NAME',\n description='',\n default=k,\n type_name='')\n yaml_str += new_yaml_str\n elif isinstance(v, list):\n base_yaml_str = convert_yaml_style(level=level_num - 1,\n name=k,\n description='',\n default='',\n type_name='',\n is_sys=True)\n yaml_str += base_yaml_str\n for tup in v:\n is_final, new_yaml_str = parse_dict(\n tup, level_num, '-', parent_type='list')\n if not is_final:\n yaml_str += convert_yaml_style(level=level_num,\n name='-',\n description='',\n default='',\n type_name='',\n is_sys=True)\n yaml_str += new_yaml_str\n else:\n raise KeyError(\n f'json config {json_config} must be a dict of list'\n )\n\n elif isinstance(json_config, list):\n level_num += 1\n for tup in json_config:\n is_final, new_yaml_str = parse_dict(tup, level_num, '-')\n if not is_final:\n\n yaml_str += convert_yaml_style(level=level_num - 1,\n name='-',\n description='',\n default='',\n type_name='',\n is_sys=True)\n if set_name:\n yaml_str += convert_yaml_style(level=level_num,\n name='NAME',\n description='',\n default=name,\n type_name='')\n yaml_str += new_yaml_str\n else:\n raise KeyError(f'json config {json_config} must be a dict')\n return False, yaml_str\n\n if isinstance(json_config, dict):\n first_dict, sec_dict, third_dict = {}, {}, {}\n for key, 
value in json_config.items():\n if isinstance(value, dict) and len(value) > 0:\n first_dict[key] = value\n elif isinstance(value, dict) and len(value) == 0:\n sec_dict[key] = value\n elif isinstance(value, list):\n third_dict[key] = value\n else:\n raise f'Config {json_config} is illegal'\n json_config = {}\n json_config.update(first_dict)\n json_config.update(sec_dict)\n json_config.update(third_dict)\n\n yaml_str = f'[{module_name}] module yaml examples:\\n'\n level_num = 1\n base_yaml_str = convert_yaml_style(level=level_num,\n name=module_name,\n description='',\n default='',\n type_name='',\n is_sys=True)\n level_num += 1\n\n is_final, new_yaml_str = parse_dict(json_config,\n level_num,\n module_name,\n set_name=isinstance(json_config, list)\n and set_name,\n name=name)\n if not is_final:\n yaml_str += base_yaml_str\n if set_name and not isinstance(json_config, list):\n yaml_str += convert_yaml_style(level=level_num,\n name='NAME',\n description='',\n default=name,\n type_name='')\n yaml_str += new_yaml_str\n else:\n yaml_str += new_yaml_str[1:]\n\n return yaml_str"
},
{
"identifier": "FS",
"path": "scepter/modules/utils/file_system.py",
"snippet": "FS = FileSystem()"
}
] | import math
import os
import cv2
import matplotlib
import numpy as np
import torch
import torch.nn as nn
from abc import ABCMeta
from collections import OrderedDict
from PIL import Image
from scipy.ndimage.filters import gaussian_filter
from skimage.measure import label
from scepter.modules.annotator.base_annotator import BaseAnnotator
from scepter.modules.annotator.registry import ANNOTATORS
from scepter.modules.utils.config import dict_to_yaml
from scepter.modules.utils.file_system import FS | 5,088 | startend = list(
zip(
np.linspace(candA[i][0],
candB[j][0],
num=mid_num),
np.linspace(candA[i][1],
candB[j][1],
num=mid_num)))
vec_x = np.array([
score_mid[int(round(startend[ii][1])),
int(round(startend[ii][0])), 0]
for ii in range(len(startend))
])
vec_y = np.array([
score_mid[int(round(startend[ii][1])),
int(round(startend[ii][0])), 1]
for ii in range(len(startend))
])
score_midpts = np.multiply(
vec_x, vec[0]) + np.multiply(vec_y, vec[1])
score_with_dist_prior = sum(score_midpts) / len(
score_midpts) + min(
0.5 * oriImg.shape[0] / norm - 1, 0)
criterion1 = len(np.nonzero(
score_midpts > thre2)[0]) > 0.8 * len(score_midpts)
criterion2 = score_with_dist_prior > 0
if criterion1 and criterion2:
connection_candidate.append([
i, j, score_with_dist_prior,
score_with_dist_prior + candA[i][2] +
candB[j][2]
])
connection_candidate = sorted(connection_candidate,
key=lambda x: x[2],
reverse=True)
connection = np.zeros((0, 5))
for c in range(len(connection_candidate)):
i, j, s = connection_candidate[c][0:3]
if (i not in connection[:, 3]
and j not in connection[:, 4]):
connection = np.vstack(
[connection, [candA[i][3], candB[j][3], s, i, j]])
if (len(connection) >= min(nA, nB)):
break
connection_all.append(connection)
else:
special_k.append(k)
connection_all.append([])
# last number in each row is the total parts number of that person
# the second last number in each row is the score of the overall configuration
subset = -1 * np.ones((0, 20))
candidate = np.array(
[item for sublist in all_peaks for item in sublist])
for k in range(len(mapIdx)):
if k not in special_k:
partAs = connection_all[k][:, 0]
partBs = connection_all[k][:, 1]
indexA, indexB = np.array(limbSeq[k]) - 1
for i in range(len(connection_all[k])): # = 1:size(temp,1)
found = 0
subset_idx = [-1, -1]
for j in range(len(subset)): # 1:size(subset,1):
if subset[j][indexA] == partAs[i] or subset[j][
indexB] == partBs[i]:
subset_idx[found] = j
found += 1
if found == 1:
j = subset_idx[0]
if subset[j][indexB] != partBs[i]:
subset[j][indexB] = partBs[i]
subset[j][-1] += 1
subset[j][-2] += candidate[
partBs[i].astype(int),
2] + connection_all[k][i][2]
elif found == 2: # if found 2 and disjoint, merge them
j1, j2 = subset_idx
membership = ((subset[j1] >= 0).astype(int) +
(subset[j2] >= 0).astype(int))[:-2]
if len(np.nonzero(membership == 2)[0]) == 0: # merge
subset[j1][:-2] += (subset[j2][:-2] + 1)
subset[j1][-2:] += subset[j2][-2:]
subset[j1][-2] += connection_all[k][i][2]
subset = np.delete(subset, j2, 0)
else: # as like found == 1
subset[j1][indexB] = partBs[i]
subset[j1][-1] += 1
subset[j1][-2] += candidate[
partBs[i].astype(int),
2] + connection_all[k][i][2]
# if find no partA in the subset, create a new subset
elif not found and k < 17:
row = -1 * np.ones(20)
row[indexA] = partAs[i]
row[indexB] = partBs[i]
row[-1] = 2
row[-2] = sum(
candidate[connection_all[k][i, :2].astype(int),
2]) + connection_all[k][i][2]
subset = np.vstack([subset, row])
# delete some rows of subset which has few parts occur
deleteIdx = []
for i in range(len(subset)):
if subset[i][-1] < 4 or subset[i][-2] / subset[i][-1] < 0.4:
deleteIdx.append(i)
subset = np.delete(subset, deleteIdx, axis=0)
# subset: n*20 array, 0-17 is the index in candidate, 18 is the total score, 19 is the total parts
# candidate: x, y, score, id
return candidate, subset
| # -*- coding: utf-8 -*-
# Openpose
# Original from CMU https://github.com/CMU-Perceptual-Computing-Lab/openpose
# 2nd Edited by https://github.com/Hzzone/pytorch-openpose
# The implementation is modified from 3rd Edited Version by ControlNet
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
def padRightDownCorner(img, stride, padValue):
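    # Pad the bottom and right borders with `padValue` so that height and width
    # become multiples of `stride`; returns the padded image together with the
    # applied padding [up, left, down, right] so it can be removed afterwards.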
h = img.shape[0]
w = img.shape[1]
pad = 4 * [None]
pad[0] = 0 # up
pad[1] = 0 # left
pad[2] = 0 if (h % stride == 0) else stride - (h % stride) # down
pad[3] = 0 if (w % stride == 0) else stride - (w % stride) # right
img_padded = img
pad_up = np.tile(img_padded[0:1, :, :] * 0 + padValue, (pad[0], 1, 1))
img_padded = np.concatenate((pad_up, img_padded), axis=0)
pad_left = np.tile(img_padded[:, 0:1, :] * 0 + padValue, (1, pad[1], 1))
img_padded = np.concatenate((pad_left, img_padded), axis=1)
pad_down = np.tile(img_padded[-2:-1, :, :] * 0 + padValue, (pad[2], 1, 1))
img_padded = np.concatenate((img_padded, pad_down), axis=0)
pad_right = np.tile(img_padded[:, -2:-1, :] * 0 + padValue, (1, pad[3], 1))
img_padded = np.concatenate((img_padded, pad_right), axis=1)
return img_padded, pad
# transfer caffe model to pytorch which will match the layer name
def transfer(model, model_weights):
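    # Re-key the caffe-converted checkpoint: for each parameter name in the
    # pytorch model, drop its leading submodule prefix (e.g. 'model0.') and use
    # the remainder to look the tensor up in the flat weights dict.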
transfered_model_weights = {}
for weights_name in model.state_dict().keys():
transfered_model_weights[weights_name] = model_weights['.'.join(
weights_name.split('.')[1:])]
return transfered_model_weights
# draw the body keypoint and lims
def draw_bodypose(canvas, candidate, subset):
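    # `candidate` holds body keypoints as rows of [x, y, score, id]; `subset`
    # holds one row per detected person whose first 18 entries index into
    # `candidate` (-1 marks a missing joint). Joints are drawn as circles and
    # limbs as blended ellipses.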
stickwidth = 4
limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10],
[10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15],
[15, 17], [1, 16], [16, 18], [3, 17], [6, 18]]
colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0],
[170, 255, 0], [85, 255, 0], [0, 255, 0], [0, 255, 85],
[0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255],
[0, 0, 255], [85, 0, 255], [170, 0, 255], [255, 0, 255],
[255, 0, 170], [255, 0, 85]]
for i in range(18):
for n in range(len(subset)):
index = int(subset[n][i])
if index == -1:
continue
x, y = candidate[index][0:2]
cv2.circle(canvas, (int(x), int(y)), 4, colors[i], thickness=-1)
for i in range(17):
for n in range(len(subset)):
index = subset[n][np.array(limbSeq[i]) - 1]
if -1 in index:
continue
cur_canvas = canvas.copy()
Y = candidate[index.astype(int), 0]
X = candidate[index.astype(int), 1]
mX = np.mean(X)
mY = np.mean(Y)
length = ((X[0] - X[1])**2 + (Y[0] - Y[1])**2)**0.5
angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
polygon = cv2.ellipse2Poly(
(int(mY), int(mX)), (int(length / 2), stickwidth), int(angle),
0, 360, 1)
cv2.fillConvexPoly(cur_canvas, polygon, colors[i])
canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)
# plt.imsave("preview.jpg", canvas[:, :, [2, 1, 0]])
# plt.imshow(canvas[:, :, [2, 1, 0]])
return canvas
# image drawn by opencv is not good.
def draw_handpose(canvas, all_hand_peaks, show_number=False):
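    # `edges` lists the 20 bones of the 21-keypoint hand model; every hand in
    # `all_hand_peaks` is drawn with hue-coded bone lines and red joint dots.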
edges = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8],
[0, 9], [9, 10], [10, 11], [11, 12], [0, 13], [13, 14], [14, 15],
[15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]
for peaks in all_hand_peaks:
for ie, e in enumerate(edges):
if np.sum(np.all(peaks[e], axis=1) == 0) == 0:
x1, y1 = peaks[e[0]]
x2, y2 = peaks[e[1]]
cv2.line(canvas, (x1, y1), (x2, y2),
matplotlib.colors.hsv_to_rgb(
[ie / float(len(edges)), 1.0, 1.0]) * 255,
thickness=2)
        for i, keypoint in enumerate(peaks):
            x, y = keypoint
cv2.circle(canvas, (x, y), 4, (0, 0, 255), thickness=-1)
if show_number:
cv2.putText(canvas,
str(i), (x, y),
cv2.FONT_HERSHEY_SIMPLEX,
0.3, (0, 0, 0),
lineType=cv2.LINE_AA)
return canvas
# detect hand according to body pose keypoints
# please refer to https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/
# master/src/openpose/hand/handDetector.cpp
def handDetect(candidate, subset, oriImg):
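    # Estimate one square box per visible hand by extrapolating from the elbow
    # through the wrist (left hand uses joints 5-6-7, right hand 2-3-4), then
    # clip the box to the image bounds.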
# right hand: wrist 4, elbow 3, shoulder 2
# left hand: wrist 7, elbow 6, shoulder 5
ratioWristElbow = 0.33
detect_result = []
image_height, image_width = oriImg.shape[0:2]
for person in subset.astype(int):
# if any of three not detected
has_left = np.sum(person[[5, 6, 7]] == -1) == 0
has_right = np.sum(person[[2, 3, 4]] == -1) == 0
if not (has_left or has_right):
continue
hands = []
# left hand
if has_left:
left_shoulder_index, left_elbow_index, left_wrist_index = person[[
5, 6, 7
]]
x1, y1 = candidate[left_shoulder_index][:2]
x2, y2 = candidate[left_elbow_index][:2]
x3, y3 = candidate[left_wrist_index][:2]
hands.append([x1, y1, x2, y2, x3, y3, True])
# right hand
if has_right:
right_shoulder_index, right_elbow_index, right_wrist_index = person[
[2, 3, 4]]
x1, y1 = candidate[right_shoulder_index][:2]
x2, y2 = candidate[right_elbow_index][:2]
x3, y3 = candidate[right_wrist_index][:2]
hands.append([x1, y1, x2, y2, x3, y3, False])
for x1, y1, x2, y2, x3, y3, is_left in hands:
            # pos_hand = pos_wrist + ratio * (pos_wrist - pos_elbow) = (1 + ratio) * pos_wrist - ratio * pos_elbow
# handRectangle.x = posePtr[wrist*3] + ratioWristElbow * (posePtr[wrist*3] - posePtr[elbow*3]);
# handRectangle.y = posePtr[wrist*3+1] + ratioWristElbow * (posePtr[wrist*3+1] - posePtr[elbow*3+1]);
# const auto distanceWristElbow = getDistance(poseKeypoints, person, wrist, elbow);
# const auto distanceElbowShoulder = getDistance(poseKeypoints, person, elbow, shoulder);
# handRectangle.width = 1.5f * fastMax(distanceWristElbow, 0.9f * distanceElbowShoulder);
x = x3 + ratioWristElbow * (x3 - x2)
y = y3 + ratioWristElbow * (y3 - y2)
distanceWristElbow = math.sqrt((x3 - x2)**2 + (y3 - y2)**2)
distanceElbowShoulder = math.sqrt((x2 - x1)**2 + (y2 - y1)**2)
width = 1.5 * max(distanceWristElbow, 0.9 * distanceElbowShoulder)
# x-y refers to the center --> offset to topLeft point
# handRectangle.x -= handRectangle.width / 2.f;
# handRectangle.y -= handRectangle.height / 2.f;
x -= width / 2
y -= width / 2 # width = height
# overflow the image
if x < 0:
x = 0
if y < 0:
y = 0
width1 = width
width2 = width
if x + width > image_width:
width1 = image_width - x
if y + width > image_height:
width2 = image_height - y
width = min(width1, width2)
            # keep only hand boxes that are at least 20 pixels wide
if width >= 20:
detect_result.append([int(x), int(y), int(width), is_left])
'''
return value: [[x, y, w, True if left hand else False]].
    width = height since the network requires a square input.
    x, y are the coordinates of the top-left corner.
'''
return detect_result
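# A minimal, hypothetical usage sketch (names such as `hand_estimator` are
# assumptions, not part of this module): crop each detected box, run the hand
# estimator on the crop, and shift the resulting peaks back to image space,
# leaving missed keypoints (reported as [0, 0]) untouched:
#   for x, y, w, is_left in handDetect(candidate, subset, oriImg):
#       peaks = hand_estimator(oriImg[y:y + w, x:x + w, :])
#       peaks[:, 0] = np.where(peaks[:, 0] == 0, peaks[:, 0], peaks[:, 0] + x)
#       peaks[:, 1] = np.where(peaks[:, 1] == 0, peaks[:, 1], peaks[:, 1] + y)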
# get max index of 2d array
def npmax(array):
arrayindex = array.argmax(1)
arrayvalue = array.max(1)
i = arrayvalue.argmax()
j = arrayindex[i]
return i, j
def make_layers(block, no_relu_layers):
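    # Build an nn.Sequential from an OrderedDict of layer specs: pooling entries
    # are [kernel, stride, padding], conv entries are [in_ch, out_ch, kernel,
    # stride, padding]; a ReLU is appended after every conv except those named
    # in `no_relu_layers`.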
layers = []
for layer_name, v in block.items():
if 'pool' in layer_name:
layer = nn.MaxPool2d(kernel_size=v[0], stride=v[1], padding=v[2])
layers.append((layer_name, layer))
else:
conv2d = nn.Conv2d(in_channels=v[0],
out_channels=v[1],
kernel_size=v[2],
stride=v[3],
padding=v[4])
layers.append((layer_name, conv2d))
if layer_name not in no_relu_layers:
layers.append(('relu_' + layer_name, nn.ReLU(inplace=True)))
return nn.Sequential(OrderedDict(layers))
class bodypose_model(nn.Module):
def __init__(self):
super(bodypose_model, self).__init__()
# these layers have no relu layer
no_relu_layers = [
'conv5_5_CPM_L1', 'conv5_5_CPM_L2', 'Mconv7_stage2_L1',
'Mconv7_stage2_L2', 'Mconv7_stage3_L1', 'Mconv7_stage3_L2',
'Mconv7_stage4_L1', 'Mconv7_stage4_L2', 'Mconv7_stage5_L1',
            'Mconv7_stage5_L2', 'Mconv7_stage6_L1', 'Mconv7_stage6_L2'
]
blocks = {}
block0 = OrderedDict([('conv1_1', [3, 64, 3, 1, 1]),
('conv1_2', [64, 64, 3, 1, 1]),
('pool1_stage1', [2, 2, 0]),
('conv2_1', [64, 128, 3, 1, 1]),
('conv2_2', [128, 128, 3, 1, 1]),
('pool2_stage1', [2, 2, 0]),
('conv3_1', [128, 256, 3, 1, 1]),
('conv3_2', [256, 256, 3, 1, 1]),
('conv3_3', [256, 256, 3, 1, 1]),
('conv3_4', [256, 256, 3, 1, 1]),
('pool3_stage1', [2, 2, 0]),
('conv4_1', [256, 512, 3, 1, 1]),
('conv4_2', [512, 512, 3, 1, 1]),
('conv4_3_CPM', [512, 256, 3, 1, 1]),
('conv4_4_CPM', [256, 128, 3, 1, 1])])
# Stage 1
block1_1 = OrderedDict([('conv5_1_CPM_L1', [128, 128, 3, 1, 1]),
('conv5_2_CPM_L1', [128, 128, 3, 1, 1]),
('conv5_3_CPM_L1', [128, 128, 3, 1, 1]),
('conv5_4_CPM_L1', [128, 512, 1, 1, 0]),
('conv5_5_CPM_L1', [512, 38, 1, 1, 0])])
block1_2 = OrderedDict([('conv5_1_CPM_L2', [128, 128, 3, 1, 1]),
('conv5_2_CPM_L2', [128, 128, 3, 1, 1]),
('conv5_3_CPM_L2', [128, 128, 3, 1, 1]),
('conv5_4_CPM_L2', [128, 512, 1, 1, 0]),
('conv5_5_CPM_L2', [512, 19, 1, 1, 0])])
blocks['block1_1'] = block1_1
blocks['block1_2'] = block1_2
self.model0 = make_layers(block0, no_relu_layers)
# Stages 2 - 6
for i in range(2, 7):
blocks['block%d_1' % i] = OrderedDict([
('Mconv1_stage%d_L1' % i, [185, 128, 7, 1, 3]),
('Mconv2_stage%d_L1' % i, [128, 128, 7, 1, 3]),
('Mconv3_stage%d_L1' % i, [128, 128, 7, 1, 3]),
('Mconv4_stage%d_L1' % i, [128, 128, 7, 1, 3]),
('Mconv5_stage%d_L1' % i, [128, 128, 7, 1, 3]),
('Mconv6_stage%d_L1' % i, [128, 128, 1, 1, 0]),
('Mconv7_stage%d_L1' % i, [128, 38, 1, 1, 0])
])
blocks['block%d_2' % i] = OrderedDict([
('Mconv1_stage%d_L2' % i, [185, 128, 7, 1, 3]),
('Mconv2_stage%d_L2' % i, [128, 128, 7, 1, 3]),
('Mconv3_stage%d_L2' % i, [128, 128, 7, 1, 3]),
('Mconv4_stage%d_L2' % i, [128, 128, 7, 1, 3]),
('Mconv5_stage%d_L2' % i, [128, 128, 7, 1, 3]),
('Mconv6_stage%d_L2' % i, [128, 128, 1, 1, 0]),
('Mconv7_stage%d_L2' % i, [128, 19, 1, 1, 0])
])
for k in blocks.keys():
blocks[k] = make_layers(blocks[k], no_relu_layers)
self.model1_1 = blocks['block1_1']
self.model2_1 = blocks['block2_1']
self.model3_1 = blocks['block3_1']
self.model4_1 = blocks['block4_1']
self.model5_1 = blocks['block5_1']
self.model6_1 = blocks['block6_1']
self.model1_2 = blocks['block1_2']
self.model2_2 = blocks['block2_2']
self.model3_2 = blocks['block3_2']
self.model4_2 = blocks['block4_2']
self.model5_2 = blocks['block5_2']
self.model6_2 = blocks['block6_2']
def forward(self, x):
out1 = self.model0(x)
out1_1 = self.model1_1(out1)
out1_2 = self.model1_2(out1)
out2 = torch.cat([out1_1, out1_2, out1], 1)
out2_1 = self.model2_1(out2)
out2_2 = self.model2_2(out2)
out3 = torch.cat([out2_1, out2_2, out1], 1)
out3_1 = self.model3_1(out3)
out3_2 = self.model3_2(out3)
out4 = torch.cat([out3_1, out3_2, out1], 1)
out4_1 = self.model4_1(out4)
out4_2 = self.model4_2(out4)
out5 = torch.cat([out4_1, out4_2, out1], 1)
out5_1 = self.model5_1(out5)
out5_2 = self.model5_2(out5)
out6 = torch.cat([out5_1, out5_2, out1], 1)
out6_1 = self.model6_1(out6)
out6_2 = self.model6_2(out6)
return out6_1, out6_2
class handpose_model(nn.Module):
def __init__(self):
super(handpose_model, self).__init__()
# these layers have no relu layer
no_relu_layers = [
'conv6_2_CPM', 'Mconv7_stage2', 'Mconv7_stage3', 'Mconv7_stage4',
'Mconv7_stage5', 'Mconv7_stage6'
]
# stage 1
block1_0 = OrderedDict([('conv1_1', [3, 64, 3, 1, 1]),
('conv1_2', [64, 64, 3, 1, 1]),
('pool1_stage1', [2, 2, 0]),
('conv2_1', [64, 128, 3, 1, 1]),
('conv2_2', [128, 128, 3, 1, 1]),
('pool2_stage1', [2, 2, 0]),
('conv3_1', [128, 256, 3, 1, 1]),
('conv3_2', [256, 256, 3, 1, 1]),
('conv3_3', [256, 256, 3, 1, 1]),
('conv3_4', [256, 256, 3, 1, 1]),
('pool3_stage1', [2, 2, 0]),
('conv4_1', [256, 512, 3, 1, 1]),
('conv4_2', [512, 512, 3, 1, 1]),
('conv4_3', [512, 512, 3, 1, 1]),
('conv4_4', [512, 512, 3, 1, 1]),
('conv5_1', [512, 512, 3, 1, 1]),
('conv5_2', [512, 512, 3, 1, 1]),
('conv5_3_CPM', [512, 128, 3, 1, 1])])
block1_1 = OrderedDict([('conv6_1_CPM', [128, 512, 1, 1, 0]),
('conv6_2_CPM', [512, 22, 1, 1, 0])])
blocks = {}
blocks['block1_0'] = block1_0
blocks['block1_1'] = block1_1
# stage 2-6
for i in range(2, 7):
blocks['block%d' % i] = OrderedDict([
('Mconv1_stage%d' % i, [150, 128, 7, 1, 3]),
('Mconv2_stage%d' % i, [128, 128, 7, 1, 3]),
('Mconv3_stage%d' % i, [128, 128, 7, 1, 3]),
('Mconv4_stage%d' % i, [128, 128, 7, 1, 3]),
('Mconv5_stage%d' % i, [128, 128, 7, 1, 3]),
('Mconv6_stage%d' % i, [128, 128, 1, 1, 0]),
('Mconv7_stage%d' % i, [128, 22, 1, 1, 0])
])
for k in blocks.keys():
blocks[k] = make_layers(blocks[k], no_relu_layers)
self.model1_0 = blocks['block1_0']
self.model1_1 = blocks['block1_1']
self.model2 = blocks['block2']
self.model3 = blocks['block3']
self.model4 = blocks['block4']
self.model5 = blocks['block5']
self.model6 = blocks['block6']
def forward(self, x):
out1_0 = self.model1_0(x)
out1_1 = self.model1_1(out1_0)
concat_stage2 = torch.cat([out1_1, out1_0], 1)
out_stage2 = self.model2(concat_stage2)
concat_stage3 = torch.cat([out_stage2, out1_0], 1)
out_stage3 = self.model3(concat_stage3)
concat_stage4 = torch.cat([out_stage3, out1_0], 1)
out_stage4 = self.model4(concat_stage4)
concat_stage5 = torch.cat([out_stage4, out1_0], 1)
out_stage5 = self.model5(concat_stage5)
concat_stage6 = torch.cat([out_stage5, out1_0], 1)
out_stage6 = self.model6(concat_stage6)
return out_stage6
class Hand(object):
def __init__(self, model_path, device='cuda'):
self.model = handpose_model()
if torch.cuda.is_available():
self.model = self.model.to(device)
model_dict = transfer(self.model, torch.load(model_path))
self.model.load_state_dict(model_dict)
self.model.eval()
self.device = device
def __call__(self, oriImg):
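        # Run the hand network at several scales, average the 22-channel
        # heatmaps, then for each of the 21 keypoints: smooth the map, keep the
        # largest connected region above `thre`, and take its peak location
        # ([0, 0] when nothing exceeds the threshold).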
scale_search = [0.5, 1.0, 1.5, 2.0]
# scale_search = [0.5]
boxsize = 368
stride = 8
padValue = 128
thre = 0.05
multiplier = [x * boxsize / oriImg.shape[0] for x in scale_search]
heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 22))
# paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38))
for m in range(len(multiplier)):
scale = multiplier[m]
imageToTest = cv2.resize(oriImg, (0, 0),
fx=scale,
fy=scale,
interpolation=cv2.INTER_CUBIC)
imageToTest_padded, pad = padRightDownCorner(
imageToTest, stride, padValue)
im = np.transpose(
np.float32(imageToTest_padded[:, :, :, np.newaxis]),
(3, 2, 0, 1)) / 256 - 0.5
im = np.ascontiguousarray(im)
data = torch.from_numpy(im).float()
if torch.cuda.is_available():
data = data.to(self.device)
# data = data.permute([2, 0, 1]).unsqueeze(0).float()
with torch.no_grad():
output = self.model(data).cpu().numpy()
                # output = self.model(data).numpy()
# extract outputs, resize, and remove padding
heatmap = np.transpose(np.squeeze(output),
(1, 2, 0)) # output 1 is heatmaps
heatmap = cv2.resize(heatmap, (0, 0),
fx=stride,
fy=stride,
interpolation=cv2.INTER_CUBIC)
heatmap = heatmap[:imageToTest_padded.shape[0] -
pad[2], :imageToTest_padded.shape[1] - pad[3], :]
heatmap = cv2.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]),
interpolation=cv2.INTER_CUBIC)
heatmap_avg += heatmap / len(multiplier)
all_peaks = []
for part in range(21):
map_ori = heatmap_avg[:, :, part]
one_heatmap = gaussian_filter(map_ori, sigma=3)
binary = np.ascontiguousarray(one_heatmap > thre, dtype=np.uint8)
            # all peaks are below the threshold
if np.sum(binary) == 0:
all_peaks.append([0, 0])
continue
label_img, label_numbers = label(binary,
return_num=True,
connectivity=binary.ndim)
max_index = np.argmax([
np.sum(map_ori[label_img == i])
for i in range(1, label_numbers + 1)
]) + 1
label_img[label_img != max_index] = 0
map_ori[label_img == 0] = 0
y, x = npmax(map_ori)
all_peaks.append([x, y])
return np.array(all_peaks)
class Body(object):
def __init__(self, model_path, device='cuda'):
self.model = bodypose_model()
if torch.cuda.is_available():
self.model = self.model.to(device)
model_dict = transfer(self.model, torch.load(model_path))
self.model.load_state_dict(model_dict)
self.model.eval()
self.device = device
def __call__(self, oriImg):
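        # Multi-scale inference producing averaged heatmaps (19 ch) and PAFs
        # (38 ch); peaks are found per body part via gaussian smoothing and a
        # local-maximum test, candidate limbs are scored by integrating the
        # PAFs along the segment joining two peaks, and limbs are greedily
        # merged into per-person rows of `subset`.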
# scale_search = [0.5, 1.0, 1.5, 2.0]
scale_search = [0.5]
boxsize = 368
stride = 8
padValue = 128
thre1 = 0.1
thre2 = 0.05
multiplier = [x * boxsize / oriImg.shape[0] for x in scale_search]
heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 19))
paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38))
for m in range(len(multiplier)):
scale = multiplier[m]
imageToTest = cv2.resize(oriImg, (0, 0),
fx=scale,
fy=scale,
interpolation=cv2.INTER_CUBIC)
imageToTest_padded, pad = padRightDownCorner(
imageToTest, stride, padValue)
im = np.transpose(
np.float32(imageToTest_padded[:, :, :, np.newaxis]),
(3, 2, 0, 1)) / 256 - 0.5
im = np.ascontiguousarray(im)
data = torch.from_numpy(im).float()
if torch.cuda.is_available():
data = data.to(self.device)
# data = data.permute([2, 0, 1]).unsqueeze(0).float()
with torch.no_grad():
Mconv7_stage6_L1, Mconv7_stage6_L2 = self.model(data)
Mconv7_stage6_L1 = Mconv7_stage6_L1.cpu().numpy()
Mconv7_stage6_L2 = Mconv7_stage6_L2.cpu().numpy()
# extract outputs, resize, and remove padding
# heatmap = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[1]].data), (1, 2, 0))
# output 1 is heatmaps
heatmap = np.transpose(np.squeeze(Mconv7_stage6_L2),
(1, 2, 0)) # output 1 is heatmaps
heatmap = cv2.resize(heatmap, (0, 0),
fx=stride,
fy=stride,
interpolation=cv2.INTER_CUBIC)
heatmap = heatmap[:imageToTest_padded.shape[0] -
pad[2], :imageToTest_padded.shape[1] - pad[3], :]
heatmap = cv2.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]),
interpolation=cv2.INTER_CUBIC)
# paf = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[0]].data), (1, 2, 0)) # output 0 is PAFs
paf = np.transpose(np.squeeze(Mconv7_stage6_L1),
(1, 2, 0)) # output 0 is PAFs
paf = cv2.resize(paf, (0, 0),
fx=stride,
fy=stride,
interpolation=cv2.INTER_CUBIC)
paf = paf[:imageToTest_padded.shape[0] -
pad[2], :imageToTest_padded.shape[1] - pad[3], :]
paf = cv2.resize(paf, (oriImg.shape[1], oriImg.shape[0]),
interpolation=cv2.INTER_CUBIC)
            heatmap_avg += heatmap / len(multiplier)
            paf_avg += paf / len(multiplier)
all_peaks = []
peak_counter = 0
for part in range(18):
map_ori = heatmap_avg[:, :, part]
one_heatmap = gaussian_filter(map_ori, sigma=3)
map_left = np.zeros(one_heatmap.shape)
map_left[1:, :] = one_heatmap[:-1, :]
map_right = np.zeros(one_heatmap.shape)
map_right[:-1, :] = one_heatmap[1:, :]
map_up = np.zeros(one_heatmap.shape)
map_up[:, 1:] = one_heatmap[:, :-1]
map_down = np.zeros(one_heatmap.shape)
map_down[:, :-1] = one_heatmap[:, 1:]
peaks_binary = np.logical_and.reduce(
(one_heatmap >= map_left, one_heatmap >= map_right,
one_heatmap >= map_up, one_heatmap >= map_down,
one_heatmap > thre1))
peaks = list(
zip(np.nonzero(peaks_binary)[1],
np.nonzero(peaks_binary)[0])) # note reverse
peaks_with_score = [x + (map_ori[x[1], x[0]], ) for x in peaks]
peak_id = range(peak_counter, peak_counter + len(peaks))
peaks_with_score_and_id = [
peaks_with_score[i] + (peak_id[i], )
for i in range(len(peak_id))
]
all_peaks.append(peaks_with_score_and_id)
peak_counter += len(peaks)
# find connection in the specified sequence, center 29 is in the position 15
limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9],
[9, 10], [10, 11], [2, 12], [12, 13], [13, 14], [2, 1],
[1, 15], [15, 17], [1, 16], [16, 18], [3, 17], [6, 18]]
        # the middle joints heatmap correspondence
mapIdx = [[31, 32], [39, 40], [33, 34], [35, 36], [41, 42], [43, 44],
[19, 20], [21, 22], [23, 24], [25, 26], [27, 28], [29, 30],
[47, 48], [49, 50], [53, 54], [51, 52], [55, 56], [37, 38],
[45, 46]]
connection_all = []
special_k = []
mid_num = 10
for k in range(len(mapIdx)):
score_mid = paf_avg[:, :, [x - 19 for x in mapIdx[k]]]
candA = all_peaks[limbSeq[k][0] - 1]
candB = all_peaks[limbSeq[k][1] - 1]
nA = len(candA)
nB = len(candB)
indexA, indexB = limbSeq[k]
if (nA != 0 and nB != 0):
connection_candidate = []
for i in range(nA):
for j in range(nB):
vec = np.subtract(candB[j][:2], candA[i][:2])
norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1])
norm = max(0.001, norm)
vec = np.divide(vec, norm)
startend = list(
zip(
np.linspace(candA[i][0],
candB[j][0],
num=mid_num),
np.linspace(candA[i][1],
candB[j][1],
num=mid_num)))
vec_x = np.array([
score_mid[int(round(startend[ii][1])),
int(round(startend[ii][0])), 0]
for ii in range(len(startend))
])
vec_y = np.array([
score_mid[int(round(startend[ii][1])),
int(round(startend[ii][0])), 1]
for ii in range(len(startend))
])
score_midpts = np.multiply(
vec_x, vec[0]) + np.multiply(vec_y, vec[1])
score_with_dist_prior = sum(score_midpts) / len(
score_midpts) + min(
0.5 * oriImg.shape[0] / norm - 1, 0)
criterion1 = len(np.nonzero(
score_midpts > thre2)[0]) > 0.8 * len(score_midpts)
criterion2 = score_with_dist_prior > 0
if criterion1 and criterion2:
connection_candidate.append([
i, j, score_with_dist_prior,
score_with_dist_prior + candA[i][2] +
candB[j][2]
])
connection_candidate = sorted(connection_candidate,
key=lambda x: x[2],
reverse=True)
connection = np.zeros((0, 5))
for c in range(len(connection_candidate)):
i, j, s = connection_candidate[c][0:3]
if (i not in connection[:, 3]
and j not in connection[:, 4]):
connection = np.vstack(
[connection, [candA[i][3], candB[j][3], s, i, j]])
if (len(connection) >= min(nA, nB)):
break
connection_all.append(connection)
else:
special_k.append(k)
connection_all.append([])
# last number in each row is the total parts number of that person
# the second last number in each row is the score of the overall configuration
subset = -1 * np.ones((0, 20))
candidate = np.array(
[item for sublist in all_peaks for item in sublist])
for k in range(len(mapIdx)):
if k not in special_k:
partAs = connection_all[k][:, 0]
partBs = connection_all[k][:, 1]
indexA, indexB = np.array(limbSeq[k]) - 1
for i in range(len(connection_all[k])): # = 1:size(temp,1)
found = 0
subset_idx = [-1, -1]
for j in range(len(subset)): # 1:size(subset,1):
if subset[j][indexA] == partAs[i] or subset[j][
indexB] == partBs[i]:
subset_idx[found] = j
found += 1
if found == 1:
j = subset_idx[0]
if subset[j][indexB] != partBs[i]:
subset[j][indexB] = partBs[i]
subset[j][-1] += 1
subset[j][-2] += candidate[
partBs[i].astype(int),
2] + connection_all[k][i][2]
elif found == 2: # if found 2 and disjoint, merge them
j1, j2 = subset_idx
membership = ((subset[j1] >= 0).astype(int) +
(subset[j2] >= 0).astype(int))[:-2]
if len(np.nonzero(membership == 2)[0]) == 0: # merge
subset[j1][:-2] += (subset[j2][:-2] + 1)
subset[j1][-2:] += subset[j2][-2:]
subset[j1][-2] += connection_all[k][i][2]
subset = np.delete(subset, j2, 0)
else: # as like found == 1
subset[j1][indexB] = partBs[i]
subset[j1][-1] += 1
subset[j1][-2] += candidate[
partBs[i].astype(int),
2] + connection_all[k][i][2]
# if find no partA in the subset, create a new subset
elif not found and k < 17:
row = -1 * np.ones(20)
row[indexA] = partAs[i]
row[indexB] = partBs[i]
row[-1] = 2
row[-2] = sum(
candidate[connection_all[k][i, :2].astype(int),
2]) + connection_all[k][i][2]
subset = np.vstack([subset, row])
# delete some rows of subset which has few parts occur
deleteIdx = []
for i in range(len(subset)):
if subset[i][-1] < 4 or subset[i][-2] / subset[i][-1] < 0.4:
deleteIdx.append(i)
subset = np.delete(subset, deleteIdx, axis=0)
# subset: n*20 array, 0-17 is the index in candidate, 18 is the total score, 19 is the total parts
# candidate: x, y, score, id
return candidate, subset
| @ANNOTATORS.register_class() | 1 | 2023-12-21 02:01:48+00:00 | 8k |
YyzHarry/shortcut-ood-fairness | learning/algorithms.py | [
{
"identifier": "networks",
"path": "models/networks.py",
"snippet": "class Identity(nn.Module):\nclass MLP(nn.Module):\nclass PretrainedImageModel(torch.nn.Module):\nclass ResNet(PretrainedImageModel):\nclass TimmModel(PretrainedImageModel):\nclass HubModel(PretrainedImageModel):\nclass ImportedModel(PretrainedImageModel):\n def __init__(self):\n def forward(self, x):\n def __init__(self, n_inputs, n_outputs, hparams):\n def forward(self, x):\n def forward(self, x):\n def train(self, mode=True):\n def freeze_bn(self):\n def __init__(self, input_shape, hparams, pretrained=True, freeze_bn=False):\n def __init__(self, name, input_shape, hparams, pretrained=True, freeze_bn=False):\n def __init__(self, name1, name2, input_shape, hparams, pretrained=True, freeze_bn=False):\n def __init__(self, network, n_outputs, input_shape, hparams, pretrained=True, freeze_bn=False):\ndef replace_module_prefix(state_dict, prefix, replace_with=\"\"):\ndef get_torchvision_state_dict(url):\ndef imagenet_resnet50_ssl(URL):\ndef load_swag(URL):\ndef Featurizer(data_type, input_shape, hparams):\ndef Classifier(in_features, out_features, is_nonlinear=False):\nSIMCLR_RN50_URL = \"https://dl.fbaipublicfiles.com/vissl/model_zoo/\" \\\n \"simclr_rn50_800ep_simclr_8node_resnet_16_07_20.7e8feed1/model_final_checkpoint_phase799.torch\"\nBARLOWTWINS_RN50_URL = \"https://dl.fbaipublicfiles.com/vissl/model_zoo/\" \\\n \"barlow_twins/barlow_twins_32gpus_4node_imagenet1k_1000ep_resnet50.torch\""
},
{
"identifier": "joint_dro",
"path": "learning/joint_dro.py",
"snippet": "GEOMETRIES = ('cvar')\nMIN_REL_DIFFERENCE = 1e-5\ndef cvar_value(p, v, reg):\ndef bisection(eta_min, eta_max, f, tol=1e-6, max_iter=500):\n def __init__(self, size, reg, geometry, tol=1e-4, max_iter=1000, debugging=False):\n def best_response(self, v):\n def p(eta):\n def bisection_target(eta):\n def p(eta):\n def bisection_target(eta):\n def p(eta):\n def bisection_target(eta):\n def p(eta):\n def bisection_target(eta):\n def forward(self, v):\nclass RobustLoss(torch.nn.Module):"
},
{
"identifier": "get_optimizers",
"path": "learning/optimizers.py",
"snippet": "def get_bert_optim(network, lr, weight_decay):\ndef get_sgd_optim(network, lr, weight_decay):\ndef get_adam_optim(network, lr, weight_decay):"
},
{
"identifier": "mixup_data",
"path": "utils/misc.py",
"snippet": "def mixup_data(x, y, alpha=1., device=\"cpu\"):\n lam = np.random.beta(alpha, alpha) if alpha > 0 else 1\n\n batch_size = x.size()[0]\n index = torch.randperm(batch_size).to(device)\n\n mixed_x = lam * x + (1 - lam) * x[index, :]\n y_a, y_b = y, y[index]\n\n return mixed_x, y_a, y_b, lam"
}
] | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd as autograd
import copy
import numpy as np
from transformers import get_scheduler
from models import networks
from learning import joint_dro
from learning.optimizers import get_optimizers
from utils.misc import mixup_data | 3,906 | loss_value = objective + (self.hparams['mmd_gamma'] * penalty)
return loss_value
class MMD(AbstractMMD):
"""MMD using Gaussian kernel"""
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
super(MMD, self).__init__(
data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes, attr_sizes, gaussian=True)
class CORAL(AbstractMMD):
"""MMD using mean and covariance difference"""
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
super(CORAL, self).__init__(
data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes, attr_sizes, gaussian=False)
class AbstractDANN(Algorithm):
"""Domain-Adversarial Neural Networks (abstract class)"""
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams,
grp_sizes=None, attr_sizes=None, conditional=False, class_balance=False):
super(AbstractDANN, self).__init__(
data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes, attr_sizes)
self.register_buffer('update_count', torch.tensor([0]))
self.conditional = conditional
self.class_balance = class_balance
self.featurizer = networks.Featurizer(data_type, input_shape, self.hparams)
self.classifier = networks.Classifier(
self.featurizer.n_outputs,
num_classes,
self.hparams['nonlinear_classifier']
)
self.discriminator = networks.MLP(self.featurizer.n_outputs, num_attributes, self.hparams)
self.class_embeddings = nn.Embedding(num_classes, self.featurizer.n_outputs)
# optimizers
self.disc_opt = torch.optim.SGD(
(list(self.discriminator.parameters()) + list(self.class_embeddings.parameters())),
lr=self.hparams["lr_d"],
weight_decay=self.hparams['weight_decay_d'],
momentum=0.9)
self.gen_opt = torch.optim.SGD(
(list(self.featurizer.parameters()) + list(self.classifier.parameters())),
lr=self.hparams["lr_g"],
weight_decay=self.hparams['weight_decay_g'],
momentum=0.9)
def update(self, minibatch, step):
all_i, all_x, all_y, all_a = minibatch
self.update_count += 1
all_z = self.featurizer(all_x)
if self.conditional:
disc_input = all_z + self.class_embeddings(all_y)
else:
disc_input = all_z
disc_out = self.discriminator(disc_input)
if self.class_balance:
y_counts = F.one_hot(all_y).sum(dim=0)
weights = 1. / (y_counts[all_y] * y_counts.shape[0]).float()
disc_loss = F.cross_entropy(disc_out, all_a, reduction='none')
disc_loss = (weights * disc_loss).sum()
else:
disc_loss = F.cross_entropy(disc_out, all_a)
disc_softmax = F.softmax(disc_out, dim=1)
input_grad = autograd.grad(disc_softmax[:, all_a].sum(),
[disc_input], create_graph=True)[0]
grad_penalty = (input_grad ** 2).sum(dim=1).mean(dim=0)
disc_loss += self.hparams['grad_penalty'] * grad_penalty
d_steps_per_g = self.hparams['d_steps_per_g_step']
if self.update_count.item() % (1 + d_steps_per_g) < d_steps_per_g:
self.disc_opt.zero_grad()
disc_loss.backward()
self.disc_opt.step()
return {'disc_loss': disc_loss.item()}
else:
all_preds = self.classifier(all_z)
classifier_loss = F.cross_entropy(all_preds, all_y)
gen_loss = classifier_loss + (self.hparams['lambda'] * -disc_loss)
self.disc_opt.zero_grad()
self.gen_opt.zero_grad()
gen_loss.backward()
self.gen_opt.step()
return {'gen_loss': gen_loss.item()}
def return_feats(self, x):
return self.featurizer(x)
def predict(self, x):
return self.classifier(self.featurizer(x))
class DANN(AbstractDANN):
"""Unconditional DANN"""
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
super(DANN, self).__init__(data_type, input_shape, num_classes, num_attributes, num_examples, hparams,
grp_sizes, attr_sizes, conditional=False, class_balance=False)
class CDANN(AbstractDANN):
"""Conditional DANN"""
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
super(CDANN, self).__init__(data_type, input_shape, num_classes, num_attributes, num_examples, hparams,
grp_sizes, attr_sizes, conditional=True, class_balance=True)
class CVaRDRO(ERM):
"""
DRO with CVaR uncertainty set
https://arxiv.org/pdf/2010.05893.pdf
"""
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
super(CVaRDRO, self).__init__(
data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes, attr_sizes)
|
ALGORITHMS = [
'ERM',
'StratifiedERM',
# subgroup methods
'GroupDRO',
'IRM',
'CVaRDRO',
'JTT',
'LISA',
'DFR',
# data augmentation
'Mixup',
# domain generalization methods
'MMD',
'CORAL',
'DANN',
'CDANN',
# imbalanced learning methods
'ReSample',
'ReWeight',
'SqrtReWeight',
'CBLoss',
'Focal',
'LDAM',
'BSoftmax',
'CRT',
'ReWeightCRT',
'VanillaCRT',
# flat minima optimizer
'MA',
'SAM',
# attribute balancing
'GroupDROAttr',
'ReSampleAttr',
'ReWeightAttr',
]
def get_algorithm_class(algorithm_name):
"""Return the algorithm class with the given name."""
if algorithm_name not in globals():
raise NotImplementedError("Algorithm not found: {}".format(algorithm_name))
return globals()[algorithm_name]
class Algorithm(torch.nn.Module):
"""
A subclass of Algorithm implements a subgroup robustness algorithm.
Subclasses should implement the following:
- _init_model()
- _compute_loss()
- update()
- return_feats()
- predict()
"""
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
super(Algorithm, self).__init__()
self.hparams = hparams
self.data_type = data_type
self.num_classes = num_classes
self.num_attributes = num_attributes
self.num_examples = num_examples
def _init_model(self):
raise NotImplementedError
def _compute_loss(self, i, x, y, a, step):
raise NotImplementedError
def update(self, minibatch, step):
"""Perform one update step."""
raise NotImplementedError
def return_feats(self, x):
raise NotImplementedError
def predict(self, x):
raise NotImplementedError
def return_groups(self, y, a):
"""Given a list of (y, a) tuples, return indexes of samples belonging to each subgroup"""
idx_g, idx_samples = [], []
all_g = y * self.num_attributes + a
for g in all_g.unique():
idx_g.append(g)
idx_samples.append(all_g == g)
return zip(idx_g, idx_samples)
@staticmethod
def return_attributes(all_a):
"""Given a list of attributes, return indexes of samples belonging to each attribute"""
idx_a, idx_samples = [], []
for a in all_a.unique():
idx_a.append(a)
idx_samples.append(all_a == a)
return zip(idx_a, idx_samples)
class ERM(Algorithm):
"""Empirical Risk Minimization (ERM)"""
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
super(ERM, self).__init__(
data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes, attr_sizes)
self.featurizer = networks.Featurizer(data_type, input_shape, self.hparams)
self.classifier = networks.Classifier(
self.featurizer.n_outputs,
num_classes,
self.hparams['nonlinear_classifier']
)
self.network = nn.Sequential(self.featurizer, self.classifier)
self._init_model()
def _init_model(self):
self.clip_grad = (self.data_type == "text" and self.hparams["optimizer"] == "adamw")
if self.data_type in ["images", "tabular"]:
self.optimizer = get_optimizers[self.hparams['optimizer']](
self.network,
self.hparams['lr'],
self.hparams['weight_decay']
)
self.lr_scheduler = None
self.loss = torch.nn.CrossEntropyLoss(reduction="none")
elif self.data_type == "text":
self.network.zero_grad()
self.optimizer = get_optimizers[self.hparams["optimizer"]](
self.network,
self.hparams['lr'],
self.hparams['weight_decay']
)
self.lr_scheduler = get_scheduler(
"linear",
optimizer=self.optimizer,
num_warmup_steps=0,
num_training_steps=self.hparams["steps"]
)
self.loss = torch.nn.CrossEntropyLoss(reduction="none")
else:
raise NotImplementedError(f"{self.data_type} not supported.")
def _compute_loss(self, i, x, y, a, step):
return self.loss(self.predict(x), y).mean()
def update(self, minibatch, step):
all_i, all_x, all_y, all_a = minibatch
loss = self._compute_loss(all_i, all_x, all_y, all_a, step)
self.optimizer.zero_grad()
loss.backward()
if self.clip_grad:
torch.nn.utils.clip_grad_norm_(self.network.parameters(), 1.0)
self.optimizer.step()
if self.lr_scheduler is not None:
self.lr_scheduler.step()
if self.data_type == "text":
self.network.zero_grad()
return {'loss': loss.item()}
def return_feats(self, x):
return self.featurizer(x)
def predict(self, x):
return self.network(x)
class GroupDRO(ERM):
"""
Group DRO minimizes the error at the worst group [https://arxiv.org/pdf/1911.08731.pdf]
"""
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
super(GroupDRO, self).__init__(
data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes, attr_sizes)
self.register_buffer(
"q", torch.ones(self.num_classes * self.num_attributes).cuda())
def _compute_loss(self, i, x, y, a, step):
losses = self.loss(self.predict(x), y)
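        # exponentiated-gradient update: upweight groups with larger loss, then renormalize the group weights q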
for idx_g, idx_samples in self.return_groups(y, a):
self.q[idx_g] *= (self.hparams["groupdro_eta"] * losses[idx_samples].mean()).exp().item()
self.q /= self.q.sum()
loss_value = 0
for idx_g, idx_samples in self.return_groups(y, a):
loss_value += self.q[idx_g] * losses[idx_samples].mean()
return loss_value
class GroupDROAttr(ERM):
"""
GroupDROAttr minimizes the error at the worst attribute [https://arxiv.org/pdf/1911.08731.pdf]
"""
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
super(GroupDROAttr, self).__init__(
data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes, attr_sizes)
self.register_buffer(
"q", torch.ones(self.num_attributes).cuda())
def _compute_loss(self, i, x, y, a, step):
losses = self.loss(self.predict(x), y)
for idx_a in range(self.num_attributes):
mask = (a == idx_a)
if mask.sum() > 0:
self.q[idx_a] *= (self.hparams["groupdro_eta"] * losses[mask].mean()).exp().item()
self.q /= self.q.sum()
loss_value = 0
for idx_a in range(self.num_attributes):
mask = (a == idx_a)
if mask.sum() > 0:
loss_value += self.q[idx_a] * losses[mask].mean()
return loss_value
class StratifiedERM(ERM):
"""No changes to ERM, but flags subsetting dataset"""
class ReSample(ERM):
"""Naive resample, with no changes to ERM, but enable balanced sampling in hparams"""
class ReSampleAttr(ERM):
"""Naive resample, with no changes to ERM, but enable balanced sampling in hparams"""
class ReWeightBase(ERM):
"""Naive inverse re-weighting"""
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None, group_def='group'):
super(ReWeightBase, self).__init__(
data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes, attr_sizes)
self.group_def = group_def
if group_def == 'group':
assert len(grp_sizes) == num_classes * num_attributes
grp_sizes = [x if x else np.inf for x in grp_sizes]
elif group_def == 'attr':
assert len(attr_sizes) == num_attributes
grp_sizes = [x if x else np.inf for x in attr_sizes]
per_grp_weights = 1 / np.array(grp_sizes)
per_grp_weights = per_grp_weights / np.sum(per_grp_weights) * len(grp_sizes)
self.weights_per_grp = torch.FloatTensor(per_grp_weights)
def _compute_loss(self, i, x, y, a, step):
losses = self.loss(self.predict(x), y)
if self.group_def == 'group':
all_g = y * self.num_attributes + a
elif self.group_def == 'attr':
all_g = a
loss_value = (self.weights_per_grp.type_as(losses)[all_g] * losses).mean()
return loss_value
class ReWeight(ReWeightBase):
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
super(ReWeight, self).__init__(
data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes, attr_sizes, 'group')
class ReWeightAttr(ReWeightBase):
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
super(ReWeightAttr, self).__init__(
data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes, attr_sizes, 'attr')
class SqrtReWeight(ReWeight):
"""Square-root inverse re-weighting"""
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
super(SqrtReWeight, self).__init__(
data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes, attr_sizes)
assert len(grp_sizes) == num_classes * num_attributes
grp_sizes = [x if x else np.inf for x in grp_sizes]
per_grp_weights = 1 / np.sqrt(np.array(grp_sizes))
per_grp_weights = per_grp_weights / np.sum(per_grp_weights) * len(grp_sizes)
self.weights_per_grp = torch.FloatTensor(per_grp_weights)
class CBLoss(ReWeight):
"""Class-balanced loss, https://arxiv.org/pdf/1901.05555.pdf"""
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
super(CBLoss, self).__init__(
data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes, attr_sizes)
assert len(grp_sizes) == num_classes * num_attributes
grp_sizes = [x if x else np.inf for x in grp_sizes]
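        # class-balanced weights: inverse of the effective number of samples E_n = (1 - beta^n) / (1 - beta) per group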
effective_num = 1. - np.power(self.hparams["beta"], grp_sizes)
effective_num = np.array(effective_num)
effective_num[effective_num == 1] = np.inf
per_grp_weights = (1. - self.hparams["beta"]) / effective_num
per_grp_weights = per_grp_weights / np.sum(per_grp_weights) * len(grp_sizes)
self.weights_per_grp = torch.FloatTensor(per_grp_weights)
class Focal(ERM):
"""Focal loss, https://arxiv.org/abs/1708.02002"""
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
super(Focal, self).__init__(
data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes, attr_sizes)
@staticmethod
def focal_loss(input_values, gamma):
p = torch.exp(-input_values)
loss = (1 - p) ** gamma * input_values
return loss.mean()
def _compute_loss(self, i, x, y, a, step):
return self.focal_loss(self.loss(self.predict(x), y), self.hparams["gamma"])
class LDAM(ERM):
"""LDAM loss, https://arxiv.org/abs/1906.07413"""
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
super(LDAM, self).__init__(
data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes, attr_sizes)
assert len(grp_sizes) == num_classes * num_attributes
# attribute-agnostic as modifying class-dependent margins
class_sizes = [np.sum(grp_sizes[i * num_attributes:(i+1) * num_attributes]) for i in range(num_classes)]
class_sizes = [x if x else np.inf for x in class_sizes]
m_list = 1. / np.sqrt(np.sqrt(np.array(class_sizes)))
m_list = m_list * (self.hparams["max_m"] / np.max(m_list))
self.m_list = torch.FloatTensor(m_list)
def _compute_loss(self, i, x, y, a, step):
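        # subtract a per-class margin (larger for rarer classes) from the target-class logit, then apply scaled cross-entropy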
x = self.predict(x)
index = torch.zeros_like(x, dtype=torch.uint8)
index.scatter_(1, y.data.view(-1, 1), 1)
index_float = index.type(torch.FloatTensor)
batch_m = torch.matmul(self.m_list[None, :].type_as(x), index_float.transpose(0, 1).type_as(x))
batch_m = batch_m.view((-1, 1))
x_m = x - batch_m
output = torch.where(index, x_m, x)
loss_value = F.cross_entropy(self.hparams["scale"] * output, y)
return loss_value
class BSoftmax(ERM):
"""Balanced softmax, https://arxiv.org/abs/2007.10740"""
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
super(BSoftmax, self).__init__(
data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes, attr_sizes)
assert len(grp_sizes) == num_classes * num_attributes
# attribute-agnostic as modifying class-dependent margins
class_sizes = [np.sum(grp_sizes[i * num_attributes:(i+1) * num_attributes]) for i in range(num_classes)]
self.n_samples_per_cls = torch.FloatTensor(class_sizes)
def _compute_loss(self, i, x, y, a, step):
x = self.predict(x)
spc = self.n_samples_per_cls.type_as(x)
spc = spc.unsqueeze(0).expand(x.shape[0], -1)
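        # shift logits by the log of per-class sample counts so the softmax accounts for class priors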
x = x + spc.log()
loss_value = F.cross_entropy(input=x, target=y)
return loss_value
class CRT(ERM):
"""Classifier re-training with balanced sampling during the second earning stage"""
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
super(CRT, self).__init__(data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes, attr_sizes)
# fix stage 1 trained featurizer
for name, param in self.featurizer.named_parameters():
param.requires_grad = False
# only optimize the classifier
if self.data_type in ["images", "tabular"]:
self.optimizer = get_optimizers[self.hparams["optimizer"]](
self.classifier,
self.hparams['lr'],
self.hparams['weight_decay']
)
self.lr_scheduler = None
elif self.data_type == "text":
self.network.zero_grad()
self.optimizer = get_optimizers[self.hparams["optimizer"]](
self.classifier,
self.hparams['lr'],
self.hparams['weight_decay']
)
self.lr_scheduler = get_scheduler(
"linear",
optimizer=self.optimizer,
num_warmup_steps=0,
num_training_steps=self.hparams["steps"]
)
else:
raise NotImplementedError(f"{self.data_type} not supported.")
class ReWeightCRT(ReWeight):
"""Classifier re-training with balanced re-weighting during the second earning stage"""
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
super(ReWeightCRT, self).__init__(
data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes, attr_sizes)
# fix stage 1 trained featurizer
for name, param in self.featurizer.named_parameters():
param.requires_grad = False
# only optimize the classifier
if self.data_type in ["images", "tabular"]:
self.optimizer = get_optimizers[self.hparams["optimizer"]](
self.classifier,
self.hparams['lr'],
self.hparams['weight_decay']
)
self.lr_scheduler = None
elif self.data_type == "text":
self.network.zero_grad()
self.optimizer = get_optimizers[self.hparams["optimizer"]](
self.classifier,
self.hparams['lr'],
self.hparams['weight_decay']
)
self.lr_scheduler = get_scheduler(
"linear",
optimizer=self.optimizer,
num_warmup_steps=0,
num_training_steps=self.hparams["steps"]
)
else:
raise NotImplementedError(f"{self.data_type} not supported.")
class VanillaCRT(ERM):
"""Classifier re-training with normal (instance-balanced) sampling"""
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
super(VanillaCRT, self).__init__(
data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes, attr_sizes)
# fix stage 1 trained featurizer
for name, param in self.featurizer.named_parameters():
param.requires_grad = False
# only optimize the classifier
if self.data_type in ["images", "tabular"]:
self.optimizer = get_optimizers[self.hparams["optimizer"]](
self.classifier,
self.hparams['lr'],
self.hparams['weight_decay']
)
self.lr_scheduler = None
elif self.data_type == "text":
self.network.zero_grad()
self.optimizer = get_optimizers[self.hparams["optimizer"]](
self.classifier,
self.hparams['lr'],
self.hparams['weight_decay']
)
self.lr_scheduler = get_scheduler(
"linear",
optimizer=self.optimizer,
num_warmup_steps=0,
num_training_steps=self.hparams["steps"]
)
else:
raise NotImplementedError(f"{self.data_type} not supported.")
class DFR(ERM):
"""
Classifier re-training with sub-sampled, group-balanced, held-out(validation) data and l1 regularization.
Note that when attribute is unavailable in validation data, group-balanced reduces to class-balanced.
https://openreview.net/pdf?id=Zb6c8A-Fghk
"""
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
super(DFR, self).__init__(data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes, attr_sizes)
# fix stage 1 trained featurizer
for name, param in self.featurizer.named_parameters():
param.requires_grad = False
# only optimize the classifier
if self.data_type in ["images", "tabular"]:
self.optimizer = get_optimizers[self.hparams["optimizer"]](
self.classifier,
self.hparams['lr'],
0.
)
self.lr_scheduler = None
elif self.data_type == "text":
self.network.zero_grad()
self.optimizer = get_optimizers[self.hparams["optimizer"]](
self.classifier,
self.hparams['lr'],
0.
)
self.lr_scheduler = get_scheduler(
"linear",
optimizer=self.optimizer,
num_warmup_steps=0,
num_training_steps=self.hparams["steps"]
)
else:
raise NotImplementedError(f"{self.data_type} not supported.")
def _compute_loss(self, i, x, y, a, step):
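        # cross-entropy on held-out data plus an L1 penalty on the linear classifier weights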
return self.loss(self.predict(x), y).mean() + self.hparams['dfr_reg'] * torch.norm(self.classifier.weight, 1)
class IRM(ERM):
"""Invariant Risk Minimization"""
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
super(IRM, self).__init__(data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes, attr_sizes)
self.register_buffer('update_count', torch.tensor([0]))
@staticmethod
def _irm_penalty(logits, y):
device = "cuda" if logits[0][0].is_cuda else "cpu"
scale = torch.tensor(1.).to(device).requires_grad_()
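        # IRMv1 penalty: dot product of gradients of the risk w.r.t. a dummy scale, computed on two halves of the batch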
loss_1 = F.cross_entropy(logits[::2] * scale, y[::2])
loss_2 = F.cross_entropy(logits[1::2] * scale, y[1::2])
grad_1 = autograd.grad(loss_1, [scale], create_graph=True)[0]
grad_2 = autograd.grad(loss_2, [scale], create_graph=True)[0]
result = torch.sum(grad_1 * grad_2)
return result
def _compute_loss(self, i, x, y, a, step):
penalty_weight = self.hparams['irm_lambda'] \
if self.update_count >= self.hparams['irm_penalty_anneal_iters'] else 1.0
nll = 0.
penalty = 0.
logits = self.network(x)
for idx_a, idx_samples in self.return_attributes(a):
nll += F.cross_entropy(logits[idx_samples], y[idx_samples])
penalty += self._irm_penalty(logits[idx_samples], y[idx_samples])
nll /= len(a.unique())
penalty /= len(a.unique())
loss_value = nll + (penalty_weight * penalty)
self.update_count += 1
return loss_value
class Mixup(ERM):
"""Mixup of minibatch data"""
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
super(Mixup, self).__init__(
data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes, attr_sizes)
def _compute_loss(self, i, x, y, a, step):
if self.data_type == "text":
feats = self.featurizer(x)
feats, yi, yj, lam = mixup_data(feats, y, self.hparams["mixup_alpha"], device="cuda")
predictions = self.classifier(feats)
else:
x, yi, yj, lam = mixup_data(x, y, self.hparams["mixup_alpha"], device="cuda")
predictions = self.predict(x)
loss_value = lam * F.cross_entropy(predictions, yi) + (1 - lam) * F.cross_entropy(predictions, yj)
return loss_value
class AbstractMMD(ERM):
"""
Perform ERM while matching the pair-wise domain feature distributions using MMD
"""
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams,
grp_sizes=None, attr_sizes=None, gaussian=False):
super(AbstractMMD, self).__init__(
data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes, attr_sizes)
if gaussian:
self.kernel_type = "gaussian"
else:
self.kernel_type = "mean_cov"
@staticmethod
def my_cdist(x1, x2):
x1_norm = x1.pow(2).sum(dim=-1, keepdim=True)
x2_norm = x2.pow(2).sum(dim=-1, keepdim=True)
res = torch.addmm(x2_norm.transpose(-2, -1),
x1,
x2.transpose(-2, -1), alpha=-2).add_(x1_norm)
return res.clamp_min_(1e-30)
def gaussian_kernel(self, x, y, gamma=[0.001, 0.01, 0.1, 1, 10, 100, 1000]):
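        # multi-bandwidth RBF kernel: sum of exp(-g * squared_distance) over the listed gamma values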
D = self.my_cdist(x, y)
K = torch.zeros_like(D)
for g in gamma:
K.add_(torch.exp(D.mul(-g)))
return K
def mmd(self, x, y):
if self.kernel_type == "gaussian":
Kxx = self.gaussian_kernel(x, x).mean()
Kyy = self.gaussian_kernel(y, y).mean()
Kxy = self.gaussian_kernel(x, y).mean()
return Kxx + Kyy - 2 * Kxy
else:
mean_x = x.mean(0, keepdim=True)
mean_y = y.mean(0, keepdim=True)
cent_x = x - mean_x
cent_y = y - mean_y
cova_x = (cent_x.t() @ cent_x) / (len(x) - 1)
cova_y = (cent_y.t() @ cent_y) / (len(y) - 1)
mean_diff = (mean_x - mean_y).pow(2).mean()
cova_diff = (cova_x - cova_y).pow(2).mean()
return mean_diff + cova_diff
def _compute_loss(self, i, x, y, a, step):
all_feats = self.featurizer(x)
outputs = self.classifier(all_feats)
objective = F.cross_entropy(outputs, y)
features = []
for _, idx_samples in self.return_attributes(a):
features.append(all_feats[idx_samples])
penalty = 0.
for i in range(len(features)):
for j in range(i + 1, len(features)):
penalty += self.mmd(features[i], features[j])
if len(features) > 1:
penalty /= (len(features) * (len(features) - 1) / 2)
loss_value = objective + (self.hparams['mmd_gamma'] * penalty)
return loss_value
class MMD(AbstractMMD):
"""MMD using Gaussian kernel"""
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
super(MMD, self).__init__(
data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes, attr_sizes, gaussian=True)
class CORAL(AbstractMMD):
"""MMD using mean and covariance difference"""
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
super(CORAL, self).__init__(
data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes, attr_sizes, gaussian=False)
class AbstractDANN(Algorithm):
"""Domain-Adversarial Neural Networks (abstract class)"""
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams,
grp_sizes=None, attr_sizes=None, conditional=False, class_balance=False):
super(AbstractDANN, self).__init__(
data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes, attr_sizes)
self.register_buffer('update_count', torch.tensor([0]))
self.conditional = conditional
self.class_balance = class_balance
self.featurizer = networks.Featurizer(data_type, input_shape, self.hparams)
self.classifier = networks.Classifier(
self.featurizer.n_outputs,
num_classes,
self.hparams['nonlinear_classifier']
)
self.discriminator = networks.MLP(self.featurizer.n_outputs, num_attributes, self.hparams)
self.class_embeddings = nn.Embedding(num_classes, self.featurizer.n_outputs)
# optimizers
self.disc_opt = torch.optim.SGD(
(list(self.discriminator.parameters()) + list(self.class_embeddings.parameters())),
lr=self.hparams["lr_d"],
weight_decay=self.hparams['weight_decay_d'],
momentum=0.9)
self.gen_opt = torch.optim.SGD(
(list(self.featurizer.parameters()) + list(self.classifier.parameters())),
lr=self.hparams["lr_g"],
weight_decay=self.hparams['weight_decay_g'],
momentum=0.9)
def update(self, minibatch, step):
all_i, all_x, all_y, all_a = minibatch
self.update_count += 1
all_z = self.featurizer(all_x)
if self.conditional:
disc_input = all_z + self.class_embeddings(all_y)
else:
disc_input = all_z
disc_out = self.discriminator(disc_input)
if self.class_balance:
y_counts = F.one_hot(all_y).sum(dim=0)
weights = 1. / (y_counts[all_y] * y_counts.shape[0]).float()
disc_loss = F.cross_entropy(disc_out, all_a, reduction='none')
disc_loss = (weights * disc_loss).sum()
else:
disc_loss = F.cross_entropy(disc_out, all_a)
disc_softmax = F.softmax(disc_out, dim=1)
input_grad = autograd.grad(disc_softmax[:, all_a].sum(),
[disc_input], create_graph=True)[0]
grad_penalty = (input_grad ** 2).sum(dim=1).mean(dim=0)
disc_loss += self.hparams['grad_penalty'] * grad_penalty
d_steps_per_g = self.hparams['d_steps_per_g_step']
if self.update_count.item() % (1 + d_steps_per_g) < d_steps_per_g:
self.disc_opt.zero_grad()
disc_loss.backward()
self.disc_opt.step()
return {'disc_loss': disc_loss.item()}
else:
all_preds = self.classifier(all_z)
classifier_loss = F.cross_entropy(all_preds, all_y)
gen_loss = classifier_loss + (self.hparams['lambda'] * -disc_loss)
self.disc_opt.zero_grad()
self.gen_opt.zero_grad()
gen_loss.backward()
self.gen_opt.step()
return {'gen_loss': gen_loss.item()}
def return_feats(self, x):
return self.featurizer(x)
def predict(self, x):
return self.classifier(self.featurizer(x))
class DANN(AbstractDANN):
"""Unconditional DANN"""
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
super(DANN, self).__init__(data_type, input_shape, num_classes, num_attributes, num_examples, hparams,
grp_sizes, attr_sizes, conditional=False, class_balance=False)
class CDANN(AbstractDANN):
"""Conditional DANN"""
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
super(CDANN, self).__init__(data_type, input_shape, num_classes, num_attributes, num_examples, hparams,
grp_sizes, attr_sizes, conditional=True, class_balance=True)
class CVaRDRO(ERM):
"""
DRO with CVaR uncertainty set
https://arxiv.org/pdf/2010.05893.pdf
"""
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
super(CVaRDRO, self).__init__(
data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes, attr_sizes) | self._joint_dro_loss_computer = joint_dro.RobustLoss(hparams['joint_dro_alpha'], 0, "cvar") | 1 | 2023-12-15 04:10:31+00:00 | 8k |
RomGai/BrainVis | process.py | [
{
"identifier": "CE",
"path": "loss.py",
"snippet": "class CE:\n def __init__(self, model):\n self.model = model\n self.ce = nn.CrossEntropyLoss()\n self.ce_pretrain = nn.CrossEntropyLoss(ignore_index=0)\n\n def computeft(self, batch):\n seqs, labels ,clip,clip_moreinf= batch\n #print(labels)\n lastrep, rep, scores = self.model(seqs) # B * N\n labels = labels.view(-1).long()\n loss = self.ce(scores, labels)\n return loss\n\n def compute(self, batch):\n seqs, labels = batch\n #print(labels)\n outputs = self.model(seqs) # B * N\n labels = labels.view(-1).long()\n loss = self.ce(outputs, labels)\n return loss\n\n\n def computefreq(self, batch):\n seqs, labels ,clip,clip_moreinf= batch\n #print(labels)\n lastrep,attn_encoded,scores = self.model(seqs) # B * N\n labels = labels.view(-1).long()\n loss = self.ce(scores, labels)\n return loss"
},
{
"identifier": "Align",
"path": "loss.py",
"snippet": "class Align:\n def __init__(self):\n self.mse = nn.MSELoss(reduction='mean')\n self.ce = nn.CrossEntropyLoss()\n\n def compute(self, rep_mask, rep_mask_prediction):\n align_loss = self.mse(rep_mask, rep_mask_prediction)\n return align_loss"
},
{
"identifier": "Reconstruct",
"path": "loss.py",
"snippet": "class Reconstruct:\n def __init__(self):\n self.ce = nn.CrossEntropyLoss(label_smoothing=0.2)\n\n def compute(self, token_prediction_prob, tokens):\n hits = torch.sum(torch.argmax(token_prediction_prob, dim=-1) == tokens)\n NDCG10 = recalls_and_ndcgs_for_ks(token_prediction_prob.view(-1, token_prediction_prob.shape[-1]),\n tokens.reshape(-1, 1), 10)\n reconstruct_loss = self.ce(token_prediction_prob.view(-1, token_prediction_prob.shape[-1]), tokens.view(-1))\n return reconstruct_loss, hits, NDCG10"
},
{
"identifier": "CM",
"path": "loss.py",
"snippet": "class CM:\n def __init__(self):\n self.mse = nn.MSELoss(reduction='mean')\n self.cos = nn.CosineEmbeddingLoss()\n\n def compute(self, clip_pred, clip):\n target_labels = torch.ones(len(clip_pred)).to(\"cuda\") # 目标相似度标签\n cosine_loss=self.cos(clip_pred, clip,target_labels)\n return cosine_loss"
},
{
"identifier": "fit_lr",
"path": "classification.py",
"snippet": "def fit_lr(features, y):\n pipe = make_pipeline(\n StandardScaler(),\n LogisticRegression(\n random_state=3407,\n max_iter=1000000,\n multi_class='ovr'\n )\n )\n pipe.fit(features, y)\n return pipe"
},
{
"identifier": "get_rep_with_label",
"path": "classification.py",
"snippet": "def get_rep_with_label(model, dataloader):\n reps = []\n labels = []\n with torch.no_grad():\n for batch in tqdm(dataloader):\n seq, label,clip,clip_moreinf = batch\n seq = seq.to(args.device)\n labels += label.cpu().numpy().tolist()\n rep = model(seq)\n reps += rep.cpu().numpy().tolist()\n return reps, labels"
},
{
"identifier": "get_freqrep_with_label",
"path": "classification.py",
"snippet": "def get_freqrep_with_label(freqtime_model, dataloader):\n reps = []\n labels = []\n with torch.no_grad():\n for batch in tqdm(dataloader):\n seq, label,clip_moreinf = batch\n seq = seq.to(args.device)\n labels += label.cpu().numpy().tolist()\n rep,encoded,xcls = freqtime_model(seq)\n reps += rep.cpu().numpy().tolist()\n return reps, labels"
},
{
"identifier": "AlignNet",
"path": "model/BrainVisModels.py",
"snippet": "class AlignNet(nn.Module):\n def __init__(self, input_size, freq_size, output_size,pretrained_model):\n super(AlignNet, self).__init__()\n\n self.pretrained_model = pretrained_model#TimeFreqEncoder\n\n self.fc01=nn.Linear(input_size+freq_size+40, 4*input_size)\n self.tanh = nn.Tanh()\n self.fc02 = nn.Linear(4*input_size, input_size)\n self.tanh = nn.Tanh()\n self.fc03=nn.Linear(input_size, 4*input_size)\n self.tanh = nn.Tanh()\n self.fc04 = nn.Linear(4*input_size, input_size)\n self.tanh = nn.Tanh()\n self.fc05=nn.Linear(input_size, 4*input_size)\n self.tanh = nn.Tanh()\n self.fc6 = nn.Linear(4*input_size, output_size)\n\n def forward(self, x):\n lastrep,encoded,scores=self.pretrained_model(x)\n x = torch.cat((encoded, scores), dim=1)\n x = self.fc01(x)\n x = self.tanh(x)\n res_4is_1=x\n x = self.fc02(x)\n x = self.tanh(x)\n res_is_2 = x\n x = self.fc03(x)+res_4is_1\n x = self.tanh(x)\n res_4is_2 = x\n x = self.fc04(x)+res_is_2\n x = self.tanh(x)\n x = self.fc05(x)+res_4is_2\n x = self.tanh(x)\n x = self.fc6(x)\n return x"
},
{
"identifier": "TimeFreqEncoder",
"path": "model/BrainVisModels.py",
"snippet": "class TimeFreqEncoder(nn.Module):\n def __init__(self, pretrained_model_time,pretrained_model_freq,args):\n super(TimeFreqEncoder, self).__init__()\n\n self.pretrained_model_time = pretrained_model_time\n self.pretrained_model_time.nocliptune=True\n self.pretrained_model_time.linear_proba=False\n self.pretrained_model_freq=pretrained_model_freq\n\n self.fc01 =nn.Linear( args.d_model+128, args.num_class)\n\n def forward(self,x):\n lastrep,time_feature,cls=self.pretrained_model_time(x)\n lstmcls,freq_feature=self.pretrained_model_freq(x)\n x = torch.cat((time_feature, freq_feature), dim=1)\n\n lastrep = x\n encoded=x\n x = self.fc01(encoded)\n\n scores=x\n return lastrep,encoded,scores"
},
{
"identifier": "FreqEncoder",
"path": "model/BrainVisModels.py",
"snippet": "class FreqEncoder(nn.Module):\n\n def __init__(self, input_size=128, lstm_size=128, lstm_layers=1, output_size=128):\n # Call parent\n super().__init__()\n # Define parameters\n self.input_size = input_size\n self.lstm_size = lstm_size\n self.lstm_layers = lstm_layers\n self.output_size = output_size\n\n # Define internal modules\n self.lstm = nn.LSTM(input_size, lstm_size, num_layers=lstm_layers, batch_first=True)\n self.output = nn.Linear(lstm_size, output_size)\n self.classifier = nn.Linear(output_size, 40)\n\n def forward(self, x):\n batch_size = x.size(0)\n x = x.permute(0, 2, 1)\n x = x.cpu()\n fourier_transform = np.fft.fft(x, axis=2)\n half_spectrum = fourier_transform[:, :, 1:440 // 2 + 1]\n amplitude_spectrum = np.abs(half_spectrum)\n\n amplitude_spectrum = torch.tensor(amplitude_spectrum).float()\n\n x = amplitude_spectrum.permute(0, 2, 1)\n x = x.to(\"cuda\")\n\n lstm_init = (torch.zeros(self.lstm_layers, batch_size, self.lstm_size),\n torch.zeros(self.lstm_layers, batch_size, self.lstm_size))\n if x.is_cuda: lstm_init = (lstm_init[0].cuda(), lstm_init[0].cuda())\n lstm_init = (Variable(lstm_init[0], volatile=x.volatile), Variable(lstm_init[1], volatile=x.volatile))\n\n x = self.lstm(x, lstm_init)[0][:, -1, :]\n reps = x\n # Forward output\n xa = F.relu(self.output(x))\n x = self.classifier(xa)\n return x, xa"
}
] | import time
import torch
import numpy as np
import argparse
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
from tqdm import tqdm
from loss import CE, Align, Reconstruct,CM
from torch.optim.lr_scheduler import LambdaLR
from classification import fit_lr, get_rep_with_label,get_freqrep_with_label
from model.BrainVisModels import AlignNet,TimeFreqEncoder,FreqEncoder | 5,083 | metrics['f1'] = f1_score(y_true=label, y_pred=pred)
metrics['precision'] = precision_score(y_true=label, y_pred=pred)
metrics['recall'] = recall_score(y_true=label, y_pred=pred)
else:
metrics['f1'] = f1_score(y_true=label, y_pred=pred, average='macro')
metrics['micro_f1'] = f1_score(y_true=label, y_pred=pred, average='micro')
metrics['acc'] = accuracy_score(y_true=label, y_pred=pred)
metrics['test_loss'] = test_loss / (idx + 1)
return metrics
def compute_metrics(self, batch):
seqs, label, clip, clip_moreinf = batch
lastrep, rep,scores = self.model(seqs)
_, pred = torch.topk(scores, 1)
test_loss = self.test_cr(scores, label.view(-1).long())
pred = pred.view(-1).tolist()
return pred, label.tolist(), test_loss
def compute_metrics_freq(self, batch,model):
#if len(batch) == 2:
seqs, label,clip,clip_moreinf = batch
lastrep, rep,scores = model(seqs)
#else:
# seqs1, seqs2, label = batch
# lastrep, rep, scores = self.model((seqs1, seqs2))
_, pred = torch.topk(scores, 1)
#print(np.shape(scores))
test_loss = self.test_cr(scores, label.view(-1).long())
pred = pred.view(-1).tolist()
return pred, label.tolist(), test_loss
def _confusion_mat(self, label, pred):
mat = np.zeros((self.args.num_class, self.args.num_class))
for _label, _pred in zip(label, pred):
mat[_label, _pred] += 1
return mat
def print_process(self, *x):
if self.verbose:
print(*x)
def cont_pretrain(self):
start_epoch=300
state_dict = torch.load(self.save_path + '/pretrain_model_epoch300.pkl', map_location=self.device)
eval_acc=0.0 # It should be modified.
self.model.load_state_dict(state_dict)
print('cont_pretraining')
self.optimizer = torch.optim.AdamW(self.model.parameters(), lr=self.args.lr)
align = Align()
reconstruct = Reconstruct()
self.model.copy_weight()
for epoch in range(self.num_epoch_pretrain):
if(epoch<start_epoch):
continue
print('Epoch:' + str(epoch + 1))
self.model.train()
tqdm_dataloader = tqdm(self.train_loader)
loss_sum = 0
loss_mse = 0
loss_ce = 0
hits_sum = 0
NDCG_sum = 0
for idx, batch in enumerate(tqdm_dataloader):
batch = [x.to(self.device) for x in batch]
self.optimizer.zero_grad()
[rep_mask, rep_mask_prediction], [token_prediction_prob, tokens] = self.model.pretrain_forward(batch[0])
align_loss = align.compute(rep_mask, rep_mask_prediction)
loss_mse += align_loss.item()
reconstruct_loss, hits, NDCG = reconstruct.compute(token_prediction_prob, tokens)
loss_ce += reconstruct_loss.item()
hits_sum += hits.item()
NDCG_sum += NDCG
loss = self.alpha * align_loss + self.beta * reconstruct_loss
loss.backward()
self.optimizer.step()
self.model.momentum_update()
loss_sum += loss.item()
print('pretrain epoch{0}, loss{1}, mse{2}, ce{3}, hits{4}, ndcg{5}'.format(epoch + 1, loss_sum / (idx + 1),
loss_mse / (idx + 1),
loss_ce / (idx + 1), hits_sum,
NDCG_sum / (idx + 1)))
if (epoch + 1) % 10 == 0:
torch.save(self.model.state_dict(), self.save_path + '/pretrain_model_epoch'+str(epoch+1)+'.pkl')
if (epoch + 1) % 3 == 0:
self.model.eval()
train_rep, train_label = get_rep_with_label(self.model, self.train_linear_loader)
test_rep, test_label = get_rep_with_label(self.model, self.test_loader)
clf = fit_lr(train_rep, train_label)
acc = clf.score(test_rep, test_label)
print(acc)
if acc > eval_acc:
eval_acc = acc
torch.save(self.model.state_dict(), self.save_path + '/pretrain_model.pkl')
def finetune_CLIP(self):
eval_cosine = 0.0
freq_model_options = {key: int(value) if value.isdigit() else (float(value) if value[0].isdigit() else value) for
(key, value) in [x.split("=") for x in opt.model_params]}
freq_model = FreqEncoder(**freq_model_options)
self.timefreq_model=TimeFreqEncoder(self.model,freq_model,self.args)
self.timefreq_model = self.timefreq_model.to(torch.device(self.device))
freqtime_state_dict = torch.load(self.save_path + '/timefreqmodel.pkl', map_location=self.device)
self.timefreq_model.load_state_dict(freqtime_state_dict)
self.timefreq_model.to(torch.device("cpu"))
freq_size=freq_model.output_size
time_size=self.model.d
clip_size=int(77*768)
self.alignmodel=AlignNet(time_size,freq_size,clip_size,self.timefreq_model)
self.alignmodel=self.alignmodel.to(torch.device(self.device))
print('CLIP_finetune')
self.optimizer = torch.optim.AdamW(self.alignmodel.parameters(), lr=self.args.lr)
|
parser = argparse.ArgumentParser(description="Template")
parser.add_argument('-mt','--model_type', default='FreqEncoder', help='')
parser.add_argument('-mp','--model_params', default='', nargs='*', help='list of key=value pairs of model options')
parser.add_argument('--pretrained_net', default='lstm__subject0_epoch_900.pth', help="path to pre-trained net")
# Parse arguments
opt = parser.parse_args()
def l1_regularization(model, lambda_):
l1_norm = 0
for param in model.parameters():
l1_norm += param.abs().sum()
l1_penalty = lambda_ * l1_norm
return l1_penalty
class Trainer():
def __init__(self, args, time_model, train_loader, train_linear_loader, test_loader, verbose=False):
self.args = args
self.verbose = verbose
self.device = args.device
self.print_process(self.device)
self.model = time_model.to(torch.device(self.device))
self.train_loader = train_loader
#self.train_linear_loader = train_linear_loader
self.train_linear_loader = train_loader
self.test_loader = test_loader
self.lr_decay = args.lr_decay_rate
self.lr_decay_steps = args.lr_decay_steps
self.cr = CE(self.model)
self.alpha = args.alpha
self.beta = args.beta
self.test_cr = torch.nn.CrossEntropyLoss()
self.num_epoch = args.num_epoch
self.num_epoch_pretrain = args.num_epoch_pretrain
self.eval_per_steps = args.eval_per_steps
self.save_path = args.save_path
self.step = 0
self.best_metric = -1e9
self.metric = 'acc'
def pretrain(self):
print('pretraining')
self.optimizer = torch.optim.AdamW(self.model.parameters(), lr=self.args.lr)
eval_acc = 0
align = Align()
reconstruct = Reconstruct()
self.model.copy_weight()
for epoch in range(self.num_epoch_pretrain):
print('Epoch:' + str(epoch+1))
self.model.train()
tqdm_dataloader = tqdm(self.train_loader)
loss_sum = 0
loss_mse = 0
loss_ce = 0
hits_sum = 0
NDCG_sum = 0
for idx, batch in enumerate(tqdm_dataloader):
batch = [x.to(self.device) for x in batch]
                self.optimizer.zero_grad()  # zero the gradients
[rep_mask, rep_mask_prediction], [token_prediction_prob, tokens] = self.model.pretrain_forward(batch[0])
align_loss = align.compute(rep_mask, rep_mask_prediction)
loss_mse += align_loss.item()
reconstruct_loss, hits, NDCG = reconstruct.compute(token_prediction_prob, tokens)
loss_ce += reconstruct_loss.item()
hits_sum += hits.item()
NDCG_sum += NDCG
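                # total pretraining loss: weighted sum of the alignment (MSE) and reconstruction (CE) objectives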
loss = self.alpha * align_loss + self.beta * reconstruct_loss
loss.backward()
self.optimizer.step()
self.model.momentum_update()
loss_sum += loss.item()
print('pretrain epoch{0}, loss{1}, mse{2}, ce{3}, hits{4}, ndcg{5}'.format(epoch + 1, loss_sum / (idx + 1),
loss_mse / (idx + 1),
loss_ce / (idx + 1), hits_sum,
NDCG_sum / (idx + 1)))
if (epoch + 1) % 20 == 0:
torch.save(self.model.state_dict(), self.save_path + '/pretrain_model_epoch'+str(epoch+1)+'.pkl')
if (epoch + 1) % 3 == 0:
self.model.eval()
train_rep, train_label = get_rep_with_label(self.model, self.train_linear_loader)
test_rep, test_label = get_rep_with_label(self.model, self.test_loader)
clf = fit_lr(train_rep, train_label)
acc = clf.score(test_rep, test_label)
print(acc)
if acc > eval_acc:
eval_acc = acc
torch.save(self.model.state_dict(), self.save_path + '/pretrain_model.pkl')
                    # Note that the highest pretraining accuracy does not necessarily give the best model for
                    # finetuning, so a checkpoint from a later training epoch should generally be used.
def finetune(self):
print('finetune')
self.model.linear_proba = True
#self.args.load_pretrained_model=False
if self.args.load_pretrained_model:
print('load pretrained model')
state_dict = torch.load(self.save_path + '/pretrain_model_epoch300.pkl', map_location=self.device)
try:
self.model.load_state_dict(state_dict)
except:
model_state_dict = self.model.state_dict()
for pretrain, random_intial in zip(state_dict, model_state_dict):
assert pretrain == random_intial
if pretrain in ['input_projection.weight', 'input_projection.bias', 'predict_head.weight',
'predict_head.bias', 'position.pe.weight']:
state_dict[pretrain] = model_state_dict[pretrain]
self.model.load_state_dict(state_dict)
self.model.eval()
train_rep, train_label = get_rep_with_label(self.model, self.train_linear_loader)
test_rep, test_label = get_rep_with_label(self.model, self.test_loader)
clf = fit_lr(train_rep, train_label)
acc = clf.score(test_rep, test_label)
pred_label = np.argmax(clf.predict_proba(test_rep), axis=1)
f1 = f1_score(test_label, pred_label, average='macro')
print(acc, f1)
self.model.linear_proba = False #If linear_proba = True, freeze pretrained model, train only classifier
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.lr)
self.scheduler = LambdaLR(self.optimizer, lr_lambda=lambda step: self.lr_decay ** step, verbose=self.verbose)
for epoch in range(self.num_epoch):
loss_epoch, time_cost = self._train_one_epoch()
self.print_process(
'Finetune epoch:{0},loss:{1},training_time:{2}'.format(epoch + 1, loss_epoch, time_cost))
if (epoch + 1) % 5 == 0:
torch.save(self.model.state_dict(),
self.save_path + '/finetune_model_epoch' + str(epoch + 1) + '.pkl')
self.print_process(self.best_metric)
return self.best_metric
def _train_one_epoch(self):
t0 = time.perf_counter()
self.model.train()
tqdm_dataloader = tqdm(self.train_linear_loader) if self.verbose else self.train_linear_loader
loss_sum = 0
pos=0
for idx, batch in enumerate(tqdm_dataloader):
batch = [x.to(self.device) for x in batch]
self.optimizer.zero_grad()
l1=l1_regularization(self.model,0.000003)
loss = self.cr.computeft(batch)#+l1
loss_sum += loss.item()
loss.backward()
# torch.nn.utils.clip_grad_norm_(self.model.parameters(), 5)
self.optimizer.step()
pos=pos+1
self.step += 1
# if self.step % self.eval_per_steps == 0:
metric = self.eval_model()
self.print_process(metric)
if metric[self.metric] >= self.best_metric:
torch.save(self.model.state_dict(), self.save_path + '/finetune_model.pkl')
self.best_metric = metric[self.metric]
self.model.train()
return loss_sum / (idx + 1), time.perf_counter() - t0
def eval_model(self):
self.model.eval()
tqdm_data_loader = tqdm(self.test_loader) if self.verbose else self.test_loader
metrics = {'acc': 0, 'f1': 0}
pred = []
label = []
test_loss = 0
with torch.no_grad():
for idx, batch in enumerate(tqdm_data_loader):
batch = [x.to(self.device) for x in batch]
ret = self.compute_metrics(batch)
if len(ret) == 2:
pred_b, label_b = ret
pred += pred_b
label += label_b
else:
pred_b, label_b, test_loss_b = ret
pred += pred_b
label += label_b
test_loss += test_loss_b.cpu().item()
print("aaa")
print(len(label))
confusion_mat = self._confusion_mat(label, pred)
self.print_process(confusion_mat)
if self.args.num_class == 2:
metrics['f1'] = f1_score(y_true=label, y_pred=pred)
metrics['precision'] = precision_score(y_true=label, y_pred=pred)
metrics['recall'] = recall_score(y_true=label, y_pred=pred)
else:
metrics['f1'] = f1_score(y_true=label, y_pred=pred, average='macro')
metrics['micro_f1'] = f1_score(y_true=label, y_pred=pred, average='micro')
metrics['acc'] = accuracy_score(y_true=label, y_pred=pred)
metrics['test_loss'] = test_loss / (idx + 1)
return metrics
def compute_metrics(self, batch):
seqs, label, clip, clip_moreinf = batch
lastrep, rep,scores = self.model(seqs)
_, pred = torch.topk(scores, 1)
test_loss = self.test_cr(scores, label.view(-1).long())
pred = pred.view(-1).tolist()
return pred, label.tolist(), test_loss
def compute_metrics_freq(self, batch,model):
#if len(batch) == 2:
seqs, label,clip,clip_moreinf = batch
lastrep, rep,scores = model(seqs)
#else:
# seqs1, seqs2, label = batch
# lastrep, rep, scores = self.model((seqs1, seqs2))
_, pred = torch.topk(scores, 1)
#print(np.shape(scores))
test_loss = self.test_cr(scores, label.view(-1).long())
pred = pred.view(-1).tolist()
return pred, label.tolist(), test_loss
def _confusion_mat(self, label, pred):
mat = np.zeros((self.args.num_class, self.args.num_class))
for _label, _pred in zip(label, pred):
mat[_label, _pred] += 1
return mat
def print_process(self, *x):
if self.verbose:
print(*x)
def cont_pretrain(self):
start_epoch=300
state_dict = torch.load(self.save_path + '/pretrain_model_epoch300.pkl', map_location=self.device)
eval_acc=0.0 # It should be modified.
self.model.load_state_dict(state_dict)
print('cont_pretraining')
self.optimizer = torch.optim.AdamW(self.model.parameters(), lr=self.args.lr)
align = Align()
reconstruct = Reconstruct()
self.model.copy_weight()
for epoch in range(self.num_epoch_pretrain):
if(epoch<start_epoch):
continue
print('Epoch:' + str(epoch + 1))
self.model.train()
tqdm_dataloader = tqdm(self.train_loader)
loss_sum = 0
loss_mse = 0
loss_ce = 0
hits_sum = 0
NDCG_sum = 0
for idx, batch in enumerate(tqdm_dataloader):
batch = [x.to(self.device) for x in batch]
self.optimizer.zero_grad()
[rep_mask, rep_mask_prediction], [token_prediction_prob, tokens] = self.model.pretrain_forward(batch[0])
align_loss = align.compute(rep_mask, rep_mask_prediction)
loss_mse += align_loss.item()
reconstruct_loss, hits, NDCG = reconstruct.compute(token_prediction_prob, tokens)
loss_ce += reconstruct_loss.item()
hits_sum += hits.item()
NDCG_sum += NDCG
loss = self.alpha * align_loss + self.beta * reconstruct_loss
loss.backward()
self.optimizer.step()
self.model.momentum_update()
loss_sum += loss.item()
print('pretrain epoch{0}, loss{1}, mse{2}, ce{3}, hits{4}, ndcg{5}'.format(epoch + 1, loss_sum / (idx + 1),
loss_mse / (idx + 1),
loss_ce / (idx + 1), hits_sum,
NDCG_sum / (idx + 1)))
if (epoch + 1) % 10 == 0:
torch.save(self.model.state_dict(), self.save_path + '/pretrain_model_epoch'+str(epoch+1)+'.pkl')
if (epoch + 1) % 3 == 0:
self.model.eval()
train_rep, train_label = get_rep_with_label(self.model, self.train_linear_loader)
test_rep, test_label = get_rep_with_label(self.model, self.test_loader)
clf = fit_lr(train_rep, train_label)
acc = clf.score(test_rep, test_label)
print(acc)
if acc > eval_acc:
eval_acc = acc
torch.save(self.model.state_dict(), self.save_path + '/pretrain_model.pkl')
def finetune_CLIP(self):
eval_cosine = 0.0
freq_model_options = {key: int(value) if value.isdigit() else (float(value) if value[0].isdigit() else value) for
(key, value) in [x.split("=") for x in opt.model_params]}
freq_model = FreqEncoder(**freq_model_options)
self.timefreq_model=TimeFreqEncoder(self.model,freq_model,self.args)
self.timefreq_model = self.timefreq_model.to(torch.device(self.device))
freqtime_state_dict = torch.load(self.save_path + '/timefreqmodel.pkl', map_location=self.device)
self.timefreq_model.load_state_dict(freqtime_state_dict)
self.timefreq_model.to(torch.device("cpu"))
freq_size=freq_model.output_size
time_size=self.model.d
clip_size=int(77*768)
self.alignmodel=AlignNet(time_size,freq_size,clip_size,self.timefreq_model)
self.alignmodel=self.alignmodel.to(torch.device(self.device))
print('CLIP_finetune')
self.optimizer = torch.optim.AdamW(self.alignmodel.parameters(), lr=self.args.lr) | CLIPloss = CM() | 3 | 2023-12-16 12:52:14+00:00 | 8k |
Rajeshwaran2001/DRM-Media-Tool | main.py | [
{
"identifier": "KeyGeter",
"path": "key_getter.py",
"snippet": "class KeyGeter(QWidget):\n def __init__(self, debug_logger, info_logger):\n super().__init__()\n self.debug_logger = debug_logger\n self.info_logger = info_logger\n self.init_ui()\n\n def init_ui(self):\n # Create layout\n layout = QVBoxLayout()\n\n # Create labels and input fields\n label1 = QLabel('PSSH:')\n label2 = QLabel('Licence URL:')\n label3 = QLabel('Name:')\n self.input1 = QLineEdit()\n self.input2 = QLineEdit()\n self.input3 = QLineEdit()\n\n # To have input and lable on same line\n row_layout1 = QHBoxLayout()\n row_layout1.addWidget(label1)\n row_layout1.addWidget(self.input1)\n layout.addLayout(row_layout1)\n\n row_layout2 = QHBoxLayout()\n row_layout2.addWidget(label2)\n row_layout2.addWidget(self.input2)\n layout.addLayout(row_layout2)\n\n row_layout3 = QHBoxLayout()\n row_layout3.addWidget(label3)\n row_layout3.addWidget(self.input3)\n layout.addLayout(row_layout3)\n\n # Create a button\n button = QPushButton('Submit')\n\n # Add labels, input fields, and button to the layout\n layout.addWidget(button)\n\n # Set the layout for the main window\n self.setLayout(layout)\n\n # Connect the button to a function (e.g., handle_button_click)\n button.clicked.connect(self.handle_button_click)\n\n # Create a text browser to display the API response\n self.response_browser = QTextBrowser()\n\n # Add the text browser to the layout\n layout.addWidget(self.response_browser)\n\n self.show()\n\n def handle_button_click(self):\n self.info_logger.info(\"Submit Button Clicked\")\n # Get user input from the input fields\n pssh = self.input1.text()\n license_url = self.input2.text()\n name = self.input3.text()\n # Check if any field is empty\n if not name:\n self.info_logger.info(\"Name Field is Empty\")\n\n if not pssh:\n self.info_logger.info(\"pssh Field is Empty\")\n\n if not license_url:\n self.info_logger.info(\"license_url Field is Empty\")\n\n conn = sqlite3.connect('db.db')\n self.info_logger.info(\"DB Connected Succesfully\")\n cursor = conn.cursor()\n # Create a table with columns if it doesn't exist\n cursor.execute('''CREATE TABLE IF NOT EXISTS pssh (\n pssh TEXT,\n license_url TEXT,\n movie_name TEXT\n )''')\n cursor.execute('''\n CREATE TABLE IF NOT EXISTS keys (\n key_id INTEGER PRIMARY KEY,\n key TEXT,\n pssh_id INTEGER,\n FOREIGN KEY (pssh_id) REFERENCES pssh (pssh_id)\n )\n ''')\n\n # Insert the values into the table\n cursor.execute(\"INSERT INTO pssh (pssh, license_url, movie_name) VALUES (?, ?, ?)\",\n (pssh, license_url, name))\n\n conn.commit()\n pssh_id = cursor.lastrowid\n\n # Construct the API request\n api_url = os.getenv(\"API_URL\")\n headers = {\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (Ktesttemp, like Gecko) Chrome/90.0.4430.85 Safari/537.36\",\n \"Content-Type\": \"application/json\",\n \"X-API-Key\": os.getenv(\"X_API_KEY\"),\n }\n # Check if either pssh or license_url is empty\n if not pssh or not license_url:\n error_message = \"Both 'pssh' and 'license_url' must be provided.\"\n show_error_message(self, error_message)\n self.debug_logger.debug(error_message)\n else:\n payload = {\n \"license_url\": license_url,\n \"pssh\": pssh,\n }\n\n # Make the API request\n response = requests.post(api_url, headers=headers, json=payload)\n self.info_logger.info(response)\n data = json.loads(response.text)\n key = None\n # print(data)\n self.info_logger.info(\"API response is: %s\", response)\n if response.status_code in [200, 302]:\n if \"keys\" in data:\n keys = data[\"keys\"]\n if isinstance(keys, list):\n if len(keys) 
== 1:\n for key_info in keys:\n if isinstance(key_info, str):\n key = key_info\n elif isinstance(key_info, dict) and \"key\" in key_info:\n key = key_info[\"key\"]\n else:\n print('error')\n continue\n cursor.execute(\n \"INSERT INTO keys (key, pssh_id) VALUES (?, ?)\", (key, pssh_id))\n # print(\"One key found\")\n self.info_logger.info(\"Single key found\")\n else:\n # key_strings = keys\n # key_string = ', '.join(key_strings)\n # part = key_string.replace(\n # '[', '').replace(']', '').replace(\"'\", \"\")\n # key_parts = part.split(', ')\n # key = \"\\n\".join(key_parts)\n # print(key)\n # print(\"Multiple keys found\")\n self.info_logger.info(\"Multiple keys found\")\n key_strings = keys\n for key_string in key_strings:\n key = key_string.replace(\n '[', '').replace(']', '').replace(\"'\", \"\")\n cursor.execute(\n \"INSERT INTO keys (key, pssh_id) VALUES (?, ?)\", (key, pssh_id))\n else:\n key = keys\n cursor.execute(\n \"INSERT INTO keys (key, pssh_id) VALUES (?, ?)\", (key, pssh_id))\n self.info_logger.info(\"Keys Found\")\n else:\n error_message = \"No 'key' or 'keys' found in the JSON data.\"\n show_error_message(self, error_message)\n self.debug_logger.debug(error_message)\n elif response.status_code == 400:\n try:\n error_message = response.json()[\"message\"]\n show_error_message(self, error_message)\n self.info_logger.info(error_message)\n except Exception as e:\n self.debug_logger.debug(e)\n else:\n error_message = \"API ERROR.\"\n show_error_message(self, error_message)\n self.info_logger.info(error_message)\n\n current_datetime = datetime.now().strftime(\"%Y-%m-%d %I:%M:%S %p\")\n event_data = {\n 'pssh': pssh,\n 'license_url': license_url,\n 'movie_name': name,\n 'keys': keys if \"keys\" in data else [],\n 'datetime': current_datetime,\n }\n\n # 'events' is the name of the collection\n events_ref = db.collection('events')\n events_ref.add(event_data)\n\n self.info_logger.info(\"Key Added to Globa Db\")\n\n # Display the API response in the text browser\n conn.commit()\n # Close the database connection\n conn.close()\n if key is not None:\n key_str = json.dumps(keys)\n self.response_browser.setText(key_str)\n # Clear the input fields\n self.input1.clear()\n self.input2.clear()\n self.input3.clear()\n else:\n error_message = \"No keys to display.\" # Customize this message as needed\n # show_error_message(self, error_message)\n self.debug_logger.debug(error_message)"
},
{
"identifier": "Decrypter",
"path": "decrypter.py",
"snippet": "class Decrypter(QWidget):\n def __init__(self, debug_logger, info_logger):\n super().__init__()\n self.init_ui()\n self.conn = None # Database connection\n self.cursor = None # Database cursor\n self.debug_logger = debug_logger\n self.info_logger = info_logger\n self.create_database()\n\n def init_ui(self):\n layout = QVBoxLayout()\n\n # Create a horizontal layout for the \"Select Folder\" and folder path\n select_folder_layout = QHBoxLayout()\n select_folder_label = QLabel(\"Select Folder:\")\n select_button = QPushButton(\"Select Folder\")\n select_button.clicked.connect(self.browse_folder)\n self.folder_path_lineedit = QLineEdit()\n\n select_folder_layout.addWidget(select_folder_label)\n select_folder_layout.addWidget(select_button)\n select_folder_layout.addWidget(self.folder_path_lineedit)\n\n layout.addLayout(select_folder_layout)\n\n # Create horizontal layout for buttons (Check Folder, GetKeys, Decrypt)\n buttons_layout = QHBoxLayout()\n\n check_folder_button = QPushButton(\"Check Folder\")\n check_folder_button.clicked.connect(self.check_folder_existence)\n buttons_layout.addWidget(check_folder_button)\n\n get_keys_button = QPushButton(\"Get Keys from DB\")\n get_keys_button.clicked.connect(self.get_keys_from_db)\n buttons_layout.addWidget(get_keys_button)\n\n decrypt_button = QPushButton(\"Decrypt\")\n decrypt_button.clicked.connect(self.decrypt_files)\n buttons_layout.addWidget(decrypt_button)\n\n merge_button = QPushButton(\"Media Merger\")\n merge_button.clicked.connect(self.merger)\n buttons_layout.addWidget(merge_button)\n\n layout.addLayout(buttons_layout)\n\n # Create a QListWidget for displaying search results\n layout.addWidget(QLabel(\"Search Results:\"))\n self.search_result_list = QListWidget()\n layout.addWidget(self.search_result_list)\n\n self.setLayout(layout)\n\n # Add these methods to handle button clicks\n def browse_folder(self):\n folder_path = QFileDialog.getExistingDirectory(self, \"Select Folder\")\n if folder_path:\n self.folder_path_lineedit.setText(folder_path)\n # self.search_database(folder_path)\n\n def check_folder_existence(self):\n folder_path = self.folder_path_lineedit.text()\n if os.path.exists(folder_path):\n show_success_message(self, \"Folder exists.\")\n self.info_logger.info(\"Folder exists.\")\n else:\n show_error_message(self, \"Folder does not exist.\")\n self.debug_logger.debug(\"Folder does not exist.\")\n\n def get_keys_from_db(self):\n folder_path = self.folder_path_lineedit.text()\n if os.path.exists(folder_path):\n keys_found = self.search_database(folder_path)\n # print(keys_found)\n if keys_found:\n success_message = \"Keys retrieved successfully.\"\n show_success_message(self, success_message)\n self.info_logger.info(success_message)\n else:\n # Customize this message as needed\n error_message = \"No keys found in the database.\"\n show_error_message(self, error_message)\n self.debug_logger.debug(error_message)\n else:\n show_error_message(self, \"No Folder Found.\")\n self.debug_logger.debug(\"No Folder Found.\")\n\n def decrypt_files(self):\n folder_path = self.folder_path_lineedit.text()\n if os.path.exists(folder_path):\n decrypt = self.decrypt_file(folder_path)\n if decrypt:\n success_message = \"Decryption successfully.\"\n show_success_message(self, success_message)\n self.info_logger.info(success_message)\n else:\n # Customize this message as needed\n error_message = \"Decryption Failed.\"\n show_error_message(self, error_message)\n self.debug_logger.debug(error_message)\n else:\n show_error_message(self, 
\"No Folder Selected.\")\n self.debug_logger.debug(\"No Folder Selected.\")\n\n def merger(self):\n folder_path = self.folder_path_lineedit.text()\n if os.path.exists(folder_path):\n merge = self.file_merger(folder_path)\n self.info_logger.info(\"Files Merged Succesfully\")\n else:\n show_error_message(self, \"No Folder Selected.\")\n self.debug_logger.debug(\"No Folder Selected.\")\n\n def create_database(self):\n self.conn = sqlite3.connect('db.db')\n self.cursor = self.conn.cursor()\n self.cursor.execute('''\n CREATE TABLE IF NOT EXISTS pssh (\n pssh_id INTEGER PRIMARY KEY,\n pssh TEXT,\n license_url TEXT,\n movie_name TEXT\n )\n ''')\n\n self.cursor.execute('''\n CREATE TABLE IF NOT EXISTS keys (\n key_id INTEGER PRIMARY KEY,\n key TEXT,\n pssh_id INTEGER,\n FOREIGN KEY(pssh_id) REFERENCES pssh(pssh_id)\n )\n ''')\n self.conn.commit()\n\n def search_database(self, folder_name):\n self.search_result_list.clear()\n\n # Search DB for entries with a movie_name that matches the folder_name\n query = \"SELECT rowid, movie_name FROM pssh WHERE movie_name = ?\"\n pattern = os.path.basename(folder_name)\n self.cursor.execute(query, (pattern,))\n results = self.cursor.fetchall()\n keys = False\n\n for result in results:\n rowid, movie_name = result\n self.search_result_list.addItem(\n f\"rowid: {rowid}, Movie Name: {movie_name}\")\n\n # Search for keys based on the pssh_id\n keys_query = \"SELECT key FROM keys WHERE pssh_id = ?\"\n self.cursor.execute(keys_query, (rowid,))\n keys = self.cursor.fetchall()\n\n if keys:\n keys_found = True # Set the flag if keys are found\n for key in keys:\n self.search_result_list.addItem(f\" Key: {key[0]}\")\n\n if not keys_found:\n # Customize this message as needed\n error_message = \"No keys found in DB.\"\n show_error_message(self, error_message)\n self.debug_logger.debug(error_message)\n else:\n self.info_logger.info(\"Keys Found in Database\")\n\n return keys_found\n\n def decrypt_file(self, folder_name):\n self.search_result_list.clear()\n # Search DB for entries with a movie_name that matches the folder_name\n query = \"SELECT rowid, movie_name FROM pssh WHERE movie_name = ?\"\n pattern = os.path.basename(folder_name)\n self.cursor.execute(query, (pattern,))\n results = self.cursor.fetchall()\n keys = []\n\n for result in results:\n rowid, movie_name = result\n self.search_result_list.addItem(\n f\"rowid: {rowid}, Movie Name: {movie_name}\")\n\n # Search for keys based on the pssh_id\n keys_query = \"SELECT key FROM keys WHERE pssh_id = ?\"\n self.cursor.execute(keys_query, (rowid,))\n keys = self.cursor.fetchall()\n\n # Get video and audio files in the selected folder\n video_audio_formats = ['.mp4', '.avi', '.webm',\n '.mkv', '.m4a', '.wav', '.flac', '.mp3']\n files = [f for f in os.listdir(folder_name) if os.path.isfile(\n os.path.join(folder_name, f))]\n video_audio_files = [f for f in files if os.path.splitext(\n f)[1].lower() in video_audio_formats]\n\n # self.search_result_list.addItem(\"\\nVideo and Audio Files:\")\n # print(files)\n for file in video_audio_files:\n # self.search_result_list.addItem(f\" {file}\")\n\n # Decrypt the file using mp4decrypt\n decrypted_file = os.path.splitext(\n file)[0] + \"_decrypted\" + os.path.splitext(file)[1]\n input_file_path = os.path.normpath(os.path.join(folder_name, file))\n output_file_path = os.path.normpath(\n os.path.join(folder_name, decrypted_file))\n\n decrypt_command = ['mp4decrypt']\n if not keys:\n # Customize this message as needed\n error_message = \"No key found in DB.\"\n 
show_error_message(self, error_message)\n else:\n for key in keys:\n decrypt_command.extend([\"--key\", key[0]])\n decrypt_command.extend([input_file_path, output_file_path])\n try:\n # print(decrypt_command)\n subprocess.run(decrypt_command, shell=True, check=True)\n self.search_result_list.addItem(\n f\" Decrypted File: {decrypted_file}\")\n # Remove the original input file\n os.remove(input_file_path)\n # Rename the decrypted file to the original file name\n os.rename(output_file_path, input_file_path)\n show_success_message(self, \"Decryption successfully Completed\")\n self.info_logger.info(\n \"Decryption of {decrypted_file} is successfully Completed\")\n # # Ask the user if they want to delete the encrypted file\n # reply = QMessageBox.question(\n # self, 'Delete Encrypted File',\n # 'Do you want to delete the encrypted file?',\n # QMessageBox.Yes | QMessageBox.No, QMessageBox.No\n # )\n # if reply == QMessageBox.Yes:\n # # Code to delete the encrypted file\n # self.search_result_list.addItem(\n # f\" Deleted Encrypted File: {QMessageBox.Yes}\")\n # show_success_message(self,\n # \"Encrypted file deleted successfully\")\n except subprocess.CalledProcessError as e:\n self.search_result_list.addItem(\n f\" Error decrypting file: {e}\")\n show_error_message(self, \"Error decrypting file\")\n self.debug_logger.debug(\"Error: {e}\")\n return bool(keys)\n\n def file_merger(self, folder_name):\n file_merger_dialog = FileMergerDialog(\n self.debug_logger, self.info_logger, folder_name)\n file_merger_dialog.exec_()"
},
{
"identifier": "setup_logging",
"path": "logger.py",
"snippet": "def setup_logging():\n # Get the directory of the script or the executable\n if getattr(sys, 'frozen', False): # if the application is frozen\n current_dir = os.path.dirname(sys.executable)\n else:\n current_dir = os.path.dirname(os.path.abspath(__file__))\n\n log_dir = os.path.join(current_dir, 'logs')\n os.makedirs(log_dir, exist_ok=True)\n\n info_log_file = os.path.join(log_dir, 'info.log')\n debug_log_file = os.path.join(log_dir, 'debug.log')\n\n # Configuration for info_logger\n info_logger = logging.getLogger('info_logger')\n info_handler = logging.FileHandler(info_log_file)\n info_formatter = logging.Formatter(\n '%(asctime)s - %(levelname)s - %(message)s')\n info_handler.setFormatter(info_formatter)\n info_logger.addHandler(info_handler)\n info_logger.setLevel(logging.INFO)\n\n # Configuration for debug_logger\n debug_logger = logging.getLogger('debug_logger')\n debug_handler = logging.FileHandler(debug_log_file)\n debug_formatter = logging.Formatter(\n '%(asctime)s - %(levelname)s - %(message)s')\n debug_handler.setFormatter(debug_formatter)\n debug_logger.addHandler(debug_handler)\n debug_logger.setLevel(logging.DEBUG)\n\n return info_logger, debug_logger"
},
{
"identifier": "__version__",
"path": "version.py",
"snippet": "CHANNEL = 'Beta'"
}
] | import sys
import platform
import webbrowser
import os
from PyQt5.QtWidgets import QApplication, QMainWindow, QTabWidget, QMessageBox, QAction, QMenu
from PyQt5.QtGui import QIcon
from key_getter import KeyGeter
from decrypter import Decrypter
from logger import setup_logging
from version import __version__, CHANNEL | 4,894 |
info_logger, debug_logger = setup_logging()
current_dir = os.path.dirname(os.path.abspath(__file__))
icon = os.path.join(current_dir, 'assets', 'logo.ico')
git = os.path.join(current_dir, 'assets', 'github.png')
discord = os.path.join(current_dir, 'assets', 'discord.svg')
bug = os.path.join(current_dir, 'assets', 'bug.svg')
class MainWindow(QMainWindow):
def __init__(self):
super().__init__()
self.setWindowIcon(QIcon(icon))
self.init_ui()
def init_ui(self):
self.setWindowTitle(f"DRM & Media Tool {__version__} ({CHANNEL})")
self.setGeometry(100, 100, 650, 350)
# Create the tab widget
tab_widget = QTabWidget(self)
# Create the menu bar
menu_bar = self.menuBar()
# Create the Help menu
help_menu = menu_bar.addMenu('Help')
# Create "Tools Used" action
tools_used_action = QAction('Tools Used', self)
tools_used_action.triggered.connect(self.show_tools_used)
help_menu.addAction(tools_used_action)
# Create "About" action
about_action = QAction('About', self)
about_action.triggered.connect(self.show_about)
help_menu.addAction(about_action)
feature_bug_menu = QMenu('Feature/Bug', self)
request_feature_bug_action = QAction(
'Request a New Feature or Report Bug', self)
request_feature_bug_action.triggered.connect(
self.open_feature_bug_form)
feature_bug_menu.addAction(request_feature_bug_action)
menu_bar.addMenu(feature_bug_menu)
help_menu = menu_bar.addMenu('Discord')
open_discord_action = QAction('Open Discord', self)
open_discord_action.triggered.connect(self.open_discord)
help_menu.addAction(open_discord_action)
# Create tabs
|
info_logger, debug_logger = setup_logging()
current_dir = os.path.dirname(os.path.abspath(__file__))
icon = os.path.join(current_dir, 'assets', 'logo.ico')
git = os.path.join(current_dir, 'assets', 'github.png')
discord = os.path.join(current_dir, 'assets', 'discord.svg')
bug = os.path.join(current_dir, 'assets', 'bug.svg')
class MainWindow(QMainWindow):
def __init__(self):
super().__init__()
self.setWindowIcon(QIcon(icon))
self.init_ui()
def init_ui(self):
self.setWindowTitle(f"DRM & Media Tool {__version__} ({CHANNEL})")
self.setGeometry(100, 100, 650, 350)
# Create the tab widget
tab_widget = QTabWidget(self)
# Create the menu bar
menu_bar = self.menuBar()
# Create the Help menu
help_menu = menu_bar.addMenu('Help')
# Create "Tools Used" action
tools_used_action = QAction('Tools Used', self)
tools_used_action.triggered.connect(self.show_tools_used)
help_menu.addAction(tools_used_action)
# Create "About" action
about_action = QAction('About', self)
about_action.triggered.connect(self.show_about)
help_menu.addAction(about_action)
feature_bug_menu = QMenu('Feature/Bug', self)
request_feature_bug_action = QAction(
'Request a New Feature or Report Bug', self)
request_feature_bug_action.triggered.connect(
self.open_feature_bug_form)
feature_bug_menu.addAction(request_feature_bug_action)
menu_bar.addMenu(feature_bug_menu)
help_menu = menu_bar.addMenu('Discord')
open_discord_action = QAction('Open Discord', self)
open_discord_action.triggered.connect(self.open_discord)
help_menu.addAction(open_discord_action)
# Create tabs | hello_tab = KeyGeter(debug_logger, info_logger) | 0 | 2023-12-18 11:50:40+00:00 | 8k |
gmum/ViewingDirectionGaussianSplatting | scene/dataset_readers.py | [
{
"identifier": "read_extrinsics_text",
"path": "scene/colmap_loader.py",
"snippet": "def read_extrinsics_text(path):\n \"\"\"\n Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py\n \"\"\"\n images = {}\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n image_id = int(elems[0])\n qvec = np.array(tuple(map(float, elems[1:5])))\n tvec = np.array(tuple(map(float, elems[5:8])))\n camera_id = int(elems[8])\n image_name = elems[9]\n elems = fid.readline().split()\n xys = np.column_stack([tuple(map(float, elems[0::3])),\n tuple(map(float, elems[1::3]))])\n point3D_ids = np.array(tuple(map(int, elems[2::3])))\n images[image_id] = Image(\n id=image_id, qvec=qvec, tvec=tvec,\n camera_id=camera_id, name=image_name,\n xys=xys, point3D_ids=point3D_ids)\n return images"
},
{
"identifier": "read_intrinsics_text",
"path": "scene/colmap_loader.py",
"snippet": "def read_intrinsics_text(path):\n \"\"\"\n Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py\n \"\"\"\n cameras = {}\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n camera_id = int(elems[0])\n model = elems[1]\n assert model == \"PINHOLE\", \"While the loader support other types, the rest of the code assumes PINHOLE\"\n width = int(elems[2])\n height = int(elems[3])\n params = np.array(tuple(map(float, elems[4:])))\n cameras[camera_id] = Camera(id=camera_id, model=model,\n width=width, height=height,\n params=params)\n return cameras"
},
{
"identifier": "qvec2rotmat",
"path": "scene/colmap_loader.py",
"snippet": "def qvec2rotmat(qvec):\n return np.array([\n [1 - 2 * qvec[2]**2 - 2 * qvec[3]**2,\n 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3],\n 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]],\n [2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3],\n 1 - 2 * qvec[1]**2 - 2 * qvec[3]**2,\n 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]],\n [2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2],\n 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1],\n 1 - 2 * qvec[1]**2 - 2 * qvec[2]**2]])"
},
{
"identifier": "read_extrinsics_binary",
"path": "scene/colmap_loader.py",
"snippet": "def read_extrinsics_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadImagesBinary(const std::string& path)\n void Reconstruction::WriteImagesBinary(const std::string& path)\n \"\"\"\n images = {}\n with open(path_to_model_file, \"rb\") as fid:\n num_reg_images = read_next_bytes(fid, 8, \"Q\")[0]\n for _ in range(num_reg_images):\n binary_image_properties = read_next_bytes(\n fid, num_bytes=64, format_char_sequence=\"idddddddi\")\n image_id = binary_image_properties[0]\n qvec = np.array(binary_image_properties[1:5])\n tvec = np.array(binary_image_properties[5:8])\n camera_id = binary_image_properties[8]\n image_name = \"\"\n current_char = read_next_bytes(fid, 1, \"c\")[0]\n while current_char != b\"\\x00\": # look for the ASCII 0 entry\n image_name += current_char.decode(\"utf-8\")\n current_char = read_next_bytes(fid, 1, \"c\")[0]\n num_points2D = read_next_bytes(fid, num_bytes=8,\n format_char_sequence=\"Q\")[0]\n x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D,\n format_char_sequence=\"ddq\"*num_points2D)\n xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),\n tuple(map(float, x_y_id_s[1::3]))])\n point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))\n images[image_id] = Image(\n id=image_id, qvec=qvec, tvec=tvec,\n camera_id=camera_id, name=image_name,\n xys=xys, point3D_ids=point3D_ids)\n return images"
},
{
"identifier": "read_intrinsics_binary",
"path": "scene/colmap_loader.py",
"snippet": "def read_intrinsics_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::WriteCamerasBinary(const std::string& path)\n void Reconstruction::ReadCamerasBinary(const std::string& path)\n \"\"\"\n cameras = {}\n with open(path_to_model_file, \"rb\") as fid:\n num_cameras = read_next_bytes(fid, 8, \"Q\")[0]\n for _ in range(num_cameras):\n camera_properties = read_next_bytes(\n fid, num_bytes=24, format_char_sequence=\"iiQQ\")\n camera_id = camera_properties[0]\n model_id = camera_properties[1]\n model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name\n width = camera_properties[2]\n height = camera_properties[3]\n num_params = CAMERA_MODEL_IDS[model_id].num_params\n params = read_next_bytes(fid, num_bytes=8*num_params,\n format_char_sequence=\"d\"*num_params)\n cameras[camera_id] = Camera(id=camera_id,\n model=model_name,\n width=width,\n height=height,\n params=np.array(params))\n assert len(cameras) == num_cameras\n return cameras"
},
{
"identifier": "read_points3D_binary",
"path": "scene/colmap_loader.py",
"snippet": "def read_points3D_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadPoints3DBinary(const std::string& path)\n void Reconstruction::WritePoints3DBinary(const std::string& path)\n \"\"\"\n\n\n with open(path_to_model_file, \"rb\") as fid:\n num_points = read_next_bytes(fid, 8, \"Q\")[0]\n\n xyzs = np.empty((num_points, 3))\n rgbs = np.empty((num_points, 3))\n errors = np.empty((num_points, 1))\n\n for p_id in range(num_points):\n binary_point_line_properties = read_next_bytes(\n fid, num_bytes=43, format_char_sequence=\"QdddBBBd\")\n xyz = np.array(binary_point_line_properties[1:4])\n rgb = np.array(binary_point_line_properties[4:7])\n error = np.array(binary_point_line_properties[7])\n track_length = read_next_bytes(\n fid, num_bytes=8, format_char_sequence=\"Q\")[0]\n track_elems = read_next_bytes(\n fid, num_bytes=8*track_length,\n format_char_sequence=\"ii\"*track_length)\n xyzs[p_id] = xyz\n rgbs[p_id] = rgb\n errors[p_id] = error\n return xyzs, rgbs, errors"
},
{
"identifier": "read_points3D_text",
"path": "scene/colmap_loader.py",
"snippet": "def read_points3D_text(path):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadPoints3DText(const std::string& path)\n void Reconstruction::WritePoints3DText(const std::string& path)\n \"\"\"\n xyzs = None\n rgbs = None\n errors = None\n num_points = 0\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n num_points += 1\n\n\n xyzs = np.empty((num_points, 3))\n rgbs = np.empty((num_points, 3))\n errors = np.empty((num_points, 1))\n count = 0\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n xyz = np.array(tuple(map(float, elems[1:4])))\n rgb = np.array(tuple(map(int, elems[4:7])))\n error = np.array(float(elems[7]))\n xyzs[count] = xyz\n rgbs[count] = rgb\n errors[count] = error\n count += 1\n\n return xyzs, rgbs, errors"
},
{
"identifier": "getWorld2View2",
"path": "utils/graphics_utils.py",
"snippet": "def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0):\n Rt = np.zeros((4, 4))\n Rt[:3, :3] = R.transpose()\n Rt[:3, 3] = t\n Rt[3, 3] = 1.0\n\n C2W = np.linalg.inv(Rt)\n cam_center = C2W[:3, 3]\n cam_center = (cam_center + translate) * scale\n C2W[:3, 3] = cam_center\n Rt = np.linalg.inv(C2W)\n return np.float32(Rt)"
},
{
"identifier": "focal2fov",
"path": "utils/graphics_utils.py",
"snippet": "def focal2fov(focal, pixels):\n return 2*math.atan(pixels/(2*focal))"
},
{
"identifier": "fov2focal",
"path": "utils/graphics_utils.py",
"snippet": "def fov2focal(fov, pixels):\n return pixels / (2 * math.tan(fov / 2))"
},
{
"identifier": "SH2RGB",
"path": "utils/sh_utils.py",
"snippet": "def SH2RGB(sh):\n return sh * C0 + 0.5"
},
{
"identifier": "BasicPointCloud",
"path": "scene/gaussian_model.py",
"snippet": "class GaussianModel:\n def setup_functions(self):\n def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation):\n def __init__(self, sh_degree : int):\n def capture(self):\n def restore(self, model_args, training_args):\n def get_scaling(self):\n def get_rotation(self):\n def get_xyz(self):\n def get_features(self):\n def get_opacity(self):\n def get_covariance(self, scaling_modifier = 1):\n def oneupSHdegree(self):\n def create_from_pcd(self, pcd : BasicPointCloud, spatial_lr_scale : float):\n def training_setup(self, training_args):\n def update_learning_rate(self, iteration):\n def construct_list_of_attributes(self):\n def save_ply(self, path):\n def reset_opacity(self):\n def load_ply(self, path):\n def replace_tensor_to_optimizer(self, tensor, name):\n def _prune_optimizer(self, mask):\n def prune_points(self, mask):\n def cat_tensors_to_optimizer(self, tensors_dict):\n def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation):\n def densify_and_split(self, grads, grad_threshold, scene_extent, N=2):\n def densify_and_clone(self, grads, grad_threshold, scene_extent):\n def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_size):\n def add_densification_stats(self, viewspace_point_tensor, update_filter):\n L = build_scaling_rotation(scaling_modifier * scaling, rotation)"
}
] | import os
import sys
import numpy as np
import json
from PIL import Image
from typing import NamedTuple
from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \
read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text
from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal
from pathlib import Path
from plyfile import PlyData, PlyElement
from utils.sh_utils import SH2RGB
from scene.gaussian_model import BasicPointCloud | 4,242 | #
# For inquiries contact [email protected]
#
class CameraInfo(NamedTuple):
uid: int
R: np.array
T: np.array
FovY: np.array
FovX: np.array
image: np.array
image_path: str
image_name: str
width: int
height: int
class SceneInfo(NamedTuple):
point_cloud: BasicPointCloud
train_cameras: list
test_cameras: list
nerf_normalization: dict
ply_path: str
def getNerfppNorm(cam_info):
def get_center_and_diag(cam_centers):
cam_centers = np.hstack(cam_centers)
avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True)
center = avg_cam_center
dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True)
diagonal = np.max(dist)
return center.flatten(), diagonal
cam_centers = []
for cam in cam_info:
W2C = getWorld2View2(cam.R, cam.T)
C2W = np.linalg.inv(W2C)
cam_centers.append(C2W[:3, 3:4])
center, diagonal = get_center_and_diag(cam_centers)
radius = diagonal * 1.1
translate = -center
return {"translate": translate, "radius": radius}
def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder):
cam_infos = []
for idx, key in enumerate(cam_extrinsics):
sys.stdout.write('\r')
        # overwrite the progress counter in place on the same console line
sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics)))
sys.stdout.flush()
extr = cam_extrinsics[key]
intr = cam_intrinsics[extr.camera_id]
height = intr.height
width = intr.width
uid = intr.id
R = np.transpose(qvec2rotmat(extr.qvec))
T = np.array(extr.tvec)
if intr.model=="SIMPLE_PINHOLE":
focal_length_x = intr.params[0]
FovY = focal2fov(focal_length_x, height)
FovX = focal2fov(focal_length_x, width)
elif intr.model=="PINHOLE":
focal_length_x = intr.params[0]
focal_length_y = intr.params[1]
FovY = focal2fov(focal_length_y, height)
FovX = focal2fov(focal_length_x, width)
else:
assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!"
image_path = os.path.join(images_folder, os.path.basename(extr.name))
image_name = os.path.basename(image_path).split(".")[0]
image = Image.open(image_path)
cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
image_path=image_path, image_name=image_name, width=width, height=height)
cam_infos.append(cam_info)
sys.stdout.write('\n')
return cam_infos
def fetchPly(path):
plydata = PlyData.read(path)
vertices = plydata['vertex']
positions = np.vstack([vertices['x'], vertices['y'], vertices['z']]).T
colors = np.vstack([vertices['red'], vertices['green'], vertices['blue']]).T / 255.0
normals = np.vstack([vertices['nx'], vertices['ny'], vertices['nz']]).T
return BasicPointCloud(points=positions, colors=colors, normals=normals)
def storePly(path, xyz, rgb):
# Define the dtype for the structured array
dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'),
('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
normals = np.zeros_like(xyz)
elements = np.empty(xyz.shape[0], dtype=dtype)
attributes = np.concatenate((xyz, normals, rgb), axis=1)
elements[:] = list(map(tuple, attributes))
# Create the PlyData object and write to file
vertex_element = PlyElement.describe(elements, 'vertex')
ply_data = PlyData([vertex_element])
ply_data.write(path)
def readColmapSceneInfo(path, images, eval, llffhold=8):
try:
cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin")
cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin")
cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file)
cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file)
except:
cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt")
cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt")
| #
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact [email protected]
#
class CameraInfo(NamedTuple):
uid: int
R: np.array
T: np.array
FovY: np.array
FovX: np.array
image: np.array
image_path: str
image_name: str
width: int
height: int
class SceneInfo(NamedTuple):
point_cloud: BasicPointCloud
train_cameras: list
test_cameras: list
nerf_normalization: dict
ply_path: str
def getNerfppNorm(cam_info):
def get_center_and_diag(cam_centers):
cam_centers = np.hstack(cam_centers)
avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True)
center = avg_cam_center
dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True)
diagonal = np.max(dist)
return center.flatten(), diagonal
cam_centers = []
for cam in cam_info:
W2C = getWorld2View2(cam.R, cam.T)
C2W = np.linalg.inv(W2C)
cam_centers.append(C2W[:3, 3:4])
center, diagonal = get_center_and_diag(cam_centers)
radius = diagonal * 1.1
translate = -center
return {"translate": translate, "radius": radius}
def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder):
cam_infos = []
for idx, key in enumerate(cam_extrinsics):
sys.stdout.write('\r')
        # overwrite the progress counter in place on the same console line
sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics)))
sys.stdout.flush()
extr = cam_extrinsics[key]
intr = cam_intrinsics[extr.camera_id]
height = intr.height
width = intr.width
uid = intr.id
R = np.transpose(qvec2rotmat(extr.qvec))
T = np.array(extr.tvec)
if intr.model=="SIMPLE_PINHOLE":
focal_length_x = intr.params[0]
FovY = focal2fov(focal_length_x, height)
FovX = focal2fov(focal_length_x, width)
elif intr.model=="PINHOLE":
focal_length_x = intr.params[0]
focal_length_y = intr.params[1]
FovY = focal2fov(focal_length_y, height)
FovX = focal2fov(focal_length_x, width)
else:
assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!"
image_path = os.path.join(images_folder, os.path.basename(extr.name))
image_name = os.path.basename(image_path).split(".")[0]
image = Image.open(image_path)
cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
image_path=image_path, image_name=image_name, width=width, height=height)
cam_infos.append(cam_info)
sys.stdout.write('\n')
return cam_infos
def fetchPly(path):
plydata = PlyData.read(path)
vertices = plydata['vertex']
positions = np.vstack([vertices['x'], vertices['y'], vertices['z']]).T
colors = np.vstack([vertices['red'], vertices['green'], vertices['blue']]).T / 255.0
normals = np.vstack([vertices['nx'], vertices['ny'], vertices['nz']]).T
return BasicPointCloud(points=positions, colors=colors, normals=normals)
def storePly(path, xyz, rgb):
# Define the dtype for the structured array
dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'),
('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
normals = np.zeros_like(xyz)
elements = np.empty(xyz.shape[0], dtype=dtype)
attributes = np.concatenate((xyz, normals, rgb), axis=1)
elements[:] = list(map(tuple, attributes))
# Create the PlyData object and write to file
vertex_element = PlyElement.describe(elements, 'vertex')
ply_data = PlyData([vertex_element])
ply_data.write(path)
def readColmapSceneInfo(path, images, eval, llffhold=8):
try:
cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin")
cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin")
cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file)
cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file)
except:
cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt")
cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt") | cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file) | 0 | 2023-12-21 10:09:17+00:00 | 8k |
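The snippets quoted in this row convert COLMAP quaternions into rotation matrices and translate between focal lengths and fields of view before readColmapCameras builds each CameraInfo. The sketch below restates those conversions (qvec2rotmat, focal2fov, fov2focal) as given in the quoted snippets and sanity-checks them with NumPy; the sample quaternion and intrinsics are invented values used only for illustration.

import math
import numpy as np

def qvec2rotmat(qvec):
    """COLMAP-style (w, x, y, z) quaternion to a 3x3 rotation matrix,
    restating the snippet quoted in the context list above."""
    w, x, y, z = qvec
    return np.array([
        [1 - 2 * y ** 2 - 2 * z ** 2, 2 * x * y - 2 * w * z, 2 * z * x + 2 * w * y],
        [2 * x * y + 2 * w * z, 1 - 2 * x ** 2 - 2 * z ** 2, 2 * y * z - 2 * w * x],
        [2 * z * x - 2 * w * y, 2 * y * z + 2 * w * x, 1 - 2 * x ** 2 - 2 * y ** 2],
    ])

def focal2fov(focal, pixels):
    return 2 * math.atan(pixels / (2 * focal))

def fov2focal(fov, pixels):
    return pixels / (2 * math.tan(fov / 2))

# Invented example values (not taken from any real reconstruction).
qvec = np.array([np.cos(np.pi / 8), 0.0, np.sin(np.pi / 8), 0.0])  # 45 deg about y
R = qvec2rotmat(qvec)
assert np.allclose(R @ R.T, np.eye(3), atol=1e-8)   # rotations are orthonormal
assert np.isclose(np.linalg.det(R), 1.0)            # and proper (det = +1)

focal, width = 1111.0, 800
fov = focal2fov(focal, width)
assert np.isclose(fov2focal(fov, width), focal)      # round trip recovers the focal length
print(f"horizontal FoV: {math.degrees(fov):.2f} deg")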
gauravsdeshmukh/ChemGCN | train_chemgcn.py | [
{
"identifier": "ChemGCN",
"path": "chem_gcn/model.py",
"snippet": "class ChemGCN(nn.Module):\n \"\"\"\n Create a graph neural network to predict water solubilities.\n \"\"\"\n\n def __init__(\n self,\n node_vec_len: int,\n node_fea_len: int,\n hidden_fea_len: int,\n n_conv: int,\n n_hidden: int,\n n_outputs: int,\n p_dropout: float = 0.0,\n ):\n \"\"\"\n Class for the ChemGCN model\n\n Parameters\n ----------\n node_vec_len : int\n Node vector length\n node_fea_len : int\n Node feature length\n hidden_fea_len : int\n Hidden feature length (number of nodes in hidden layer)\n n_conv : int\n Number of convolution layers\n n_hidden : int\n Number of hidden layers\n n_outputs : int\n Number of outputs\n p_dropout : float, optional\n Probability (0<=p_dropout<1) that a node is dropped out. The default is 0.\n\n \"\"\"\n\n # Call constructor of base class\n super().__init__()\n\n # Define layers\n # Initial transformation from node matrix to node features\n self.init_transform = nn.Linear(node_vec_len, node_fea_len)\n\n # Convolution layers\n self.conv_layers = nn.ModuleList(\n [\n ConvolutionLayer(\n node_in_len=node_fea_len,\n node_out_len=node_fea_len,\n )\n for i in range(n_conv)\n ]\n )\n\n # Pool convolution outputs\n self.pooling = PoolingLayer()\n pooled_node_fea_len = node_fea_len\n\n # Pooling activation\n self.pooling_activation = nn.LeakyReLU()\n\n # From pooled vector to hidden layers\n self.pooled_to_hidden = nn.Linear(pooled_node_fea_len, hidden_fea_len)\n\n # Hidden layer\n self.hidden_layer = nn.Linear(hidden_fea_len, hidden_fea_len)\n\n # Hidden layer activation function\n self.hidden_activation = nn.LeakyReLU()\n\n # Hidden layer dropout\n self.dropout = nn.Dropout(p=p_dropout)\n\n # If hidden layers more than 1, add more hidden layers\n self.n_hidden = n_hidden\n if self.n_hidden > 1:\n self.hidden_layers = nn.ModuleList(\n [self.hidden_layer for _ in range(n_hidden - 1)]\n )\n self.hidden_activation_layers = nn.ModuleList(\n [self.hidden_activation for _ in range(n_hidden - 1)]\n )\n self.hidden_dropout_layers = nn.ModuleList(\n [self.dropout for _ in range(n_hidden - 1)]\n )\n\n # Final layer going to the output\n self.hidden_to_output = nn.Linear(hidden_fea_len, n_outputs)\n\n def forward(self, node_mat, adj_mat):\n \"\"\"\n Forward pass\n\n Parameters\n ----------\n node_mat : torch.Tensor with shape (batch_size, max_atoms, node_vec_len)\n Node matrices\n adj_mat : torch.Tensor with shape (batch_size, max_atoms, max_atoms)\n Adjacency matrices\n\n Returns\n -------\n out : torch.Tensor with shape (batch_size, n_outputs)\n Output tensor\n \"\"\"\n # Perform initial transform on node_mat\n node_fea = self.init_transform(node_mat)\n\n # Perform convolutions\n for conv in self.conv_layers:\n node_fea = conv(node_fea, adj_mat)\n\n # Perform pooling\n pooled_node_fea = self.pooling(node_fea)\n pooled_node_fea = self.pooling_activation(pooled_node_fea)\n\n # First hidden layer\n hidden_node_fea = self.pooled_to_hidden(pooled_node_fea)\n hidden_node_fea = self.hidden_activation(hidden_node_fea)\n hidden_node_fea = self.dropout(hidden_node_fea)\n\n # Subsequent hidden layers\n if self.n_hidden > 1:\n for i in range(self.n_hidden - 1):\n hidden_node_fea = self.hidden_layers[i](hidden_node_fea)\n hidden_node_fea = self.hidden_activation_layers[i](hidden_node_fea)\n hidden_node_fea = self.hidden_dropout_layers[i](hidden_node_fea)\n\n # Output\n out = self.hidden_to_output(hidden_node_fea)\n\n return out"
},
{
"identifier": "train_model",
"path": "chem_gcn/utils.py",
"snippet": "def train_model(\n epoch,\n model,\n training_dataloader,\n optimizer,\n loss_fn,\n standardizer,\n use_GPU,\n max_atoms,\n node_vec_len,\n):\n \"\"\"\n Execute training of one epoch for the ChemGCN model.\n\n Parameters\n ----------\n epoch : int\n Current epoch\n model : ChemGCN\n ChemGCN model object\n training_dataloader : data.DataLoader\n Training DataLoader\n optimizer : torch.optim.Optimizer\n Model optimizer\n loss_fn : like nn.MSELoss()\n Model loss function\n standardizer : Standardizer\n Standardizer object\n use_GPU: bool\n Whether to use GPU\n max_atoms: int\n Maximum number of atoms in graph\n node_vec_len: int\n Maximum node vector length in graph\n\n Returns\n -------\n avg_loss : float\n Training loss averaged over batches\n avg_mae : float\n Training MAE averaged over batches\n \"\"\"\n\n # Create variables to store losses and error\n avg_loss = 0\n avg_mae = 0\n count = 0\n\n # Switch model to train mode\n model.train()\n\n # Go over each batch in the dataloader\n for i, dataset in enumerate(training_dataloader):\n # Unpack data\n node_mat = dataset[0][0]\n adj_mat = dataset[0][1]\n output = dataset[1]\n\n # Reshape inputs\n first_dim = int((torch.numel(node_mat)) / (max_atoms * node_vec_len))\n node_mat = node_mat.reshape(first_dim, max_atoms, node_vec_len)\n adj_mat = adj_mat.reshape(first_dim, max_atoms, max_atoms)\n\n # Standardize output\n output_std = standardizer.standardize(output)\n\n # Package inputs and outputs; check if GPU is enabled\n if use_GPU:\n nn_input = (node_mat.cuda(), adj_mat.cuda())\n nn_output = output_std.cuda()\n else:\n nn_input = (node_mat, adj_mat)\n nn_output = output_std\n\n # Compute output from network\n nn_prediction = model(*nn_input)\n\n # Calculate loss\n loss = loss_fn(nn_output, nn_prediction)\n avg_loss += loss\n\n # Calculate MAE\n prediction = standardizer.restore(nn_prediction.detach().cpu())\n mae = mean_absolute_error(output, prediction)\n avg_mae += mae\n\n # Set zero gradients for all tensors\n optimizer.zero_grad()\n\n # Do backward prop\n loss.backward()\n\n # Update optimizer parameters\n optimizer.step()\n\n # Increase count\n count += 1\n\n # Calculate avg loss and MAE\n avg_loss = avg_loss.detach().cpu().numpy() / count\n avg_mae = avg_mae / count\n\n # Print stats\n print(\n \"Epoch: [{0}]\\tTraining Loss: [{1:.2f}]\\tTraining MAE: [{2:.2f}]\".format(\n epoch, avg_loss, avg_mae\n )\n )\n\n # Return loss and MAE\n return avg_loss, avg_mae"
},
{
"identifier": "test_model",
"path": "chem_gcn/utils.py",
"snippet": "def test_model(\n model,\n test_dataloader,\n loss_fn,\n standardizer,\n use_GPU,\n max_atoms,\n node_vec_len,\n):\n \"\"\"\n Test the ChemGCN model.\n\n Parameters\n ----------\n model : ChemGCN\n ChemGCN model object\n test_dataloader : data.DataLoader\n Test DataLoader\n loss_fn : like nn.MSELoss()\n Model loss function\n standardizer : Standardizer\n Standardizer object\n use_GPU: bool\n Whether to use GPU\n max_atoms: int\n Maximum number of atoms in graph\n node_vec_len: int\n Maximum node vector length in graph\n\n Returns\n -------\n test_loss : float\n Test loss\n test_mae : float\n Test MAE\n \"\"\"\n\n # Create variables to store losses and error\n test_loss = 0\n test_mae = 0\n count = 0\n\n # Switch model to train mode\n model.eval()\n\n # Go over each batch in the dataloader\n for i, dataset in enumerate(test_dataloader):\n # Unpack data\n node_mat = dataset[0][0]\n adj_mat = dataset[0][1]\n output = dataset[1]\n\n # Reshape inputs\n first_dim = int((torch.numel(node_mat)) / (max_atoms * node_vec_len))\n node_mat = node_mat.reshape(first_dim, max_atoms, node_vec_len)\n adj_mat = adj_mat.reshape(first_dim, max_atoms, max_atoms)\n\n # Standardize output\n output_std = standardizer.standardize(output)\n\n # Package inputs and outputs; check if GPU is enabled\n if use_GPU:\n nn_input = (node_mat.cuda(), adj_mat.cuda())\n nn_output = output_std.cuda()\n else:\n nn_input = (node_mat, adj_mat)\n nn_output = output_std\n\n # Compute output from network\n nn_prediction = model(*nn_input)\n\n # Calculate loss\n loss = loss_fn(nn_output, nn_prediction)\n test_loss += loss\n\n # Calculate MAE\n prediction = standardizer.restore(nn_prediction.detach().cpu())\n mae = mean_absolute_error(output, prediction)\n test_mae += mae\n\n # Increase count\n count += 1\n\n # Calculate avg loss and MAE\n test_loss = test_loss.detach().cpu().numpy() / count\n test_mae = test_mae / count\n\n # Return loss and MAE\n return test_loss, test_mae"
},
{
"identifier": "parity_plot",
"path": "chem_gcn/utils.py",
"snippet": "def parity_plot(\n save_dir,\n model,\n test_dataloader,\n standardizer,\n use_GPU,\n max_atoms,\n node_vec_len,\n):\n \"\"\"\n Create a parity plot for the ChemGCN model.\n\n Parameters\n ----------\n save_dir: str\n Name of directory to store the parity plot in\n model : ChemGCN\n ChemGCN model object\n test_dataloader : data.DataLoader\n Test DataLoader\n standardizer : Standardizer\n Standardizer object\n use_GPU: bool\n Whether to use GPU\n max_atoms: int\n Maximum number of atoms in graph\n node_vec_len: int\n Maximum node vector length in graph\n\n \"\"\"\n\n # Create variables to store losses and error\n outputs = []\n predictions = []\n\n # Switch model to train mode\n model.eval()\n\n # Go over each batch in the dataloader\n for i, dataset in enumerate(test_dataloader):\n # Unpack data\n node_mat = dataset[0][0]\n adj_mat = dataset[0][1]\n output = dataset[1]\n\n # Reshape inputs\n first_dim = int((torch.numel(node_mat)) / (max_atoms * node_vec_len))\n node_mat = node_mat.reshape(first_dim, max_atoms, node_vec_len)\n adj_mat = adj_mat.reshape(first_dim, max_atoms, max_atoms)\n\n # Package inputs and outputs; check if GPU is enabled\n if use_GPU:\n nn_input = (node_mat.cuda(), adj_mat.cuda())\n else:\n nn_input = (node_mat, adj_mat)\n\n # Compute output from network\n nn_prediction = model(*nn_input)\n\n # Calculate MAE\n prediction = standardizer.restore(nn_prediction.detach().cpu())\n\n # Add to list\n outputs.append(output)\n predictions.append(prediction)\n\n # Flatten\n outputs_arr = np.concatenate(outputs)\n preds_arr = np.concatenate(predictions)\n\n # Create plot\n fig, ax = plt.subplots(1, 1, figsize=(5, 5), dpi=500)\n ax.scatter(\n outputs_arr, preds_arr, marker=\"o\", color=\"mediumseagreen\", edgecolor=\"black\"\n )\n\n min_plot = min(ax.get_xlim()[0], ax.get_ylim()[0])\n max_plot = max(ax.get_xlim()[1], ax.get_ylim()[1])\n min_plot = (1 - np.sign(min_plot) * 0.2) * min_plot\n max_plot = (1 + np.sign(max_plot) * 0.2) * max_plot\n\n ax.plot([min_plot, max_plot], [min_plot, max_plot], linestyle=\"-\", color=\"black\")\n ax.margins(x=0, y=0)\n ax.set_xlim([min_plot, max_plot])\n ax.set_ylim([min_plot, max_plot])\n ax.set_xlabel(\"Measured values (log mols/l)\")\n ax.set_ylabel(\"ChemGCN predictions (log mols/l)\")\n ax.set_title(\"Parity plot\")\n fig.tight_layout()\n fig.savefig(os.path.join(save_dir, \"parity_plot.png\"))"
},
{
"identifier": "loss_curve",
"path": "chem_gcn/utils.py",
"snippet": "def loss_curve(save_dir, epochs, losses):\n \"\"\"\n Make a loss curve.\n\n Parameters\n ----------\n save_dir: str\n Name of directory to store plot in\n epochs: list\n List of epochs\n losses: list\n List of losses\n\n \"\"\"\n fig, ax = plt.subplots(1, 1, figsize=(8, 5), dpi=500)\n ax.plot(epochs, losses, marker=\"o\", linestyle=\"--\", color=\"royalblue\")\n ax.set_xlabel(\"Epoch\")\n ax.set_ylabel(\"Mean squared loss\")\n ax.set_title(\"Loss curve\")\n fig.tight_layout()\n fig.savefig(os.path.join(save_dir, \"loss_curve.png\"))"
},
{
"identifier": "Standardizer",
"path": "chem_gcn/utils.py",
"snippet": "class Standardizer:\n def __init__(self, X):\n \"\"\"\n Class to standardize ChemGCN outputs\n\n Parameters\n ----------\n X : torch.Tensor\n Tensor of outputs\n \"\"\"\n self.mean = torch.mean(X)\n self.std = torch.std(X)\n\n def standardize(self, X):\n \"\"\"\n Convert a non-standardized output to a standardized output\n\n Parameters\n ----------\n X : torch.Tensor\n Tensor of non-standardized outputs\n\n Returns\n -------\n Z : torch.Tensor\n Tensor of standardized outputs\n\n \"\"\"\n Z = (X - self.mean) / (self.std)\n return Z\n\n def restore(self, Z):\n \"\"\"\n Restore a standardized output to the non-standardized output\n\n Parameters\n ----------\n Z : torch.Tensor\n Tensor of standardized outputs\n\n Returns\n -------\n X : torch.Tensor\n Tensor of non-standardized outputs\n\n \"\"\"\n X = self.mean + Z * self.std\n return X\n\n def state(self):\n \"\"\"\n Return dictionary of the state of the Standardizer\n\n Returns\n -------\n dict\n Dictionary with the mean and std of the outputs\n\n \"\"\"\n return {\"mean\": self.mean, \"std\": self.std}\n\n def load(self, state):\n \"\"\"\n Load a dictionary containing the state of the Standardizer and assign mean and std\n\n Parameters\n ----------\n state : dict\n Dictionary containing mean and std\n \"\"\"\n self.mean = state[\"mean\"]\n self.std = state[\"std\"]"
},
{
"identifier": "GraphData",
"path": "chem_gcn/graphs.py",
"snippet": "class GraphData(Dataset):\n def __init__(self, dataset_path: str, node_vec_len: int, max_atoms: int):\n \"\"\"\n GraphData class inheriting from the Dataset class in PyTorch.\n\n Parameters\n ----------\n dataset_path: str\n Path to the dataset file\n node_vec_len : int\n Node vector length of molecular graphs\n max_atoms : int\n Maximum number of atoms in molecular graphs\n \"\"\"\n\n # Save attributes\n self.node_vec_len = node_vec_len\n self.max_atoms = max_atoms\n\n # Open dataset file\n df = pd.read_csv(dataset_path)\n\n # Create lists\n self.indices = df.index.to_list()\n self.smiles = df[\"smiles\"].to_list()\n self.outputs = df[\"measured log solubility in mols per litre\"].to_list()\n\n def __len__(self):\n \"\"\"\n Get length of the dataset\n\n Returns\n -------\n Length of dataset\n \"\"\"\n return len(self.indices)\n\n def __getitem__(self, i: int):\n \"\"\"\n Returns node matrix, adjacency matrix, output, and SMILES string of\n molecule.\n\n Parameters\n ----------\n i : int\n Dataset index\n\n Returns\n -------\n node_mat : torch.Tensor with dimension (max_atoms,node_vec_len)\n Node matrix\n adj_mat: torch.Tensor with dimension (max_atoms,max_atoms)\n Adjacency matrix\n output : torch.Tensor with dimension n_outputs\n Output vector\n smile : str\n SMILES string of molecule\n \"\"\"\n\n # Get smile\n smile = self.smiles[i]\n\n # Create MolGraph object\n mol = Graph(smile, self.node_vec_len, self.max_atoms)\n\n # Get matrices\n node_mat = torch.Tensor(mol.node_mat)\n adj_mat = torch.Tensor(mol.adj_mat)\n\n # Get output\n output = torch.Tensor([self.outputs[i]])\n\n return (node_mat, adj_mat), output, smile\n\n def get_atom_no_sum(self, i):\n \"\"\"\n Get sum of the atomic numbers of all molecules in the dataset\n\n Parameters\n ----------\n i : int\n Dataset index.\n\n Returns\n -------\n atomic_no_sum: int\n Sum of all atomic numbers\n \"\"\"\n # Get smile\n smile = self.smiles[i]\n\n # Create MolGraph object\n mol = Graph(smile, self.node_vec_len, self.max_atoms)\n\n # Get matrices\n node_mat = mol.node_mat\n\n # Get atomic number sum\n one_pos_mat = np.argwhere(node_mat == 1)\n atomic_no_sum = one_pos_mat[:, -1].sum()\n return atomic_no_sum"
},
{
"identifier": "collate_graph_dataset",
"path": "chem_gcn/graphs.py",
"snippet": "def collate_graph_dataset(dataset: Dataset):\n \"\"\"\n Collate function for the GraphData dataset.\n\n Parameters\n ----------\n dataset : GraphData\n Object of the GraphData class.\n\n Returns\n -------\n node_mats_tensor, adj_mats_tensor : tuple of two torch.Tensor objects\n Node matrices with dimensions (batch_size * max_atoms, node_vec_len) and\n adjacency matrices with dimensions (batch_size * max_atoms, max_atoms)\n outputs_tensor : torch.Tensor with dimensions (batch_size, n_outputs)\n Tensor containing outputs.\n smiles : list\n List of size batch_size containing SMILES strings.\n \"\"\"\n\n # Create empty lists of node and adjacency matrices, outputs, and smiles\n node_mats = []\n adj_mats = []\n outputs = []\n smiles = []\n\n # Iterate over list and assign each component to the correct list\n for i in range(len(dataset)):\n (node_mat, adj_mat), output, smile = dataset[i]\n node_mats.append(node_mat)\n adj_mats.append(adj_mat)\n outputs.append(output)\n smiles.append(smile)\n\n # Create tensors\n node_mats_tensor = torch.cat(node_mats, dim=0)\n adj_mats_tensor = torch.cat(adj_mats, dim=0)\n outputs_tensor = torch.stack(outputs, dim=0)\n\n # Return tensors\n return (node_mats_tensor, adj_mats_tensor), outputs_tensor, smiles"
}
] | import numpy as np
import torch
from pathlib import Path
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from chem_gcn.model import ChemGCN
from chem_gcn.utils import (
train_model,
test_model,
parity_plot,
loss_curve,
Standardizer,
)
from chem_gcn.graphs import GraphData, collate_graph_dataset | 5,653 | """Train a ChemGCN model."""
#### Fix seeds
np.random.seed(0)
torch.manual_seed(0)
use_GPU = torch.cuda.is_available()
#### Inputs
max_atoms = 200
node_vec_len = 60
train_size = 0.7
batch_size = 32
hidden_nodes = 60
n_conv_layers = 4
n_hidden_layers = 2
learning_rate = 0.01
n_epochs = 50
#### Start by creating dataset
main_path = Path(__file__).resolve().parent
data_path = main_path / "data" / "solubility_data.csv"
dataset = GraphData(
dataset_path=data_path, max_atoms=max_atoms, node_vec_len=node_vec_len
)
#### Split data into training and test sets
# Get train and test sizes
dataset_indices = np.arange(0, len(dataset), 1)
train_size = int(np.round(train_size * len(dataset)))
test_size = len(dataset) - train_size
# Randomly sample train and test indices
train_indices = np.random.choice(dataset_indices, size=train_size, replace=False)
test_indices = np.array(list(set(dataset_indices) - set(train_indices)))
# Create dataloaders
train_sampler = SubsetRandomSampler(train_indices)
test_sampler = SubsetRandomSampler(test_indices)
train_loader = DataLoader(
dataset,
batch_size=batch_size,
sampler=train_sampler,
collate_fn=collate_graph_dataset,
)
test_loader = DataLoader(
dataset,
batch_size=batch_size,
sampler=test_sampler,
collate_fn=collate_graph_dataset,
)
#### Initialize model, standardizer, optimizer, and loss function
# Model
model = ChemGCN(
node_vec_len=node_vec_len,
node_fea_len=hidden_nodes,
hidden_fea_len=hidden_nodes,
n_conv=n_conv_layers,
n_hidden=n_hidden_layers,
n_outputs=1,
p_dropout=0.1,
)
# Transfer to GPU if needed
if use_GPU:
model.cuda()
# Standardizer
outputs = [dataset[i][1] for i in range(len(dataset))]
standardizer = Standardizer(torch.Tensor(outputs))
# Optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Loss function
loss_fn = torch.nn.MSELoss()
#### Train the model
loss = []
mae = []
epoch = []
for i in range(n_epochs):
epoch_loss, epoch_mae = train_model(
i,
model,
train_loader,
optimizer,
loss_fn,
standardizer,
use_GPU,
max_atoms,
node_vec_len,
)
loss.append(epoch_loss)
mae.append(epoch_mae)
epoch.append(i)
#### Test the model
# Call test model function
| """Train a ChemGCN model."""
#### Fix seeds
np.random.seed(0)
torch.manual_seed(0)
use_GPU = torch.cuda.is_available()
#### Inputs
max_atoms = 200
node_vec_len = 60
train_size = 0.7
batch_size = 32
hidden_nodes = 60
n_conv_layers = 4
n_hidden_layers = 2
learning_rate = 0.01
n_epochs = 50
#### Start by creating dataset
main_path = Path(__file__).resolve().parent
data_path = main_path / "data" / "solubility_data.csv"
dataset = GraphData(
dataset_path=data_path, max_atoms=max_atoms, node_vec_len=node_vec_len
)
#### Split data into training and test sets
# Get train and test sizes
dataset_indices = np.arange(0, len(dataset), 1)
train_size = int(np.round(train_size * len(dataset)))
test_size = len(dataset) - train_size
# Randomly sample train and test indices
train_indices = np.random.choice(dataset_indices, size=train_size, replace=False)
test_indices = np.array(list(set(dataset_indices) - set(train_indices)))
# Create dataloaders
train_sampler = SubsetRandomSampler(train_indices)
test_sampler = SubsetRandomSampler(test_indices)
train_loader = DataLoader(
dataset,
batch_size=batch_size,
sampler=train_sampler,
collate_fn=collate_graph_dataset,
)
test_loader = DataLoader(
dataset,
batch_size=batch_size,
sampler=test_sampler,
collate_fn=collate_graph_dataset,
)
#### Initialize model, standardizer, optimizer, and loss function
# Model
model = ChemGCN(
node_vec_len=node_vec_len,
node_fea_len=hidden_nodes,
hidden_fea_len=hidden_nodes,
n_conv=n_conv_layers,
n_hidden=n_hidden_layers,
n_outputs=1,
p_dropout=0.1,
)
# Transfer to GPU if needed
if use_GPU:
model.cuda()
# Standardizer
outputs = [dataset[i][1] for i in range(len(dataset))]
standardizer = Standardizer(torch.Tensor(outputs))
# Optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Loss function
loss_fn = torch.nn.MSELoss()
#### Train the model
loss = []
mae = []
epoch = []
for i in range(n_epochs):
epoch_loss, epoch_mae = train_model(
i,
model,
train_loader,
optimizer,
loss_fn,
standardizer,
use_GPU,
max_atoms,
node_vec_len,
)
loss.append(epoch_loss)
mae.append(epoch_mae)
epoch.append(i)
#### Test the model
# Call test model function | test_loss, test_mae = test_model( | 2 | 2023-12-21 07:46:28+00:00 | 8k |
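The training script in this row standardizes its solubility targets with the Standardizer quoted from chem_gcn/utils.py and draws a disjoint train/test split with np.random.choice. The short sketch below reproduces just those two pieces, showing the standardize/restore round trip and the disjointness of the split; it is an illustration, not the project's own test code, and the toy target values are invented.

import numpy as np
import torch

class Standardizer:
    """Mean/std scaling of regression targets, mirroring the quoted snippet."""
    def __init__(self, X: torch.Tensor):
        self.mean = torch.mean(X)
        self.std = torch.std(X)

    def standardize(self, X):
        return (X - self.mean) / self.std   # raw -> zero mean, unit std

    def restore(self, Z):
        return self.mean + Z * self.std     # standardized -> raw

# Toy solubility targets (log mol/L), invented for this example.
targets = torch.tensor([-3.2, -1.5, 0.4, -2.8, -0.9, -4.1])
standardizer = Standardizer(targets)
z = standardizer.standardize(targets)
assert torch.allclose(standardizer.restore(z), targets, atol=1e-5)  # lossless round trip

# Disjoint 70/30 split of dataset indices, as in the training script.
rng = np.random.default_rng(0)
indices = np.arange(len(targets))
n_train = int(round(0.7 * len(indices)))
train_idx = rng.choice(indices, size=n_train, replace=False)
test_idx = np.array(sorted(set(indices) - set(train_idx)))
assert set(train_idx).isdisjoint(test_idx) and len(train_idx) + len(test_idx) == len(indices)
print("train:", train_idx, "test:", test_idx)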
Ruiyuan-Zhang/CCS | multi_part_assembly/models/wx_transformer/wx_transformers.py | [
{
"identifier": "TransformerEncoderLayer",
"path": "multi_part_assembly/utils/wx_transformer_utilities/transformer_layer.py",
"snippet": "class TransformerEncoderLayer(nn.Module):\n \"\"\"Encoder layer block.\n In the original paper each operation (multi-head attention or FFN) is\n postprocessed with: `dropout -> add residual -> layernorm`. In the\n tensor2tensor code they suggest that learning is more robust when\n preprocessing each layer with layernorm and postprocessing with:\n `dropout -> add residual`. We default to the approach in the paper, but the\n tensor2tensor approach can be enabled by setting\n *args.encoder_normalize_before* to ``True``.\n Args:\n args (argparse.Namespace): parsed command-line arguments\n \"\"\"\n\n def __init__(self, args, nb, blockatt, blockatt_memory, use_nfm, out_proj_dim=None):\n super().__init__()\n\n self.blockatt = blockatt\n self.blockatt_memory = blockatt_memory\n\n self.embed_dim = args.encoder_embed_dim\n self.quant_noise = getattr(args, \"quant_noise_pq\", 0)\n self.quant_noise_block_size = getattr(args, \"quant_noise_pq_block_size\", 8)\n\n\n self.use_nfm = use_nfm\n\n print('using nfm?', self.use_nfm)\n\n self.nb = nb\n\n self.norm_blocks = self.nb\n\n self.self_attn = self.build_self_attention(self.embed_dim, args) #should divide embed_dim by nb. Then raise embed_dim in args\n self.self_attn_layer_norm = NormLayer(self.norm_blocks, self.embed_dim // self.norm_blocks)\n self.dropout_module = FairseqDropout(args.dropout, module_name=self.__class__.__name__)\n self.activation_fn = utils.get_activation_fn(\n activation=getattr(args, \"activation_fn\", \"relu\")\n )\n\n print(\"SETUP TRANSFORMER LAYER\", 'blocks', self.nb)\n\n activation_dropout_p = getattr(args, \"activation_dropout\", 0)\n if activation_dropout_p == 0:\n # for backwards compatibility with models that use args.relu_dropout\n activation_dropout_p = getattr(args, \"relu_dropout\", 0)\n self.activation_dropout_module = FairseqDropout(\n float(activation_dropout_p), module_name=self.__class__.__name__\n )\n self.normalize_before = args.encoder_normalize_before\n self.fc1 = self.build_fc1(\n self.embed_dim, args.encoder_ffn_embed_dim, self.quant_noise, self.quant_noise_block_size\n )\n self.fc2 = self.build_fc2(\n args.encoder_ffn_embed_dim, self.embed_dim, self.quant_noise, self.quant_noise_block_size\n )\n\n self.final_layer_norm = NormLayer(self.norm_blocks, self.embed_dim // self.norm_blocks)\n\n if self.blockatt:\n self.comm = Attention(args.encoder_attention_heads, self.nb, self.embed_dim, self.use_nfm)\n self.comm_norm = NormLayer(self.norm_blocks, self.embed_dim // self.norm_blocks)\n\n if self.blockatt_memory:\n memory_slots = 4\n memory_head_size = 128\n memory_num_heads = 1\n gate_style = 'memory'\n print('not using special key size gate_style is', gate_style, memory_slots, memory_num_heads, memory_head_size)\n\n self.memory_layer = RelationalMemory(mem_slots=memory_slots, head_size=memory_head_size, input_size=self.embed_dim, output_size=self.embed_dim,\n num_heads=memory_num_heads, num_blocks=1, forget_bias=1., input_bias=0.,\n attention_mlp_layers=5, gate_style=gate_style)\n\n #self.n_blocks_val * self.block_dim_val\n #self.block_dim_val = dim_val // self.n_blocks_val\n self.memory_attention = MemoryAttention(n_blocks_query=self.nb, n_blocks_val=8, dim_query=self.embed_dim, dim_val=memory_head_size*memory_num_heads*memory_slots)\n self.self_mem_norm = NormLayer(self.norm_blocks, self.embed_dim // self.norm_blocks)\n\n #self.competition = GroupLinearLayer(self.embed_dim//self.nb, 1, self.nb, a=0.05)\n #self.comp_sm = nn.Softmax(dim=2)\n self.competition = None\n\n if out_proj_dim is not None:\n 
self.out_proj = GroupLinearLayer(self.embed_dim//self.nb, out_proj_dim//self.nb, self.nb)\n else:\n self.out_proj = None\n\n def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):\n return quant_noise(GroupLinearLayer(input_dim//self.nb, output_dim//self.nb, self.nb), p=q_noise, block_size=qn_block_size)\n #return quant_noise(nn.Linear(input_dim, output_dim), p=q_noise, block_size=qn_block_size)\n\n def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):\n return quant_noise(GroupLinearLayer(input_dim//self.nb, output_dim//self.nb, self.nb), p=q_noise, block_size=qn_block_size)\n #return quant_noise(nn.Linear(input_dim, output_dim), p=q_noise, block_size=qn_block_size)\n\n def build_self_attention(self, embed_dim, args):\n return MultiheadAttention(\n embed_dim,\n args.encoder_attention_heads,\n dropout=args.attention_dropout,\n self_attention=True,\n q_noise=self.quant_noise,\n qn_block_size=self.quant_noise_block_size,\n nblocks=self.nb,\n top_k_ratio = args.topk_ratio,\n use_value_competition = False,\n\n\n )\n\n def upgrade_state_dict_named(self, state_dict, name):\n \"\"\"\n Rename layer norm states from `...layer_norms.0.weight` to\n `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to\n `...final_layer_norm.weight`\n \"\"\"\n layer_norm_map = {\"0\": \"self_attn_layer_norm\", \"1\": \"final_layer_norm\"}\n for old, new in layer_norm_map.items():\n for m in (\"weight\", \"bias\"):\n k = \"{}.layer_norms.{}.{}\".format(name, old, m)\n if k in state_dict:\n state_dict[\"{}.{}.{}\".format(name, new, m)] = state_dict[k]\n del state_dict[k]\n\n def forward(self, x, encoder_padding_mask, attn_mask: Optional[Tensor] = None, state = None):\n \"\"\"\n Args:\n x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`\n encoder_padding_mask (ByteTensor): binary ByteTensor of shape\n `(batch, seq_len)` where padding elements are indicated by ``1``.\n attn_mask (ByteTensor): binary tensor of shape `(tgt_len, src_len)`,\n where `tgt_len` is the length of output and `src_len` is the\n length of input, though here both are equal to `seq_len`.\n `attn_mask[tgt_i, src_j] = 1` means that when calculating the\n embedding for `tgt_i`, we exclude (mask out) `src_j`. 
This is\n useful for strided self-attention.\n Returns:\n encoded output of shape `(seq_len, batch, embed_dim)`\n \"\"\"\n # anything in original attn_mask = 1, becomes -1e8\n # anything in original attn_mask = 0, becomes 0\n # Note that we cannot use -inf here, because at some edge cases,\n # the attention weight (before softmax) for some padded element in query\n # will become -inf, which results in NaN in model parameters\n\n seq_len, bsz, _ = x.shape\n\n if self.competition is not None:\n comp = self.competition(x)\n comp = self.comp_sm(comp)\n #comp = F.gumbel_softmax(comp, tau=0.5, hard=False, dim=2)\n comp = comp.unsqueeze(-1).repeat(1,1,1,self.embed_dim//self.nb)\n comp = comp.view((x.shape[0], x.shape[1], self.embed_dim))\n else:\n comp = None\n\n if attn_mask is not None:\n attn_mask = attn_mask.masked_fill(attn_mask.to(torch.bool), -1e8)\n\n residual = x\n if self.normalize_before:\n x = self.self_attn_layer_norm(x)\n\n x, _ = self.self_attn(\n query=state if state is not None else x,\n key=x,\n value=x,\n key_padding_mask=encoder_padding_mask,\n attn_mask=attn_mask,\n )\n\n x = self.dropout_module(x)\n x = residual + x\n\n if not self.normalize_before:\n x = self.self_attn_layer_norm(x)\n\n if self.blockatt:\n if self.normalize_before:\n x = self.comm_norm(x)\n\n residual = x\n x, _ = self.comm(x)\n x = self.dropout_module(x)\n x = residual + x\n\n if not self.normalize_before:\n x = self.comm_norm(x)\n\n if self.blockatt_memory:\n if self.normalize_before:\n x = self.self_mem_norm(x)\n residual = x\n T,bsz,nhid = x.shape\n if comp is not None:\n x_write = comp * x\n else:\n x_write = x*1.0\n _, new_memory = self.memory_layer.forward_step(x_write.reshape((T*bsz, nhid)), self.memory_obj[0])\n self.memory_obj[0] = new_memory\n Tbs,num_slots,nhid_slot = new_memory.shape\n mem_read = new_memory.reshape((T, bsz, num_slots*nhid_slot))\n x,_ = self.memory_attention(x, mem_read)\n x = residual + x\n\n if not self.normalize_before:\n x = self.self_mem_norm(x)\n\n\n residual = x\n if self.normalize_before:\n x = self.final_layer_norm(x)\n\n #print('fc1 on shape', x.shape, 'in encoder')\n x = self.activation_fn(self.fc1(x))\n x = self.activation_dropout_module(x)\n #print('fc2 on shape', x.shape, 'in encoder')\n x = self.fc2(x)\n x = self.dropout_module(x)\n x = residual + x\n if not self.normalize_before:\n x = self.final_layer_norm(x)\n\n if self.out_proj is not None:\n x = self.out_proj(x)\n\n return x"
},
{
"identifier": "TransformerEncoderLayerVanilla",
"path": "multi_part_assembly/utils/wx_transformer_utilities/transformer_layer.py",
"snippet": "class TransformerEncoderLayerVanilla(nn.Module):\n \"\"\"Encoder layer block.\n In the original paper each operation (multi-head attention or FFN) is\n postprocessed with: `dropout -> add residual -> layernorm`. In the\n tensor2tensor code they suggest that learning is more robust when\n preprocessing each layer with layernorm and postprocessing with:\n `dropout -> add residual`. We default to the approach in the paper, but the\n tensor2tensor approach can be enabled by setting\n *args.encoder_normalize_before* to ``True``.\n Args:\n args (argparse.Namespace): parsed command-line arguments\n \"\"\"\n\n def __init__(self, args, out_proj = None):\n super().__init__()\n self.embed_dim = args.encoder_embed_dim\n self.self_attn = self.build_self_attention(self.embed_dim, args)\n self.self_attn_layer_norm = LayerNorm(self.embed_dim, eps=1e-5) \n self.dropout = args.dropout\n self.activation_fn = utils.get_activation_fn( \n activation=getattr(args, \"activation_fn\", \"relu\")\n )\n self.activation_dropout = getattr(args, \"activation_dropout\", 0)\n if self.activation_dropout == 0:\n # for backwards compatibility with models that use args.relu_dropout\n self.activation_dropout = getattr(args, \"relu_dropout\", 0)\n self.normalize_before = args.encoder_normalize_before\n self.fc1 = self.build_fc1(self.embed_dim, args.encoder_ffn_embed_dim)\n self.fc2 = self.build_fc2(args.encoder_ffn_embed_dim, self.embed_dim)\n self.final_layer_norm = LayerNorm(self.embed_dim, eps=1e-5)\n\n if out_proj is not None:\n self.final_linear = nn.Linear(args.encoder_embed_dim, out_proj)\n else:\n self.final_linear = None\n\n def build_fc1(self, input_dim, output_dim):\n return nn.Linear(input_dim, output_dim)\n\n def build_fc2(self, input_dim, output_dim):\n return nn.Linear(input_dim, output_dim)\n\n def build_self_attention(self, embed_dim, args):\n return MultiheadAttention(\n embed_dim,\n args.encoder_attention_heads,\n dropout=args.attention_dropout,\n self_attention=args.self_attention,\n shared_memory_attention = args.shared_memory_attention,\n use_topk = args.use_topk,\n topk = args.topk,\n num_steps = args.num_steps,\n mem_slots = args.mem_slots,\n null_attention = args.null_attention,\n regressive = args.regressive\n )\n\n def upgrade_state_dict_named(self, state_dict, name):\n \"\"\"\n Rename layer norm states from `...layer_norms.0.weight` to\n `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to\n `...final_layer_norm.weight`\n \"\"\"\n layer_norm_map = {\"0\": \"self_attn_layer_norm\", \"1\": \"final_layer_norm\"}\n for old, new in layer_norm_map.items():\n for m in (\"weight\", \"bias\"):\n k = \"{}.layer_norms.{}.{}\".format(name, old, m)\n if k in state_dict:\n state_dict[\"{}.{}.{}\".format(name, new, m)] = state_dict[k]\n del state_dict[k]\n\n def forward(self, x, encoder_padding_mask, attn_mask: Optional[Tensor] = None, state = None, memory = None):\n \"\"\"\n Args:\n x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`\n encoder_padding_mask (ByteTensor): binary ByteTensor of shape\n `(batch, src_len)` where padding elements are indicated by ``1``.\n attn_mask (ByteTensor): binary tensor of shape (T_tgt, T_src), where\n T_tgt is the length of query, while T_src is the length of key,\n though here both query and key is x here,\n attn_mask[t_tgt, t_src] = 1 means when calculating embedding\n for t_tgt, t_src is excluded (or masked out), =0 means it is\n included in attention\n Returns:\n encoded output of shape `(seq_len, batch, embed_dim)`\n \"\"\"\n 
residual = x \n if self.normalize_before:\n x = self.self_attn_layer_norm(x)\n if attn_mask is not None:\n attn_mask = attn_mask.masked_fill(attn_mask.to(torch.bool), -1e8)\n # anything in original attn_mask = 1, becomes -1e8\n # anything in original attn_mask = 0, becomes 0\n # Note that we cannot use -inf here, because at some edge cases,\n # the attention weight (before softmax) for some padded element in query\n # will become -inf, which results in NaN in model parameters\n # TODO: to formally solve this problem, we need to change fairseq's\n # MultiheadAttention. We will do this later on.\n #print(state is not None)\n\n x, memory, _ = self.self_attn( \n query=state if state is not None else x, \n key=x, \n value=x, \n key_padding_mask=encoder_padding_mask, \n attn_mask=attn_mask, \n memory = memory\n )\n\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n if not self.normalize_before:\n x = self.self_attn_layer_norm(x)\n\n residual = x\n if self.normalize_before:\n x = self.final_layer_norm(x)\n x = self.activation_fn(self.fc1(x))\n x = F.dropout(x, p=float(self.activation_dropout), training=self.training)\n x = self.fc2(x)\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n if not self.normalize_before:\n x = self.final_layer_norm(x)\n if self.final_linear is not None:\n x = self.final_linear(x)\n return x, memory "
},
{
"identifier": "PositionEncoder",
"path": "multi_part_assembly/utils/wx_transformer_utilities/pos_enc.py",
"snippet": "class PositionEncoder(nn.Module):\n def __init__(self, d_model, max_seq_len = 300):\n super().__init__()\n self.d_model = d_model\n # create constant 'pe' matrix with values dependant on\n # pos and i\n pe = torch.zeros(max_seq_len, d_model)\n for pos in range(max_seq_len):\n for i in range(0, d_model, 2):\n pe[pos, i] = \\\n math.sin(pos / (10000 ** ((2 * i)/d_model)))\n pe[pos, i + 1] = \\\n math.cos(pos / (10000 ** ((2 * (i + 1))/d_model)))\n\n pe = pe.unsqueeze(0)\n self.register_buffer('pe', pe)\n\n self.pos_emb_weight = nn.Parameter(torch.ones_like(pe))\n\n def forward(self, x):\n # make embeddings relatively larger\n\n x = x.permute(1,0,2)\n\n #x = x * math.sqrt(self.d_model)\n #add constant to embedding\n\n seq_len = x.size(1)\n\n #width x channel\n #pe_use = F.interpolate(self.pe.permute(0,2,1), size=seq_len).permute(0,2,1)\n\n pe_use = Variable(self.pe[:,:seq_len] * F.sigmoid(self.pos_emb_weight[:,:seq_len]), requires_grad=False).cuda()\n\n #bs x pos x nhid --> bs x nhid x pos --> bs x pos x nhid\n\n x = x + pe_use\n #Variable(pe_use, requires_grad=False).cuda()\n\n x = x.permute(1,0,2)\n\n return x"
},
{
"identifier": "GroupLinearLayer",
"path": "multi_part_assembly/utils/wx_transformer_utilities/GroupLinearLayer.py",
"snippet": "class GroupLinearLayer(nn.Module):\n def __init__(self, din, dout, num_blocks, bias=True, a = None):\n super(GroupLinearLayer, self).__init__()\n self.nb = num_blocks\n #din = din // num_blocks\n #dout = dout // num_blocks\n self.dout = dout\n if a is None:\n a = 1. / math.sqrt(dout)\n self.weight = nn.Parameter(torch.FloatTensor(num_blocks,din,dout).uniform_(-a,a))\n self.bias = bias\n if bias is True:\n self.bias = nn.Parameter(torch.FloatTensor(num_blocks,dout).uniform_(-a,a))\n #self.bias = nn.Parameter(torch.zeros(dout*num_blocks))\n else:\n self.bias = None\n def forward(self,x):\n ts,bs,m = x.shape\n #x = x.reshape((ts*bs, self.nb, m//self.nb))\n x = x.permute(1,0,2)\n x = torch.bmm(x,self.weight)\n x = x.permute(1,0,2)\n if not self.bias is None:\n x = x + self.bias\n #x = x.reshape((ts, bs, self.dout*self.nb))\n return x"
}
] | import torch
import torch.nn as nn
import types
import math
import numpy as np
import math
import time
import time
from multi_part_assembly.utils.wx_transformer_utilities.transformer_layer import TransformerEncoderLayer, TransformerEncoderLayerVanilla
from multi_part_assembly.utils.wx_transformer_utilities.pos_enc import PositionEncoder
from multi_part_assembly.utils.wx_transformer_utilities.GroupLinearLayer import GroupLinearLayer | 5,903 | args.topk_ratio = 1.0
args.dropout = 0.2
args.encoder_normalize_before = True
args.encoder_ffn_embed_dim = 2048
args.use_nfm = 'false'
args.shared_memory_attention = False
args.self_attention = True
args.mem_slots = 4
args.use_topk = False
args.topk = 3
args.num_steps = 5
class SelectAttention(nn.Module):
"""docstring for SelectAttention"""
def __init__(self, d_read, d_write, d_k = 16, num_read = 5, num_write = 5, share_query = False, share_key = False):
super(SelectAttention, self).__init__()
if not share_key:
self.gll_write = GroupLinearLayer(d_write,d_k, num_write)
else:
self.gll_write = nn.Linear(d_write, d_k)
if not share_query:
self.gll_read = GroupLinearLayer(d_read,d_k, num_read)
else:
self.gll_read = nn.Linear(d_read, d_k)
self.temperature = math.sqrt(d_k)
def forward(self, q, k):
read = self.gll_read(q)
write = self.gll_write(k)
return torch.bmm(read, write.permute(0, 2, 1)) / self.temperature
class TransformerEncoder(nn.Module):
def __init__(self,
embed_dim,
ffn_dim,
num_layers = 6,
num_heads = 4,
dropout = 0.1,
functional = False,
shared_memory_attention = False,
shared_memory_percentage = 0.1,
share_parameters = False,
mem_slots = 4,
num_attention_schemas = 3,
num_gru_schemas = 3,
schema_specific = False,
use_topk = False,
topk = 3,
num_steps = 5,
null_attention = False,
regressive = False):
super().__init__()
if schema_specific and (num_gru_schemas != num_attention_schemas):
print('Cannot use schema specific as num_gru_schemas != num_attention_schemas, continuing without')
self.schema_specific = False
else:
self.schema_specific = schema_specific
args.mem_slots = mem_slots
args.encoder_embed_dim = embed_dim
args.encoder_ffn_embed_dim = ffn_dim
args.encoder_attention_heads = num_heads
args.dropout = dropout
args.shared_memory_attention = shared_memory_attention
args.num_steps = num_steps
args.null_attention = null_attention
args.regressive = regressive
self.num_layers = num_layers
self.shared_memory_attention = shared_memory_attention
self.shared_memory_percentage = shared_memory_percentage
print('transformer embed_dim', embed_dim)
self.functional = functional
print('functional? '+str(self.functional))
if not self.functional:
layer_lst = []
args.use_topk = use_topk
args.topk = topk
args.encoder_embed_dim = embed_dim
self.share_parameters = share_parameters
if share_parameters:
self.enc = TransformerEncoderLayerVanilla(args)
else:
layer_lst = []
for i in range(self.num_layers):
layer_lst.append(TransformerEncoderLayerVanilla(args))
self.layers = nn.ModuleList(layer_lst)
else:
#args.encoder_embed_dim = inp_dim
#print('init_layer initialize')
#self.init_layer = TransformerEncoderLayerVanilla(args=args, out_proj=h_dim)
print('NUM GRU SCHEMAS:' + str(num_gru_schemas))
print('NUM Attention SCHEMAS:' + str(num_attention_schemas))
print('SCHEMA SPECIFIC:' + str(self.schema_specific))
args.use_topk = use_topk
args.topk = topk
print('inp_att initialize')
self.num_gru_schemas = num_gru_schemas
self.num_att_schemas = num_attention_schemas
self.schema_stats = np.zeros(self.num_gru_schemas)
args.self_attention = True
self.inp_att = nn.ModuleList([TransformerEncoderLayerVanilla(args=args) for _ in range(num_attention_schemas)])
self.select_attention_inp_att = SelectAttention( args.encoder_embed_dim, args.encoder_embed_dim, num_read = 1, num_write = num_attention_schemas)
print('gru initialize')
hidden_dim = args.encoder_embed_dim
self.gru_pool = nn.ModuleList([nn.GRUCell(hidden_dim, hidden_dim) for _ in range(num_gru_schemas)])
#args.self_attention = True
#self.state_att = TransformerEncoderLayerVanilla(args=args)
self.select_attention = SelectAttention( hidden_dim + hidden_dim, hidden_dim, num_read = 1, num_write = num_gru_schemas)
| #from transformer import TransformerEncoder
args = types.SimpleNamespace()
args.use_module_communication = 'true'
args.encoder_embed_dim = 512
args.encoder_attention_heads = 8 #was 8
args.attention_dropout = 0.1
args.topk_ratio = 1.0
args.dropout = 0.2
args.encoder_normalize_before = True
args.encoder_ffn_embed_dim = 2048
args.use_nfm = 'false'
args.shared_memory_attention = False
args.self_attention = True
args.mem_slots = 4
args.use_topk = False
args.topk = 3
args.num_steps = 5
class SelectAttention(nn.Module):
"""docstring for SelectAttention"""
def __init__(self, d_read, d_write, d_k = 16, num_read = 5, num_write = 5, share_query = False, share_key = False):
super(SelectAttention, self).__init__()
if not share_key:
self.gll_write = GroupLinearLayer(d_write,d_k, num_write)
else:
self.gll_write = nn.Linear(d_write, d_k)
if not share_query:
self.gll_read = GroupLinearLayer(d_read,d_k, num_read)
else:
self.gll_read = nn.Linear(d_read, d_k)
self.temperature = math.sqrt(d_k)
def forward(self, q, k):
read = self.gll_read(q)
write = self.gll_write(k)
return torch.bmm(read, write.permute(0, 2, 1)) / self.temperature
class TransformerEncoder(nn.Module):
def __init__(self,
embed_dim,
ffn_dim,
num_layers = 6,
num_heads = 4,
dropout = 0.1,
functional = False,
shared_memory_attention = False,
shared_memory_percentage = 0.1,
share_parameters = False,
mem_slots = 4,
num_attention_schemas = 3,
num_gru_schemas = 3,
schema_specific = False,
use_topk = False,
topk = 3,
num_steps = 5,
null_attention = False,
regressive = False):
super().__init__()
if schema_specific and (num_gru_schemas != num_attention_schemas):
print('Cannot use schema specific as num_gru_schemas != num_attention_schemas, continuing without')
self.schema_specific = False
else:
self.schema_specific = schema_specific
args.mem_slots = mem_slots
args.encoder_embed_dim = embed_dim
args.encoder_ffn_embed_dim = ffn_dim
args.encoder_attention_heads = num_heads
args.dropout = dropout
args.shared_memory_attention = shared_memory_attention
args.num_steps = num_steps
args.null_attention = null_attention
args.regressive = regressive
self.num_layers = num_layers
self.shared_memory_attention = shared_memory_attention
self.shared_memory_percentage = shared_memory_percentage
print('transformer embed_dim', embed_dim)
self.functional = functional
print('functional? '+str(self.functional))
if not self.functional:
layer_lst = []
args.use_topk = use_topk
args.topk = topk
args.encoder_embed_dim = embed_dim
self.share_parameters = share_parameters
if share_parameters:
self.enc = TransformerEncoderLayerVanilla(args)
else:
layer_lst = []
for i in range(self.num_layers):
layer_lst.append(TransformerEncoderLayerVanilla(args))
self.layers = nn.ModuleList(layer_lst)
else:
#args.encoder_embed_dim = inp_dim
#print('init_layer initialize')
#self.init_layer = TransformerEncoderLayerVanilla(args=args, out_proj=h_dim)
print('NUM GRU SCHEMAS:' + str(num_gru_schemas))
print('NUM Attention SCHEMAS:' + str(num_attention_schemas))
print('SCHEMA SPECIFIC:' + str(self.schema_specific))
args.use_topk = use_topk
args.topk = topk
print('inp_att initialize')
self.num_gru_schemas = num_gru_schemas
self.num_att_schemas = num_attention_schemas
self.schema_stats = np.zeros(self.num_gru_schemas)
args.self_attention = True
self.inp_att = nn.ModuleList([TransformerEncoderLayerVanilla(args=args) for _ in range(num_attention_schemas)])
self.select_attention_inp_att = SelectAttention( args.encoder_embed_dim, args.encoder_embed_dim, num_read = 1, num_write = num_attention_schemas)
print('gru initialize')
hidden_dim = args.encoder_embed_dim
self.gru_pool = nn.ModuleList([nn.GRUCell(hidden_dim, hidden_dim) for _ in range(num_gru_schemas)])
#args.self_attention = True
#self.state_att = TransformerEncoderLayerVanilla(args=args)
self.select_attention = SelectAttention( hidden_dim + hidden_dim, hidden_dim, num_read = 1, num_write = num_gru_schemas)
| self.pe = PositionEncoder(args.encoder_embed_dim) | 2 | 2023-12-15 13:13:01+00:00 | 8k |
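The record above revolves around GroupLinearLayer (a per-block linear map) and SelectAttention (a scaled read/write scoring module). Below is a minimal, self-contained PyTorch sketch of those two ideas as a reading aid; the class and function names are illustrative, not the repository's, and the shapes are simplified relative to the original modules.

import math
import torch
import torch.nn as nn

class BlockLinear(nn.Module):
    # One independent (din -> dout) weight matrix per block, applied in a single batched
    # contraction, analogous to GroupLinearLayer's bmm over a (num_blocks, din, dout) parameter.
    def __init__(self, din, dout, num_blocks):
        super().__init__()
        a = 1.0 / math.sqrt(dout)
        self.weight = nn.Parameter(torch.empty(num_blocks, din, dout).uniform_(-a, a))

    def forward(self, x):
        # x: (batch, num_blocks, din) -> (batch, num_blocks, dout)
        return torch.einsum("bnd,nde->bne", x, self.weight)

def selection_scores(read, write, proj_read, proj_write, d_k=16):
    # Project readers and writers to a shared key size, then score every (read, write)
    # pair scaled by sqrt(d_k), as SelectAttention does before any softmax / top-k pick.
    q = proj_read(read)                       # (batch, num_read,  d_k)
    k = proj_write(write)                     # (batch, num_write, d_k)
    return torch.bmm(q, k.transpose(1, 2)) / math.sqrt(d_k)

if __name__ == "__main__":
    torch.manual_seed(0)
    proj_read = BlockLinear(64, 16, num_blocks=1)    # one query slot
    proj_write = BlockLinear(64, 16, num_blocks=3)   # e.g. three candidate schemas
    read = torch.randn(2, 1, 64)
    write = torch.randn(2, 3, 64)
    print(selection_scores(read, write, proj_read, proj_write).shape)  # torch.Size([2, 1, 3])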
uc-vision/taichi-splatting | taichi_splatting/perspective/projection.py | [
{
"identifier": "restore_grad",
"path": "taichi_splatting/misc/autograd.py",
"snippet": "@contextmanager\ndef restore_grad(*tensors):\n try:\n grads = [tensor.grad if tensor.grad is not None else None\n for tensor in tensors]\n \n for tensor in tensors: \n if tensor.requires_grad is True:\n tensor.grad = torch.zeros_like(tensor)\n yield\n finally:\n for tensor, grad in zip(tensors, grads):\n tensor.grad = grad"
},
{
"identifier": "CameraParams",
"path": "taichi_splatting/perspective/params.py",
"snippet": "class CameraParams:\n T_image_camera: torch.Tensor # (3, 3) camera projection matrix\n T_camera_world : torch.Tensor # (4, 4) camera view matrix\n\n @property\n def device(self):\n return self.T_image_camera.device\n\n @property\n def T_image_world(self):\n T_image_camera = torch.eye(4, \n device=self.T_image_camera.device, dtype=self.T_image_camera.dtype)\n T_image_camera[0:3, 0:3] = self.T_image_camera\n\n return T_image_camera @ self.T_camera_world\n\n near_plane: float\n far_plane: float\n image_size: Tuple[Integral, Integral]\n\n def __repr__(self):\n w, h = self.image_size\n fx, fy = self.T_image_camera[0, 0], self.T_image_camera[1, 1]\n cx, cy = self.T_image_camera[0, 2], self.T_image_camera[1, 2]\n\n pos_str = \", \".join([f\"{x:.3f}\" for x in self.camera_position])\n return f\"CameraParams({w}x{h}, fx={fx:.4f}, fy={fy:.4f}, cx={cx:.4f}, cy={cy:.4f}, clipping={self.near_plane:.4f}-{self.far_plane:.4f}, position=({pos_str})\"\n \n\n @property\n def camera_position(self):\n T_world_camera = torch.inverse(self.T_camera_world)\n return T_world_camera[0:3, 3]\n\n def to(self, device=None, dtype=None):\n return CameraParams(\n T_image_camera=self.T_image_camera.to(device=device, dtype=dtype),\n T_camera_world=self.T_camera_world.to(device=device, dtype=dtype),\n near_plane=self.near_plane,\n far_plane=self.far_plane,\n image_size=self.image_size\n )\n\n def __post_init__(self):\n assert self.T_image_camera.shape == (3, 3), f\"Expected shape (3, 3), got {self.T_image_camera.shape}\"\n assert self.T_camera_world.shape == (4, 4), f\"Expected shape (4, 4), got {self.T_camera_world.shape}\"\n\n assert len(self.image_size) == 2\n assert self.near_plane > 0\n assert self.far_plane > self.near_plane"
},
{
"identifier": "make_library",
"path": "taichi_splatting/taichi_lib/generic.py",
"snippet": "def make_library(dtype=ti.f32):\n \"\"\"\n This function returns a namespace containing all the functions and data types\n that are used in the other modules. This is done to provide different precisions\n for the same code. Primarily for enabling gradient (gradcheck) testing using f64.\n \"\"\"\n\n \n vec2 = ti.types.vector(2, dtype)\n vec3 = ti.types.vector(3, dtype)\n vec4 = ti.types.vector(4, dtype)\n\n mat2 = ti.types.matrix(2, 2, dtype)\n mat3 = ti.types.matrix(3, 3, dtype)\n mat4 = ti.types.matrix(4, 4, dtype)\n\n mat4x2 = ti.types.matrix(4, 2, dtype=dtype)\n\n #\n # Gaussian datatypes\n #\n\n\n @ti.dataclass\n class Gaussian2D:\n uv : vec2\n uv_conic : vec3\n alpha : dtype\n\n\n\n @ti.dataclass\n class Gaussian3D:\n position : vec3\n log_scaling : vec3\n rotation : vec4\n alpha_logit : dtype\n\n @ti.func\n def alpha(self):\n return sigmoid(self.alpha_logit)\n\n @ti.func\n def scale(self):\n return ti.math.exp(self.log_scaling)\n\n\n vec_g2d = ti.types.vector(struct_size(Gaussian2D), dtype=dtype)\n vec_g3d = ti.types.vector(struct_size(Gaussian3D), dtype=dtype)\n\n\n @ti.func\n def to_vec_g2d(uv:vec2, uv_conic:vec3, alpha:dtype) -> vec_g2d:\n return vec_g2d(*uv, *uv_conic, alpha)\n\n @ti.func\n def to_vec_g3d(position:vec3, log_scaling:vec3, rotation:vec4, alpha_logit:dtype) -> vec_g3d:\n return vec_g3d(*position, *log_scaling, *rotation, alpha_logit)\n\n\n @ti.func\n def unpack_vec_g3d(vec:vec_g3d) -> Gaussian3D:\n return vec[0:3], vec[3:6], vec[6:10], vec[10]\n\n @ti.func\n def unpack_vec_g2d(vec:vec_g2d) -> Gaussian2D:\n return vec[0:2], vec[2:5], vec[5]\n\n @ti.func\n def get_position_g3d(vec:vec_g3d) -> vec3:\n return vec[0:3]\n\n\n @ti.func\n def from_vec_g3d(vec:vec_g3d) -> Gaussian3D:\n return Gaussian3D(vec[0:3], vec[3:6], vec[6:10], vec[10])\n\n @ti.func\n def from_vec_g2d(vec:vec_g2d) -> Gaussian2D:\n return Gaussian2D(vec[0:2], vec[2:5], vec[5])\n\n\n @ti.func\n def unpack_activate_g3d(vec:vec_g3d):\n position, log_scaling, rotation, alpha_logit = unpack_vec_g3d(vec)\n return position, ti.exp(log_scaling), ti.math.normalize(rotation), sigmoid(alpha_logit)\n\n\n @ti.func\n def bounding_sphere(vec:vec_g3d, gaussian_scale: ti.template()):\n position, log_scaling = vec[0:3], vec[3:6]\n return position, ti.exp(log_scaling).max() * gaussian_scale\n\n # Taichi structs don't have static methods, but they can be added afterward\n Gaussian2D.vec = vec_g2d\n Gaussian2D.to_vec = to_vec_g2d\n Gaussian2D.from_vec = from_vec_g2d\n Gaussian2D.unpack = unpack_vec_g2d\n\n\n Gaussian3D.vec = vec_g3d\n Gaussian3D.to_vec = to_vec_g3d\n Gaussian3D.from_vec = from_vec_g3d\n Gaussian3D.unpack = unpack_vec_g3d\n Gaussian3D.unpack_activate = unpack_activate_g3d\n Gaussian3D.get_position = get_position_g3d\n Gaussian3D.bounding_sphere = bounding_sphere\n\n\n\n #\n # Projection related functions\n #\n\n mat2x3f = ti.types.matrix(n=2, m=3, dtype=dtype)\n\n @ti.func\n def project_perspective_camera_image(\n position: vec3,\n T_camera_world: mat4,\n projective_transform: mat3,\n ):\n point_in_camera = (T_camera_world @ vec4(*position, 1)).xyz\n uv = (projective_transform @ point_in_camera) / point_in_camera.z\n return uv.xy, point_in_camera\n\n\n @ti.func\n def project_perspective(\n position: vec3,\n T_image_world: mat4,\n ):\n point_in_camera = (T_image_world @ vec4(*position, 1))\n return point_in_camera.xy / point_in_camera.z, point_in_camera.z\n\n\n\n def camera_origin(T_camera_world: mat4):\n r, t = split_rt(T_camera_world)\n t = -(r.transpose() @ t)\n return t\n\n\n @ti.func\n def 
gaussian_covariance_in_camera(\n T_camera_world: mat4,\n cov_rotation: vec4,\n cov_scale: vec3,\n ) -> mat3:\n \"\"\" Construct and rotate the covariance matrix in camera space\n \"\"\"\n \n W = T_camera_world[:3, :3]\n R = quat_to_mat(cov_rotation)\n\n S = mat3([\n [cov_scale.x, 0, 0],\n [0, cov_scale.y, 0],\n [0, 0, cov_scale.z]\n ])\n # covariance matrix, 3x3, equation (6) in the paper\n # Sigma = R @ S @ S.transpose() @ R.transpose()\n # cov_uv = J @ W @ Sigma @ W.transpose() @ J.transpose() # equation (5) in the paper\n \n m = W @ R @ S\n return m @ m.transpose() \n\n\n @ti.func\n def get_projective_transform_jacobian(\n projection: mat3,\n position: vec3,\n ):\n # cx = projective_transform[0, 2]\n # cy = projective_transform[1, 2]\n # [[fx/z, 0, cx/z - (cx*z + fx*x)/z**2], [0, fy/z, cy/z - (cy*z + fy*y)/z**2]]\n fx = projection[0, 0]\n fy = projection[1, 1]\n\n x, y, z = position\n return mat2x3f([\n [fx / z, 0, -(fx * x) / (z * z)],\n [0, fy / z, -(fy * y) / (z * z)]\n ])\n\n @ti.func\n def project_perspective_gaussian(\n projective_transform: mat3,\n point_in_camera: vec3,\n cov_in_camera: mat3) -> mat2:\n \"\"\" Approximate the 2D gaussian covariance in image space \"\"\"\n\n J = get_projective_transform_jacobian(\n projective_transform, point_in_camera)\n \n cov_uv = J @ cov_in_camera @ J.transpose()\n return cov_uv\n\n\n\n\n # \n # Miscellaneous math functions\n #\n @ti.func\n def sigmoid(x:dtype):\n return 1. / (1. + ti.exp(-x))\n\n @ti.func\n def inverse_sigmoid(x:dtype):\n return -ti.log(1. / x - 1.)\n\n #\n # Miscellaneous conversion functions\n #\n\n @ti.func\n def mat3_from_ndarray(ndarray:ti.template()):\n return mat3([ndarray[i, j] \n for i in ti.static(range(3)) for j in ti.static(range(3))])\n\n @ti.func\n def mat4_from_ndarray(ndarray:ti.template()):\n return mat4([ndarray[i, j] \n for i in ti.static(range(4)) for j in ti.static(range(4))])\n @ti.func\n def isfin(x):\n return ~(ti.math.isinf(x) or ti.math.isnan(x))\n\n #\n # Ellipsoid related functions, covariance, conic, etc.\n #\n\n @ti.func\n def radii_from_cov(uv_cov: vec3) -> dtype:\n \n d = (uv_cov.x - uv_cov.z)\n max_eig_sq = (uv_cov.x + uv_cov.z +\n ti.sqrt(d * d + 4.0 * uv_cov.y * uv_cov.y)) / 2.0\n \n return ti.sqrt(max_eig_sq)\n\n @ti.func\n def cov_axes(cov:vec3):\n tr = cov.x + cov.z\n det = cov.x * cov.z - cov.y * cov.y\n\n gap = tr**2 - 4 * det\n sqrt_gap = ti.sqrt(ti.max(gap, 0))\n\n lambda1 = (tr + sqrt_gap) * 0.5\n lambda2 = (tr - sqrt_gap) * 0.5\n\n v1 = vec2(cov.x - lambda2, cov.y).normalized() \n v2 = vec2(v1.y, -v1.x)\n\n return v1 * ti.sqrt(lambda1), v2 * ti.sqrt(lambda2) \n\n\n @ti.func\n def inverse_cov(cov: vec3):\n # inverse of upper triangular part of symmetric matrix\n inv_det = 1 / (cov.x * cov.z - cov.y * cov.y)\n return vec3(inv_det * cov.z, -inv_det * cov.y, inv_det * cov.x)\n\n\n @ti.func\n def upper(cov: mat2) -> vec3:\n return vec3(cov[0, 0], cov[0, 1], cov[1, 1])\n\n\n\n @ti.func\n def radii_from_conic(conic: vec3):\n return radii_from_cov(inverse_cov(conic))\n\n\n @ti.func\n def conic_pdf(xy: vec2, uv: vec2, uv_conic: vec3) -> dtype:\n dx, dy = xy - uv\n a, b, c = uv_conic\n\n p = ti.exp(-0.5 * (dx**2 * a + dy**2 * c) - dx * dy * b)\n return p\n\n\n @ti.func\n def conic_pdf_with_grad(xy: vec2, uv: vec2, uv_conic: vec3):\n d = xy - uv\n a, b, c = uv_conic\n\n dx2 = d.x**2\n dy2 = d.y**2\n dxdy = d.x * d.y\n \n p = ti.exp(-0.5 * (dx2 * a + dy2 * c) - dxdy * b)\n dp_duv = vec2(\n (b * d.y - 0.5 * a * (2 * uv.x - 2 * xy.x)) * p,\n (b * d.x - 0.5 * c * (2 * uv.y - 2 * xy.y)) * p\n )\n 
dp_dconic = vec3(-0.5 * dx2 * p, -dxdy * p, -0.5 * dy2 * p)\n\n return p, dp_duv, dp_dconic\n\n\n @ti.func\n def conic_grad(p: ti.f32, xy: vec2, uv: vec2, uv_conic: vec3):\n d = xy - uv\n a, b, c = uv_conic\n\n dx2 = d.x**2\n dy2 = d.y**2\n dxdy = d.x * d.y\n \n dp_duv = vec2(\n (b * d.y - 0.5 * a * (2 * uv.x - 2 * xy.x)) * p,\n (b * d.x - 0.5 * c * (2 * uv.y - 2 * xy.y)) * p\n )\n dp_dconic = vec3(-0.5 * dx2 * p, -dxdy * p, -0.5 * dy2 * p)\n\n return dp_duv, dp_dconic\n\n\n @ti.func\n def cov_inv_basis(uv_cov: vec3, scale: dtype) -> mat2:\n basis = ti.Matrix.cols(cov_axes(uv_cov))\n return (basis * scale).inverse()\n\n\n @ti.func\n def quat_to_mat(q:vec4) -> mat3:\n x, y, z, w = q\n x2, y2, z2 = x*x, y*y, z*z\n\n return mat3(\n 1 - 2*y2 - 2*z2, 2*x*y - 2*w*z, 2*x*z + 2*w*y,\n 2*x*y + 2*w*z, 1 - 2*x2 - 2*z2, 2*y*z - 2*w*x,\n 2*x*z - 2*w*y, 2*y*z + 2*w*x, 1 - 2*x2 - 2*y2\n )\n\n @ti.func\n def join_rt(r:mat3, t:vec3) -> mat4:\n return mat4(\n r[0, 0], r[0, 1], r[0, 2], t[0],\n r[1, 0], r[1, 1], r[1, 2], t[1],\n r[2, 0], r[2, 1], r[2, 2], t[2],\n 0, 0, 0, 1\n )\n\n @ti.func\n def split_rt(rt:mat4) -> ti.template():\n return rt[:3, :3], rt[:3, 3]\n\n\n @ti.func\n def qt_to_mat(q:vec4, t:vec3) -> mat4:\n r = quat_to_mat(q)\n return mat4(\n r[0, 0], r[0, 1], r[0, 2], t[0],\n r[1, 0], r[1, 1], r[1, 2], t[1],\n r[2, 0], r[2, 1], r[2, 2], t[2],\n 0, 0, 0, 1\n )\n \n\n @ti.func\n def scaling_matrix(scale:vec3) -> mat3:\n return mat3(\n scale.x, 0, 0,\n 0, scale.y, 0,\n 0, 0, scale.z\n )\n\n @ti.func\n def quat_mul(q1: vec4, q2: vec4) -> vec4:\n return vec4(\n q1.w * q2.x + q1.x * q2.w + q1.y * q2.z - q1.z * q2.y,\n q1.w * q2.y - q1.x * q2.z + q1.y * q2.w + q1.z * q2.x,\n q1.w * q2.z + q1.x * q2.y - q1.y * q2.x + q1.z * q2.w,\n q1.w * q2.w - q1.x * q2.x - q1.y * q2.y - q1.z * q2.z,\n )\n\n @ti.func\n def quat_conj(q: vec4) -> vec4:\n return vec4(-q.x, -q.y, -q.z, q.w)\n\n\n @ti.func\n def quat_rotate(q: vec4, v: vec3) -> vec3:\n qv = vec4(*v, 0.0)\n q_rot = quat_mul(q, quat_mul(qv, quat_mul(q)))\n return q_rot.xyz\n\n\n return SimpleNamespace(**locals())"
},
{
"identifier": "torch_taichi",
"path": "taichi_splatting/taichi_lib/conversions.py",
"snippet": "def struct_size(ti_struct:ti.lang.struct.StructType):"
}
] | from functools import cache
from typing import Tuple
from beartype import beartype
from taichi_splatting.misc.autograd import restore_grad
from .params import CameraParams
from taichi_splatting.taichi_lib.generic import make_library
from taichi_splatting.taichi_lib.conversions import torch_taichi
import taichi as ti
import torch | 5,476 |
@cache
def project_to_image_function(torch_dtype=torch.float32,
blur_cov:float = 0.3):
dtype = torch_taichi[torch_dtype]
lib = make_library(dtype)
Gaussian3D, Gaussian2D = lib.Gaussian3D, lib.Gaussian2D
@ti.kernel
def project_perspective_kernel(
gaussians: ti.types.ndarray(Gaussian3D.vec, ndim=1), # (N, 3 + feature_vec.n1) # input
T_image_camera: ti.types.ndarray(ndim=2), # (3, 3) camera projection
T_camera_world: ti.types.ndarray(ndim=2), # (4, 4)
points: ti.types.ndarray(Gaussian2D.vec, ndim=1), # (N, 6)
depth_var: ti.types.ndarray(lib.vec3, ndim=1), # (N, 3)
):
for idx in range(gaussians.shape[0]):
position, scale, rotation, alpha = Gaussian3D.unpack_activate(gaussians[idx])
camera_image = lib.mat3_from_ndarray(T_image_camera)
camera_world = lib.mat4_from_ndarray(T_camera_world)
uv, point_in_camera = lib.project_perspective_camera_image(
position, camera_world, camera_image)
cov_in_camera = lib.gaussian_covariance_in_camera(
camera_world, rotation, scale)
uv_cov = lib.upper(lib.project_perspective_gaussian(
camera_image, point_in_camera, cov_in_camera))
# add small fudge factor blur to avoid numerical issues
uv_cov += lib.vec3([blur_cov, 0, blur_cov])
uv_conic = lib.inverse_cov(uv_cov)
depth_var[idx] = lib.vec3(point_in_camera.z, cov_in_camera[2, 2], point_in_camera.z ** 2)
points[idx] = Gaussian2D.to_vec(
uv=uv.xy,
uv_conic=uv_conic,
alpha=alpha,
)
class _module_function(torch.autograd.Function):
@staticmethod
def forward(ctx, gaussians, T_image_camera, T_camera_world):
dtype, device = T_image_camera.dtype, T_image_camera.device
points = torch.empty((gaussians.shape[0], Gaussian2D.vec.n), dtype=dtype, device=device)
depth_vars = torch.empty((gaussians.shape[0], 3), dtype=dtype, device=device)
project_perspective_kernel(gaussians,
T_image_camera, T_camera_world,
points, depth_vars)
ctx.save_for_backward(gaussians, T_image_camera, T_camera_world, points, depth_vars)
return points, depth_vars
@staticmethod
def backward(ctx, dpoints, ddepth_vars):
gaussians, T_image_camera, T_camera_world, points, depth_vars = ctx.saved_tensors
|
@cache
def project_to_image_function(torch_dtype=torch.float32,
blur_cov:float = 0.3):
dtype = torch_taichi[torch_dtype]
lib = make_library(dtype)
Gaussian3D, Gaussian2D = lib.Gaussian3D, lib.Gaussian2D
@ti.kernel
def project_perspective_kernel(
gaussians: ti.types.ndarray(Gaussian3D.vec, ndim=1), # (N, 3 + feature_vec.n1) # input
T_image_camera: ti.types.ndarray(ndim=2), # (3, 3) camera projection
T_camera_world: ti.types.ndarray(ndim=2), # (4, 4)
points: ti.types.ndarray(Gaussian2D.vec, ndim=1), # (N, 6)
depth_var: ti.types.ndarray(lib.vec3, ndim=1), # (N, 3)
):
for idx in range(gaussians.shape[0]):
position, scale, rotation, alpha = Gaussian3D.unpack_activate(gaussians[idx])
camera_image = lib.mat3_from_ndarray(T_image_camera)
camera_world = lib.mat4_from_ndarray(T_camera_world)
uv, point_in_camera = lib.project_perspective_camera_image(
position, camera_world, camera_image)
cov_in_camera = lib.gaussian_covariance_in_camera(
camera_world, rotation, scale)
uv_cov = lib.upper(lib.project_perspective_gaussian(
camera_image, point_in_camera, cov_in_camera))
# add small fudge factor blur to avoid numerical issues
uv_cov += lib.vec3([blur_cov, 0, blur_cov])
uv_conic = lib.inverse_cov(uv_cov)
depth_var[idx] = lib.vec3(point_in_camera.z, cov_in_camera[2, 2], point_in_camera.z ** 2)
points[idx] = Gaussian2D.to_vec(
uv=uv.xy,
uv_conic=uv_conic,
alpha=alpha,
)
class _module_function(torch.autograd.Function):
@staticmethod
def forward(ctx, gaussians, T_image_camera, T_camera_world):
dtype, device = T_image_camera.dtype, T_image_camera.device
points = torch.empty((gaussians.shape[0], Gaussian2D.vec.n), dtype=dtype, device=device)
depth_vars = torch.empty((gaussians.shape[0], 3), dtype=dtype, device=device)
project_perspective_kernel(gaussians,
T_image_camera, T_camera_world,
points, depth_vars)
ctx.save_for_backward(gaussians, T_image_camera, T_camera_world, points, depth_vars)
return points, depth_vars
@staticmethod
def backward(ctx, dpoints, ddepth_vars):
gaussians, T_image_camera, T_camera_world, points, depth_vars = ctx.saved_tensors
| with restore_grad(gaussians, T_image_camera, T_camera_world, points, depth_vars): | 0 | 2023-12-17 15:26:52+00:00 | 8k |
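The projection record above boils down to two pieces of math: the 2x3 perspective Jacobian returned by get_projective_transform_jacobian, and the first-order covariance propagation cov_uv = J Sigma_cam J^T performed by project_perspective_gaussian, with a small diagonal blur (blur_cov) added for numerical stability. Here is a plain-PyTorch paraphrase of that math as a reading aid; the function names and the sample intrinsics are mine, not the library's.

import torch

def perspective_jacobian(K, p_cam):
    # K: (3, 3) pinhole intrinsics, p_cam: point in camera space with z > 0.
    fx, fy = float(K[0, 0]), float(K[1, 1])
    x, y, z = (float(v) for v in p_cam)
    return torch.tensor([
        [fx / z, 0.0,    -fx * x / (z * z)],
        [0.0,    fy / z, -fy * y / (z * z)],
    ])

def project_gaussian_cov(K, p_cam, cov_cam, blur_cov=0.3):
    # First-order propagation of the 3x3 camera-space covariance to a 2x2 image-space one,
    # plus the same isotropic blur term the kernel adds to the projected covariance.
    J = perspective_jacobian(K, p_cam)
    return J @ cov_cam @ J.T + blur_cov * torch.eye(2)

if __name__ == "__main__":
    K = torch.tensor([[500.0, 0.0, 320.0],
                      [0.0, 500.0, 240.0],
                      [0.0, 0.0, 1.0]])
    p_cam = torch.tensor([0.2, -0.1, 4.0])
    cov_cam = 0.05 * torch.eye(3)
    print(project_gaussian_cov(K, p_cam, cov_cam))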
camenduru/FreeInit-hf | animatediff/utils/util.py | [
{
"identifier": "convert_ldm_unet_checkpoint",
"path": "animatediff/utils/convert_from_ckpt.py",
"snippet": "def convert_ldm_unet_checkpoint(checkpoint, config, path=None, extract_ema=False, controlnet=False):\n \"\"\"\n Takes a state dict and a config, and returns a converted checkpoint.\n \"\"\"\n\n # extract state_dict for UNet\n unet_state_dict = {}\n keys = list(checkpoint.keys())\n\n if controlnet:\n unet_key = \"control_model.\"\n else:\n unet_key = \"model.diffusion_model.\"\n\n # at least a 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA\n if sum(k.startswith(\"model_ema\") for k in keys) > 100 and extract_ema:\n print(f\"Checkpoint {path} has both EMA and non-EMA weights.\")\n print(\n \"In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA\"\n \" weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag.\"\n )\n for key in keys:\n if key.startswith(\"model.diffusion_model\"):\n flat_ema_key = \"model_ema.\" + \"\".join(key.split(\".\")[1:])\n unet_state_dict[key.replace(unet_key, \"\")] = checkpoint.pop(flat_ema_key)\n else:\n if sum(k.startswith(\"model_ema\") for k in keys) > 100:\n print(\n \"In this conversion only the non-EMA weights are extracted. If you want to instead extract the EMA\"\n \" weights (usually better for inference), please make sure to add the `--extract_ema` flag.\"\n )\n\n for key in keys:\n if key.startswith(unet_key):\n unet_state_dict[key.replace(unet_key, \"\")] = checkpoint.pop(key)\n\n new_checkpoint = {}\n\n new_checkpoint[\"time_embedding.linear_1.weight\"] = unet_state_dict[\"time_embed.0.weight\"]\n new_checkpoint[\"time_embedding.linear_1.bias\"] = unet_state_dict[\"time_embed.0.bias\"]\n new_checkpoint[\"time_embedding.linear_2.weight\"] = unet_state_dict[\"time_embed.2.weight\"]\n new_checkpoint[\"time_embedding.linear_2.bias\"] = unet_state_dict[\"time_embed.2.bias\"]\n\n if config[\"class_embed_type\"] is None:\n # No parameters to port\n ...\n elif config[\"class_embed_type\"] == \"timestep\" or config[\"class_embed_type\"] == \"projection\":\n new_checkpoint[\"class_embedding.linear_1.weight\"] = unet_state_dict[\"label_emb.0.0.weight\"]\n new_checkpoint[\"class_embedding.linear_1.bias\"] = unet_state_dict[\"label_emb.0.0.bias\"]\n new_checkpoint[\"class_embedding.linear_2.weight\"] = unet_state_dict[\"label_emb.0.2.weight\"]\n new_checkpoint[\"class_embedding.linear_2.bias\"] = unet_state_dict[\"label_emb.0.2.bias\"]\n else:\n raise NotImplementedError(f\"Not implemented `class_embed_type`: {config['class_embed_type']}\")\n\n new_checkpoint[\"conv_in.weight\"] = unet_state_dict[\"input_blocks.0.0.weight\"]\n new_checkpoint[\"conv_in.bias\"] = unet_state_dict[\"input_blocks.0.0.bias\"]\n\n if not controlnet:\n new_checkpoint[\"conv_norm_out.weight\"] = unet_state_dict[\"out.0.weight\"]\n new_checkpoint[\"conv_norm_out.bias\"] = unet_state_dict[\"out.0.bias\"]\n new_checkpoint[\"conv_out.weight\"] = unet_state_dict[\"out.2.weight\"]\n new_checkpoint[\"conv_out.bias\"] = unet_state_dict[\"out.2.bias\"]\n\n # Retrieves the keys for the input blocks only\n num_input_blocks = len({\".\".join(layer.split(\".\")[:2]) for layer in unet_state_dict if \"input_blocks\" in layer})\n input_blocks = {\n layer_id: [key for key in unet_state_dict if f\"input_blocks.{layer_id}\" in key]\n for layer_id in range(num_input_blocks)\n }\n\n # Retrieves the keys for the middle blocks only\n num_middle_blocks = len({\".\".join(layer.split(\".\")[:2]) for layer in unet_state_dict if \"middle_block\" in layer})\n middle_blocks = {\n layer_id: 
[key for key in unet_state_dict if f\"middle_block.{layer_id}\" in key]\n for layer_id in range(num_middle_blocks)\n }\n\n # Retrieves the keys for the output blocks only\n num_output_blocks = len({\".\".join(layer.split(\".\")[:2]) for layer in unet_state_dict if \"output_blocks\" in layer})\n output_blocks = {\n layer_id: [key for key in unet_state_dict if f\"output_blocks.{layer_id}\" in key]\n for layer_id in range(num_output_blocks)\n }\n\n for i in range(1, num_input_blocks):\n block_id = (i - 1) // (config[\"layers_per_block\"] + 1)\n layer_in_block_id = (i - 1) % (config[\"layers_per_block\"] + 1)\n\n resnets = [\n key for key in input_blocks[i] if f\"input_blocks.{i}.0\" in key and f\"input_blocks.{i}.0.op\" not in key\n ]\n attentions = [key for key in input_blocks[i] if f\"input_blocks.{i}.1\" in key]\n\n if f\"input_blocks.{i}.0.op.weight\" in unet_state_dict:\n new_checkpoint[f\"down_blocks.{block_id}.downsamplers.0.conv.weight\"] = unet_state_dict.pop(\n f\"input_blocks.{i}.0.op.weight\"\n )\n new_checkpoint[f\"down_blocks.{block_id}.downsamplers.0.conv.bias\"] = unet_state_dict.pop(\n f\"input_blocks.{i}.0.op.bias\"\n )\n\n paths = renew_resnet_paths(resnets)\n meta_path = {\"old\": f\"input_blocks.{i}.0\", \"new\": f\"down_blocks.{block_id}.resnets.{layer_in_block_id}\"}\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n if len(attentions):\n paths = renew_attention_paths(attentions)\n meta_path = {\"old\": f\"input_blocks.{i}.1\", \"new\": f\"down_blocks.{block_id}.attentions.{layer_in_block_id}\"}\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n resnet_0 = middle_blocks[0]\n attentions = middle_blocks[1]\n resnet_1 = middle_blocks[2]\n\n resnet_0_paths = renew_resnet_paths(resnet_0)\n assign_to_checkpoint(resnet_0_paths, new_checkpoint, unet_state_dict, config=config)\n\n resnet_1_paths = renew_resnet_paths(resnet_1)\n assign_to_checkpoint(resnet_1_paths, new_checkpoint, unet_state_dict, config=config)\n\n attentions_paths = renew_attention_paths(attentions)\n meta_path = {\"old\": \"middle_block.1\", \"new\": \"mid_block.attentions.0\"}\n assign_to_checkpoint(\n attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n for i in range(num_output_blocks):\n block_id = i // (config[\"layers_per_block\"] + 1)\n layer_in_block_id = i % (config[\"layers_per_block\"] + 1)\n output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]\n output_block_list = {}\n\n for layer in output_block_layers:\n layer_id, layer_name = layer.split(\".\")[0], shave_segments(layer, 1)\n if layer_id in output_block_list:\n output_block_list[layer_id].append(layer_name)\n else:\n output_block_list[layer_id] = [layer_name]\n\n if len(output_block_list) > 1:\n resnets = [key for key in output_blocks[i] if f\"output_blocks.{i}.0\" in key]\n attentions = [key for key in output_blocks[i] if f\"output_blocks.{i}.1\" in key]\n\n resnet_0_paths = renew_resnet_paths(resnets)\n paths = renew_resnet_paths(resnets)\n\n meta_path = {\"old\": f\"output_blocks.{i}.0\", \"new\": f\"up_blocks.{block_id}.resnets.{layer_in_block_id}\"}\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n output_block_list = {k: sorted(v) for k, v in output_block_list.items()}\n if [\"conv.bias\", \"conv.weight\"] in output_block_list.values():\n 
index = list(output_block_list.values()).index([\"conv.bias\", \"conv.weight\"])\n new_checkpoint[f\"up_blocks.{block_id}.upsamplers.0.conv.weight\"] = unet_state_dict[\n f\"output_blocks.{i}.{index}.conv.weight\"\n ]\n new_checkpoint[f\"up_blocks.{block_id}.upsamplers.0.conv.bias\"] = unet_state_dict[\n f\"output_blocks.{i}.{index}.conv.bias\"\n ]\n\n # Clear attentions as they have been attributed above.\n if len(attentions) == 2:\n attentions = []\n\n if len(attentions):\n paths = renew_attention_paths(attentions)\n meta_path = {\n \"old\": f\"output_blocks.{i}.1\",\n \"new\": f\"up_blocks.{block_id}.attentions.{layer_in_block_id}\",\n }\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n else:\n resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)\n for path in resnet_0_paths:\n old_path = \".\".join([\"output_blocks\", str(i), path[\"old\"]])\n new_path = \".\".join([\"up_blocks\", str(block_id), \"resnets\", str(layer_in_block_id), path[\"new\"]])\n\n new_checkpoint[new_path] = unet_state_dict[old_path]\n\n if controlnet:\n # conditioning embedding\n\n orig_index = 0\n\n new_checkpoint[\"controlnet_cond_embedding.conv_in.weight\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.weight\"\n )\n new_checkpoint[\"controlnet_cond_embedding.conv_in.bias\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.bias\"\n )\n\n orig_index += 2\n\n diffusers_index = 0\n\n while diffusers_index < 6:\n new_checkpoint[f\"controlnet_cond_embedding.blocks.{diffusers_index}.weight\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.weight\"\n )\n new_checkpoint[f\"controlnet_cond_embedding.blocks.{diffusers_index}.bias\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.bias\"\n )\n diffusers_index += 1\n orig_index += 2\n\n new_checkpoint[\"controlnet_cond_embedding.conv_out.weight\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.weight\"\n )\n new_checkpoint[\"controlnet_cond_embedding.conv_out.bias\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.bias\"\n )\n\n # down blocks\n for i in range(num_input_blocks):\n new_checkpoint[f\"controlnet_down_blocks.{i}.weight\"] = unet_state_dict.pop(f\"zero_convs.{i}.0.weight\")\n new_checkpoint[f\"controlnet_down_blocks.{i}.bias\"] = unet_state_dict.pop(f\"zero_convs.{i}.0.bias\")\n\n # mid block\n new_checkpoint[\"controlnet_mid_block.weight\"] = unet_state_dict.pop(\"middle_block_out.0.weight\")\n new_checkpoint[\"controlnet_mid_block.bias\"] = unet_state_dict.pop(\"middle_block_out.0.bias\")\n\n return new_checkpoint"
},
{
"identifier": "convert_ldm_clip_checkpoint",
"path": "animatediff/utils/convert_from_ckpt.py",
"snippet": "def convert_ldm_clip_checkpoint(checkpoint):\n text_model = CLIPTextModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n keys = list(checkpoint.keys())\n\n text_model_dict = {}\n\n for key in keys:\n if key.startswith(\"cond_stage_model.transformer\"):\n text_model_dict[key[len(\"cond_stage_model.transformer.\") :]] = checkpoint[key]\n\n text_model.load_state_dict(text_model_dict)\n\n return text_model"
},
{
"identifier": "convert_ldm_vae_checkpoint",
"path": "animatediff/utils/convert_from_ckpt.py",
"snippet": "def convert_ldm_vae_checkpoint(checkpoint, config):\n # extract state dict for VAE\n vae_state_dict = {}\n vae_key = \"first_stage_model.\"\n keys = list(checkpoint.keys())\n for key in keys:\n if key.startswith(vae_key):\n vae_state_dict[key.replace(vae_key, \"\")] = checkpoint.get(key)\n\n new_checkpoint = {}\n\n new_checkpoint[\"encoder.conv_in.weight\"] = vae_state_dict[\"encoder.conv_in.weight\"]\n new_checkpoint[\"encoder.conv_in.bias\"] = vae_state_dict[\"encoder.conv_in.bias\"]\n new_checkpoint[\"encoder.conv_out.weight\"] = vae_state_dict[\"encoder.conv_out.weight\"]\n new_checkpoint[\"encoder.conv_out.bias\"] = vae_state_dict[\"encoder.conv_out.bias\"]\n new_checkpoint[\"encoder.conv_norm_out.weight\"] = vae_state_dict[\"encoder.norm_out.weight\"]\n new_checkpoint[\"encoder.conv_norm_out.bias\"] = vae_state_dict[\"encoder.norm_out.bias\"]\n\n new_checkpoint[\"decoder.conv_in.weight\"] = vae_state_dict[\"decoder.conv_in.weight\"]\n new_checkpoint[\"decoder.conv_in.bias\"] = vae_state_dict[\"decoder.conv_in.bias\"]\n new_checkpoint[\"decoder.conv_out.weight\"] = vae_state_dict[\"decoder.conv_out.weight\"]\n new_checkpoint[\"decoder.conv_out.bias\"] = vae_state_dict[\"decoder.conv_out.bias\"]\n new_checkpoint[\"decoder.conv_norm_out.weight\"] = vae_state_dict[\"decoder.norm_out.weight\"]\n new_checkpoint[\"decoder.conv_norm_out.bias\"] = vae_state_dict[\"decoder.norm_out.bias\"]\n\n new_checkpoint[\"quant_conv.weight\"] = vae_state_dict[\"quant_conv.weight\"]\n new_checkpoint[\"quant_conv.bias\"] = vae_state_dict[\"quant_conv.bias\"]\n new_checkpoint[\"post_quant_conv.weight\"] = vae_state_dict[\"post_quant_conv.weight\"]\n new_checkpoint[\"post_quant_conv.bias\"] = vae_state_dict[\"post_quant_conv.bias\"]\n\n # Retrieves the keys for the encoder down blocks only\n num_down_blocks = len({\".\".join(layer.split(\".\")[:3]) for layer in vae_state_dict if \"encoder.down\" in layer})\n down_blocks = {\n layer_id: [key for key in vae_state_dict if f\"down.{layer_id}\" in key] for layer_id in range(num_down_blocks)\n }\n\n # Retrieves the keys for the decoder up blocks only\n num_up_blocks = len({\".\".join(layer.split(\".\")[:3]) for layer in vae_state_dict if \"decoder.up\" in layer})\n up_blocks = {\n layer_id: [key for key in vae_state_dict if f\"up.{layer_id}\" in key] for layer_id in range(num_up_blocks)\n }\n\n for i in range(num_down_blocks):\n resnets = [key for key in down_blocks[i] if f\"down.{i}\" in key and f\"down.{i}.downsample\" not in key]\n\n if f\"encoder.down.{i}.downsample.conv.weight\" in vae_state_dict:\n new_checkpoint[f\"encoder.down_blocks.{i}.downsamplers.0.conv.weight\"] = vae_state_dict.pop(\n f\"encoder.down.{i}.downsample.conv.weight\"\n )\n new_checkpoint[f\"encoder.down_blocks.{i}.downsamplers.0.conv.bias\"] = vae_state_dict.pop(\n f\"encoder.down.{i}.downsample.conv.bias\"\n )\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"down.{i}.block\", \"new\": f\"down_blocks.{i}.resnets\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_resnets = [key for key in vae_state_dict if \"encoder.mid.block\" in key]\n num_mid_res_blocks = 2\n for i in range(1, num_mid_res_blocks + 1):\n resnets = [key for key in mid_resnets if f\"encoder.mid.block_{i}\" in key]\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"mid.block_{i}\", \"new\": f\"mid_block.resnets.{i - 1}\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, 
additional_replacements=[meta_path], config=config)\n\n mid_attentions = [key for key in vae_state_dict if \"encoder.mid.attn\" in key]\n paths = renew_vae_attention_paths(mid_attentions)\n meta_path = {\"old\": \"mid.attn_1\", \"new\": \"mid_block.attentions.0\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n conv_attn_to_linear(new_checkpoint)\n\n for i in range(num_up_blocks):\n block_id = num_up_blocks - 1 - i\n resnets = [\n key for key in up_blocks[block_id] if f\"up.{block_id}\" in key and f\"up.{block_id}.upsample\" not in key\n ]\n\n if f\"decoder.up.{block_id}.upsample.conv.weight\" in vae_state_dict:\n new_checkpoint[f\"decoder.up_blocks.{i}.upsamplers.0.conv.weight\"] = vae_state_dict[\n f\"decoder.up.{block_id}.upsample.conv.weight\"\n ]\n new_checkpoint[f\"decoder.up_blocks.{i}.upsamplers.0.conv.bias\"] = vae_state_dict[\n f\"decoder.up.{block_id}.upsample.conv.bias\"\n ]\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"up.{block_id}.block\", \"new\": f\"up_blocks.{i}.resnets\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_resnets = [key for key in vae_state_dict if \"decoder.mid.block\" in key]\n num_mid_res_blocks = 2\n for i in range(1, num_mid_res_blocks + 1):\n resnets = [key for key in mid_resnets if f\"decoder.mid.block_{i}\" in key]\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"mid.block_{i}\", \"new\": f\"mid_block.resnets.{i - 1}\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_attentions = [key for key in vae_state_dict if \"decoder.mid.attn\" in key]\n paths = renew_vae_attention_paths(mid_attentions)\n meta_path = {\"old\": \"mid.attn_1\", \"new\": \"mid_block.attentions.0\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n conv_attn_to_linear(new_checkpoint)\n return new_checkpoint"
},
{
"identifier": "convert_lora",
"path": "animatediff/utils/convert_lora_safetensor_to_diffusers.py",
"snippet": "def convert_lora(pipeline, state_dict, LORA_PREFIX_UNET=\"lora_unet\", LORA_PREFIX_TEXT_ENCODER=\"lora_te\", alpha=0.6):\n # load base model\n # pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)\n\n # load LoRA weight from .safetensors\n # state_dict = load_file(checkpoint_path)\n\n visited = []\n\n # directly update weight in diffusers model\n for key in state_dict:\n # it is suggested to print out the key, it usually will be something like below\n # \"lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight\"\n\n # as we have set the alpha beforehand, so just skip\n if \".alpha\" in key or key in visited:\n continue\n\n if \"text\" in key:\n layer_infos = key.split(\".\")[0].split(LORA_PREFIX_TEXT_ENCODER + \"_\")[-1].split(\"_\")\n curr_layer = pipeline.text_encoder\n else:\n layer_infos = key.split(\".\")[0].split(LORA_PREFIX_UNET + \"_\")[-1].split(\"_\")\n curr_layer = pipeline.unet\n\n # find the target layer\n temp_name = layer_infos.pop(0)\n while len(layer_infos) > -1:\n try:\n curr_layer = curr_layer.__getattr__(temp_name)\n if len(layer_infos) > 0:\n temp_name = layer_infos.pop(0)\n elif len(layer_infos) == 0:\n break\n except Exception:\n if len(temp_name) > 0:\n temp_name += \"_\" + layer_infos.pop(0)\n else:\n temp_name = layer_infos.pop(0)\n\n pair_keys = []\n if \"lora_down\" in key:\n pair_keys.append(key.replace(\"lora_down\", \"lora_up\"))\n pair_keys.append(key)\n else:\n pair_keys.append(key)\n pair_keys.append(key.replace(\"lora_up\", \"lora_down\"))\n\n # update weight\n if len(state_dict[pair_keys[0]].shape) == 4:\n weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)\n weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)\n curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3).to(curr_layer.weight.data.device)\n else:\n weight_up = state_dict[pair_keys[0]].to(torch.float32)\n weight_down = state_dict[pair_keys[1]].to(torch.float32)\n curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).to(curr_layer.weight.data.device)\n\n # update visited list\n for item in pair_keys:\n visited.append(item)\n\n return pipeline"
},
{
"identifier": "convert_motion_lora_ckpt_to_diffusers",
"path": "animatediff/utils/convert_lora_safetensor_to_diffusers.py",
"snippet": "def convert_motion_lora_ckpt_to_diffusers(pipeline, state_dict, alpha=1.0):\n # directly update weight in diffusers model\n for key in state_dict:\n # only process lora down key\n if \"up.\" in key: continue\n\n up_key = key.replace(\".down.\", \".up.\")\n model_key = key.replace(\"processor.\", \"\").replace(\"_lora\", \"\").replace(\"down.\", \"\").replace(\"up.\", \"\")\n model_key = model_key.replace(\"to_out.\", \"to_out.0.\")\n layer_infos = model_key.split(\".\")[:-1]\n\n curr_layer = pipeline.unet\n while len(layer_infos) > 0:\n temp_name = layer_infos.pop(0)\n curr_layer = curr_layer.__getattr__(temp_name)\n\n weight_down = state_dict[key]\n weight_up = state_dict[up_key]\n curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).to(curr_layer.weight.data.device)\n\n return pipeline"
}
] | import os
import imageio
import numpy as np
import torch
import torchvision
import torch.distributed as dist
from typing import Union
from safetensors import safe_open
from tqdm import tqdm
from einops import rearrange
from animatediff.utils.convert_from_ckpt import convert_ldm_unet_checkpoint, convert_ldm_clip_checkpoint, convert_ldm_vae_checkpoint
from animatediff.utils.convert_lora_safetensor_to_diffusers import convert_lora, convert_motion_lora_ckpt_to_diffusers | 6,905 |
def zero_rank_print(s):
    # print when torch.distributed is not initialized, or only from rank 0 of the distributed group
    if (not dist.is_initialized()) or (dist.is_initialized() and dist.get_rank() == 0): print("### " + s)
def save_videos_grid(videos: torch.Tensor, path: str, rescale=False, n_rows=6, fps=8):
videos = rearrange(videos, "b c t h w -> t b c h w")
outputs = []
for x in videos:
x = torchvision.utils.make_grid(x, nrow=n_rows)
x = x.transpose(0, 1).transpose(1, 2).squeeze(-1)
if rescale:
x = (x + 1.0) / 2.0 # -1,1 -> 0,1
x = (x * 255).numpy().astype(np.uint8)
outputs.append(x)
os.makedirs(os.path.dirname(path), exist_ok=True)
imageio.mimsave(path, outputs, fps=fps)
# DDIM Inversion
@torch.no_grad()
def init_prompt(prompt, pipeline):
uncond_input = pipeline.tokenizer(
[""], padding="max_length", max_length=pipeline.tokenizer.model_max_length,
return_tensors="pt"
)
uncond_embeddings = pipeline.text_encoder(uncond_input.input_ids.to(pipeline.device))[0]
text_input = pipeline.tokenizer(
[prompt],
padding="max_length",
max_length=pipeline.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_embeddings = pipeline.text_encoder(text_input.input_ids.to(pipeline.device))[0]
context = torch.cat([uncond_embeddings, text_embeddings])
return context
def next_step(model_output: Union[torch.FloatTensor, np.ndarray], timestep: int,
sample: Union[torch.FloatTensor, np.ndarray], ddim_scheduler):
timestep, next_timestep = min(
timestep - ddim_scheduler.config.num_train_timesteps // ddim_scheduler.num_inference_steps, 999), timestep
alpha_prod_t = ddim_scheduler.alphas_cumprod[timestep] if timestep >= 0 else ddim_scheduler.final_alpha_cumprod
alpha_prod_t_next = ddim_scheduler.alphas_cumprod[next_timestep]
beta_prod_t = 1 - alpha_prod_t
next_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
next_sample_direction = (1 - alpha_prod_t_next) ** 0.5 * model_output
next_sample = alpha_prod_t_next ** 0.5 * next_original_sample + next_sample_direction
return next_sample
def get_noise_pred_single(latents, t, context, unet):
noise_pred = unet(latents, t, encoder_hidden_states=context)["sample"]
return noise_pred
@torch.no_grad()
def ddim_loop(pipeline, ddim_scheduler, latent, num_inv_steps, prompt):
context = init_prompt(prompt, pipeline)
uncond_embeddings, cond_embeddings = context.chunk(2)
all_latent = [latent]
latent = latent.clone().detach()
for i in tqdm(range(num_inv_steps)):
t = ddim_scheduler.timesteps[len(ddim_scheduler.timesteps) - i - 1]
noise_pred = get_noise_pred_single(latent, t, cond_embeddings, pipeline.unet)
latent = next_step(noise_pred, t, latent, ddim_scheduler)
all_latent.append(latent)
return all_latent
@torch.no_grad()
def ddim_inversion(pipeline, ddim_scheduler, video_latent, num_inv_steps, prompt=""):
ddim_latents = ddim_loop(pipeline, ddim_scheduler, video_latent, num_inv_steps, prompt)
return ddim_latents
def load_weights(
animation_pipeline,
# motion module
motion_module_path = "",
motion_module_lora_configs = [],
# image layers
dreambooth_model_path = "",
lora_model_path = "",
lora_alpha = 0.8,
):
# 1.1 motion module
unet_state_dict = {}
if motion_module_path != "":
print(f"load motion module from {motion_module_path}")
motion_module_state_dict = torch.load(motion_module_path, map_location="cpu")
motion_module_state_dict = motion_module_state_dict["state_dict"] if "state_dict" in motion_module_state_dict else motion_module_state_dict
unet_state_dict.update({name: param for name, param in motion_module_state_dict.items() if "motion_modules." in name})
missing, unexpected = animation_pipeline.unet.load_state_dict(unet_state_dict, strict=False)
assert len(unexpected) == 0
del unet_state_dict
if dreambooth_model_path != "":
print(f"load dreambooth model from {dreambooth_model_path}")
if dreambooth_model_path.endswith(".safetensors"):
dreambooth_state_dict = {}
with safe_open(dreambooth_model_path, framework="pt", device="cpu") as f:
for key in f.keys():
dreambooth_state_dict[key] = f.get_tensor(key)
elif dreambooth_model_path.endswith(".ckpt"):
dreambooth_state_dict = torch.load(dreambooth_model_path, map_location="cpu")
# 1. vae
converted_vae_checkpoint = convert_ldm_vae_checkpoint(dreambooth_state_dict, animation_pipeline.vae.config)
animation_pipeline.vae.load_state_dict(converted_vae_checkpoint)
# 2. unet
|
def zero_rank_print(s):
    if (not dist.is_initialized()) or (dist.is_initialized() and dist.get_rank() == 0): print("### " + s)
def save_videos_grid(videos: torch.Tensor, path: str, rescale=False, n_rows=6, fps=8):
videos = rearrange(videos, "b c t h w -> t b c h w")
outputs = []
for x in videos:
x = torchvision.utils.make_grid(x, nrow=n_rows)
x = x.transpose(0, 1).transpose(1, 2).squeeze(-1)
if rescale:
x = (x + 1.0) / 2.0 # -1,1 -> 0,1
x = (x * 255).numpy().astype(np.uint8)
outputs.append(x)
os.makedirs(os.path.dirname(path), exist_ok=True)
imageio.mimsave(path, outputs, fps=fps)
# DDIM Inversion
@torch.no_grad()
def init_prompt(prompt, pipeline):
uncond_input = pipeline.tokenizer(
[""], padding="max_length", max_length=pipeline.tokenizer.model_max_length,
return_tensors="pt"
)
uncond_embeddings = pipeline.text_encoder(uncond_input.input_ids.to(pipeline.device))[0]
text_input = pipeline.tokenizer(
[prompt],
padding="max_length",
max_length=pipeline.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_embeddings = pipeline.text_encoder(text_input.input_ids.to(pipeline.device))[0]
context = torch.cat([uncond_embeddings, text_embeddings])
return context
def next_step(model_output: Union[torch.FloatTensor, np.ndarray], timestep: int,
sample: Union[torch.FloatTensor, np.ndarray], ddim_scheduler):
timestep, next_timestep = min(
timestep - ddim_scheduler.config.num_train_timesteps // ddim_scheduler.num_inference_steps, 999), timestep
alpha_prod_t = ddim_scheduler.alphas_cumprod[timestep] if timestep >= 0 else ddim_scheduler.final_alpha_cumprod
alpha_prod_t_next = ddim_scheduler.alphas_cumprod[next_timestep]
beta_prod_t = 1 - alpha_prod_t
next_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
next_sample_direction = (1 - alpha_prod_t_next) ** 0.5 * model_output
next_sample = alpha_prod_t_next ** 0.5 * next_original_sample + next_sample_direction
return next_sample
def get_noise_pred_single(latents, t, context, unet):
noise_pred = unet(latents, t, encoder_hidden_states=context)["sample"]
return noise_pred
@torch.no_grad()
def ddim_loop(pipeline, ddim_scheduler, latent, num_inv_steps, prompt):
context = init_prompt(prompt, pipeline)
uncond_embeddings, cond_embeddings = context.chunk(2)
all_latent = [latent]
latent = latent.clone().detach()
for i in tqdm(range(num_inv_steps)):
t = ddim_scheduler.timesteps[len(ddim_scheduler.timesteps) - i - 1]
noise_pred = get_noise_pred_single(latent, t, cond_embeddings, pipeline.unet)
latent = next_step(noise_pred, t, latent, ddim_scheduler)
all_latent.append(latent)
return all_latent
@torch.no_grad()
def ddim_inversion(pipeline, ddim_scheduler, video_latent, num_inv_steps, prompt=""):
ddim_latents = ddim_loop(pipeline, ddim_scheduler, video_latent, num_inv_steps, prompt)
return ddim_latents
def load_weights(
animation_pipeline,
# motion module
motion_module_path = "",
motion_module_lora_configs = [],
# image layers
dreambooth_model_path = "",
lora_model_path = "",
lora_alpha = 0.8,
):
# 1.1 motion module
unet_state_dict = {}
if motion_module_path != "":
print(f"load motion module from {motion_module_path}")
motion_module_state_dict = torch.load(motion_module_path, map_location="cpu")
motion_module_state_dict = motion_module_state_dict["state_dict"] if "state_dict" in motion_module_state_dict else motion_module_state_dict
unet_state_dict.update({name: param for name, param in motion_module_state_dict.items() if "motion_modules." in name})
missing, unexpected = animation_pipeline.unet.load_state_dict(unet_state_dict, strict=False)
assert len(unexpected) == 0
del unet_state_dict
if dreambooth_model_path != "":
print(f"load dreambooth model from {dreambooth_model_path}")
if dreambooth_model_path.endswith(".safetensors"):
dreambooth_state_dict = {}
with safe_open(dreambooth_model_path, framework="pt", device="cpu") as f:
for key in f.keys():
dreambooth_state_dict[key] = f.get_tensor(key)
elif dreambooth_model_path.endswith(".ckpt"):
dreambooth_state_dict = torch.load(dreambooth_model_path, map_location="cpu")
# 1. vae
converted_vae_checkpoint = convert_ldm_vae_checkpoint(dreambooth_state_dict, animation_pipeline.vae.config)
animation_pipeline.vae.load_state_dict(converted_vae_checkpoint)
# 2. unet | converted_unet_checkpoint = convert_ldm_unet_checkpoint(dreambooth_state_dict, animation_pipeline.unet.config) | 0 | 2023-12-19 21:06:32+00:00 | 8k |
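The row above centers on DDIM inversion (`next_step`, `ddim_loop`). As a reading aid only — not part of the dataset row — here is a minimal, self-contained sketch of that update rule with a toy noise schedule; `ddim_inversion_step` and the linspace `alphas_cumprod` are illustrative assumptions, not names from the source repo.

import torch

# Toy stand-in for a scheduler's alphas_cumprod (assumption: 1000 training steps).
alphas_cumprod = torch.linspace(0.999, 0.01, 1000)

def ddim_inversion_step(model_output, timestep, next_timestep, sample):
    # Predict x0 from the current sample and the noise estimate (same algebra as next_step above).
    alpha_t = alphas_cumprod[timestep]
    alpha_next = alphas_cumprod[next_timestep]
    pred_x0 = (sample - (1 - alpha_t) ** 0.5 * model_output) / alpha_t ** 0.5
    # Re-noise the predicted x0 towards the next, noisier timestep.
    return alpha_next ** 0.5 * pred_x0 + (1 - alpha_next) ** 0.5 * model_output

sample = torch.randn(1, 4, 8, 8)
noise_pred = torch.randn_like(sample)
next_sample = ddim_inversion_step(noise_pred, timestep=100, next_timestep=120, sample=sample)
print(next_sample.shape)  # torch.Size([1, 4, 8, 8])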
exislow/tidal-dl-ng | tidal_dl_ng/download.py | [
{
"identifier": "Settings",
"path": "tidal_dl_ng/config.py",
"snippet": "class Settings(BaseConfig, metaclass=SingletonMeta):\n cls_model = ModelSettings\n data = None\n\n def __init__(self):\n self.file_path = path_file_settings()\n self.read(self.file_path)"
},
{
"identifier": "REQUESTS_TIMEOUT_SEC",
"path": "tidal_dl_ng/constants.py",
"snippet": "REQUESTS_TIMEOUT_SEC = 45"
},
{
"identifier": "CoverDimensions",
"path": "tidal_dl_ng/constants.py",
"snippet": "class CoverDimensions(Enum):\n Px320: str = \"320x320\"\n Px640: str = \"640x640\"\n Px1280: str = \"1280x1280\""
},
{
"identifier": "MediaType",
"path": "tidal_dl_ng/constants.py",
"snippet": "class MediaType(Enum):\n TRACK: str = \"track\"\n VIDEO: str = \"video\"\n PLAYLIST: str = \"playlist\"\n ALBUM: str = \"album\"\n MIX: str = \"mix\""
},
{
"identifier": "SkipExisting",
"path": "tidal_dl_ng/constants.py",
"snippet": "class SkipExisting(Enum):\n Disabled: bool = False\n Filename: str = \"exact\"\n ExtensionIgnore: str = \"extension_ignore\""
},
{
"identifier": "StreamManifestMimeType",
"path": "tidal_dl_ng/constants.py",
"snippet": "class StreamManifestMimeType(Enum):\n MPD: str = \"application/dash+xml\"\n BTS: str = \"application/vnd.tidal.bts\"\n VIDEO: str = \"video/mp2t\""
},
{
"identifier": "decrypt_file",
"path": "tidal_dl_ng/helper/decryption.py",
"snippet": "def decrypt_file(path_file_encrypted: str, path_file_destination: str, key: str, nonce: str) -> None:\n \"\"\"\n Decrypts an encrypted MQA file given the file, key and nonce\n \"\"\"\n\n # Initialize counter and file decryptor\n counter = Counter.new(64, prefix=nonce, initial_value=0)\n decryptor = AES.new(key, AES.MODE_CTR, counter=counter)\n\n # Open and decrypt\n with open(path_file_encrypted, \"rb\") as f_src:\n audio_decrypted = decryptor.decrypt(f_src.read())\n\n # Replace with decrypted file\n with open(path_file_destination, \"wb\") as f_dst:\n f_dst.write(audio_decrypted)"
},
{
"identifier": "decrypt_security_token",
"path": "tidal_dl_ng/helper/decryption.py",
"snippet": "def decrypt_security_token(security_token: str) -> (str, str):\n \"\"\"\n Decrypts security token into key and nonce pair\n\n security_token should match the securityToken value from the web response\n \"\"\"\n\n # Do not change this\n master_key = \"UIlTTEMmmLfGowo/UC60x2H45W6MdGgTRfo/umg4754=\"\n\n # Decode the base64 strings to ascii strings\n master_key = base64.b64decode(master_key)\n security_token = base64.b64decode(security_token)\n\n # Get the IV from the first 16 bytes of the securityToken\n iv = security_token[:16]\n encrypted_st = security_token[16:]\n\n # Initialize decryptor\n decryptor = AES.new(master_key, AES.MODE_CBC, iv)\n\n # Decrypt the security token\n decrypted_st = decryptor.decrypt(encrypted_st)\n\n # Get the audio stream decryption key and nonce from the decrypted security token\n key = decrypted_st[:16]\n nonce = decrypted_st[16:24]\n\n return key, nonce"
},
{
"identifier": "MediaMissing",
"path": "tidal_dl_ng/helper/exceptions.py",
"snippet": "class MediaMissing(Exception):\n pass"
},
{
"identifier": "MediaUnknown",
"path": "tidal_dl_ng/helper/exceptions.py",
"snippet": "class MediaUnknown(Exception):\n pass"
},
{
"identifier": "UnknownManifestFormat",
"path": "tidal_dl_ng/helper/exceptions.py",
"snippet": "class UnknownManifestFormat(Exception):\n pass"
},
{
"identifier": "check_file_exists",
"path": "tidal_dl_ng/helper/path.py",
"snippet": "def check_file_exists(path_file: str, extension_ignore: bool = False):\n if extension_ignore:\n path_file = Path(path_file).stem + \".*\"\n\n # TODO: Check what happens is (no) files .\n result = bool(glob.glob(path_file))\n\n return result"
},
{
"identifier": "format_path_media",
"path": "tidal_dl_ng/helper/path.py",
"snippet": "def format_path_media(fmt_template: str, media: Track | Album | Playlist | UserPlaylist | Video | Mix) -> str:\n result = fmt_template\n\n # Search track format template for placeholder.\n regex = r\"\\{(.+?)\\}\"\n matches = re.finditer(regex, fmt_template, re.MULTILINE)\n fn_format = get_format_fn(media)\n\n for _matchNum, match in enumerate(matches, start=1):\n template_str = match.group()\n result_fmt = fn_format(match.group(1), media)\n\n if result_fmt:\n value = sanitize_filename(result_fmt)\n result = result.replace(template_str, value)\n\n return result"
},
{
"identifier": "path_file_sanitize",
"path": "tidal_dl_ng/helper/path.py",
"snippet": "def path_file_sanitize(path_file: str, adapt: bool = False) -> (bool, str):\n # Split into path and filename\n pathname, filename = os.path.split(path_file)\n\n # Sanitize path\n try:\n pathname_sanitized = sanitize_filepath(\n pathname, replacement_text=\" \", validate_after_sanitize=True, platform=\"auto\"\n )\n except ValidationError:\n # If adaption of path is allowed in case of an error set path to HOME.\n if adapt:\n pathname_sanitized = Path.home()\n else:\n raise\n\n # Sanitize filename\n try:\n filename_sanitized = sanitize_filename(\n filename, replacement_text=\" \", validate_after_sanitize=True, platform=\"auto\"\n )\n filename_sanitized_extension = Path(filename_sanitized).suffix\n\n # Check if the file extension was removed by shortening the filename length\n if filename_sanitized_extension == \"\":\n # Add the original file extension\n file_extension = \"_\" + Path(path_file).suffix\n filename_sanitized = filename_sanitized[: -len(file_extension)] + file_extension\n except ValidationError as e:\n # TODO: Implement proper exception handling and logging.\n print(e)\n\n raise\n\n # Join path and filename\n result = os.path.join(pathname_sanitized, filename_sanitized)\n\n return result"
},
{
"identifier": "name_builder_item",
"path": "tidal_dl_ng/helper/tidal.py",
"snippet": "def name_builder_item(media: Track) -> str:\n return f\"{name_builder_artist(media)} - {name_builder_title(media)}\""
},
{
"identifier": "WrapperLogger",
"path": "tidal_dl_ng/helper/wrapper.py",
"snippet": "class WrapperLogger:\n fn_print: Callable = None\n\n def __init__(self, fn_print: Callable):\n self.fn_print = fn_print\n\n def debug(self, value):\n self.fn_print(value)\n\n def warning(self, value):\n self.fn_print(value)\n\n def info(self, value):\n self.fn_print(value)\n\n def error(self, value):\n self.fn_print(value)\n\n def critical(self, value):\n self.fn_print(value)"
},
{
"identifier": "Metadata",
"path": "tidal_dl_ng/metadata.py",
"snippet": "class Metadata:\n path_file: str = None\n title: str = None\n album: str = None\n albumartist: str = None\n artists: [str] = None\n copy_right: str = None\n tracknumber: int = None\n discnumber: int = None\n totaldisc: int = None\n totaltrack: int = None\n date: str = None\n composer: [str] = None\n isrc: str = None\n lyrics: str = None\n path_cover: str = None\n url_cover: str = None\n m: mutagen.mp4.MP4 | mutagen.mp4.MP4 | mutagen.flac.FLAC = None\n\n # TODO: What about videos?\n def __init__(\n self,\n path_file: str,\n album: str = \"\",\n title: str = \"\",\n artists: list[str] | None = None,\n copy_right: str = \"\",\n tracknumber: int = 0,\n discnumber: int = 0,\n totaltrack: int = 0,\n totaldisc: int = 0,\n composer: list[str] | None = None,\n isrc: str = \"\",\n albumartist: str = \"\",\n date: str = \"\",\n lyrics: str = \"\",\n path_cover: str = \"\",\n url_cover: str = \"\",\n ):\n self.path_file = path_file\n self.title = title\n self.album = album\n self.albumartist = albumartist\n self.artists = artists\n self.copy_right = copy_right\n self.tracknumber = tracknumber\n self.discnumber = discnumber\n self.totaldisc = totaldisc\n self.totaltrack = totaltrack\n self.date = date\n self.composer = composer\n self.isrc = isrc\n self.lyrics = lyrics\n self.path_cover = path_cover\n self.url_cover = url_cover\n self.m: mutagen.mp4.MP4 | mutagen.flac.FLAC | mutagen.mp3.MP3 = mutagen.File(self.path_file)\n\n def _cover(self) -> bool:\n result: bool = False\n data_cover: str | bytes = self.cover_data(url=self.url_cover, path_file=self.path_cover)\n\n if data_cover:\n if isinstance(self.m, mutagen.flac.FLAC):\n flac_cover = flac.Picture()\n flac_cover.type = id3.PictureType.COVER_FRONT\n flac_cover.data = data_cover\n flac_cover.mime = \"image/jpeg\"\n\n self.m.clear_pictures()\n self.m.add_picture(flac_cover)\n elif isinstance(self.m, mutagen.mp3.MP3):\n self.m.tags.add(APIC(encoding=3, data=data_cover))\n elif isinstance(self.m, mutagen.mp4.MP4):\n cover_mp4 = mp4.MP4Cover(data_cover)\n self.m.tags[\"covr\"] = [cover_mp4]\n\n result = True\n\n return result\n\n def save(self):\n if not self.m.tags:\n self.m.add_tags()\n\n if isinstance(self.m, mutagen.flac.FLAC):\n self.set_flac()\n elif isinstance(self.m, mutagen.mp3.MP3):\n self.set_mp3()\n elif isinstance(self.m, mutagen.mp4.MP4):\n self.set_mp4()\n\n self._cover()\n self.m.save()\n\n return True\n\n def set_flac(self):\n self.m.tags[\"title\"] = self.title\n self.m.tags[\"album\"] = self.album\n self.m.tags[\"albumartist\"] = self.albumartist\n self.m.tags[\"artist\"] = \", \".join(self.artists) if self.artists else \"\"\n self.m.tags[\"copyright\"] = self.copy_right\n self.m.tags[\"tracknumber\"] = str(self.tracknumber)\n self.m.tags[\"tracktotal\"] = str(self.totaltrack)\n self.m.tags[\"discnumber\"] = str(self.discnumber)\n self.m.tags[\"disctotal\"] = str(self.totaldisc)\n self.m.tags[\"date\"] = self.date\n self.m.tags[\"composer\"] = \", \".join(self.composer) if self.composer else \"\"\n self.m.tags[\"isrc\"] = self.isrc\n self.m.tags[\"lyrics\"] = self.lyrics\n\n def set_mp3(self):\n self.m.tags.add(TIT2(encoding=3, text=self.title))\n self.m.tags.add(TALB(encoding=3, text=self.album))\n self.m.tags.add(TOPE(encoding=3, text=self.albumartist))\n self.m.tags.add(TPE1(encoding=3, text=\", \".join(self.artists) if self.artists else \"\"))\n self.m.tags.add(TCOP(encoding=3, text=self.copy_right))\n self.m.tags.add(TRCK(encoding=3, text=str(self.tracknumber)))\n self.m.tags.add(TRCK(encoding=3, 
text=self.discnumber))\n self.m.tags.add(TDRC(encoding=3, text=self.date))\n self.m.tags.add(TCOM(encoding=3, text=\", \".join(self.composer) if self.composer else \"\"))\n self.m.tags.add(TSRC(encoding=3, text=self.isrc))\n self.m.tags.add(USLT(encoding=3, lang=\"eng\", desc=\"desc\", text=self.lyrics))\n\n def set_mp4(self):\n self.m.tags[\"\\xa9nam\"] = self.title\n self.m.tags[\"\\xa9alb\"] = self.album\n self.m.tags[\"aART\"] = self.albumartist\n self.m.tags[\"\\xa9ART\"] = \", \".join(self.artists) if self.artists else \"\"\n self.m.tags[\"cprt\"] = self.copy_right\n self.m.tags[\"trkn\"] = [[self.tracknumber, self.totaltrack]]\n self.m.tags[\"disk\"] = [[self.discnumber, self.totaldisc]]\n # self.m.tags['\\xa9gen'] = self.genre\n self.m.tags[\"\\xa9day\"] = self.date\n self.m.tags[\"\\xa9wrt\"] = \", \".join(self.composer) if self.composer else \"\"\n self.m.tags[\"\\xa9lyr\"] = self.lyrics\n\n def cover_data(self, url: str = None, path_file: str = None) -> str | bytes:\n result: str | bytes = \"\"\n\n if url:\n try:\n result = requests.get(url, timeout=REQUESTS_TIMEOUT_SEC).content\n except Exception as e:\n # TODO: Implement propper logging.\n print(e)\n elif path_file:\n try:\n with open(path_file, \"rb\") as f:\n result = f.read()\n except OSError as e:\n # TODO: Implement propper logging.\n print(e)\n\n return result"
},
{
"identifier": "ProgressBars",
"path": "tidal_dl_ng/model/gui_data.py",
"snippet": "class ProgressBars:\n item: QtCore.Signal\n item_name: QtCore.Signal\n list_item: QtCore.Signal"
},
{
"identifier": "StreamManifest",
"path": "tidal_dl_ng/model/tidal.py",
"snippet": "class StreamManifest:\n codecs: str\n mime_type: str\n urls: [str]\n file_extension: str\n encryption_type: str | None = None\n encryption_key: str | None = None"
}
] | import base64
import json
import os
import random
import shutil
import tempfile
import time
import ffmpeg
import m3u8
import requests
from collections.abc import Callable
from logging import Logger
from uuid import uuid4
from mpegdash.parser import MPEGDASHParser
from requests.exceptions import HTTPError
from rich.progress import Progress, TaskID
from tidalapi import Album, Mix, Playlist, Session, Track, UserPlaylist, Video
from tidal_dl_ng.config import Settings
from tidal_dl_ng.constants import REQUESTS_TIMEOUT_SEC, CoverDimensions, MediaType, SkipExisting, StreamManifestMimeType
from tidal_dl_ng.helper.decryption import decrypt_file, decrypt_security_token
from tidal_dl_ng.helper.exceptions import MediaMissing, MediaUnknown, UnknownManifestFormat
from tidal_dl_ng.helper.path import check_file_exists, format_path_media, path_file_sanitize
from tidal_dl_ng.helper.tidal import name_builder_item
from tidal_dl_ng.helper.wrapper import WrapperLogger
from tidal_dl_ng.metadata import Metadata
from tidal_dl_ng.model.gui_data import ProgressBars
from tidal_dl_ng.model.tidal import StreamManifest | 5,153 | # Advance progress bar.
progress.advance(p_task)
# To send the progress to the GUI, we need to emit the percentage.
if not progress_stdout:
progress_gui.item.emit(progress.tasks[p_task].percentage)
except HTTPError as e:
# TODO: Handle Exception...
fn_logger(e)
# Check if file is encrypted.
needs_decryption = self.is_encrypted(stream_manifest.encryption_type)
if needs_decryption:
key, nonce = decrypt_security_token(stream_manifest.encryption_key)
tmp_path_file_decrypted = path_file + "_decrypted"
decrypt_file(path_file, tmp_path_file_decrypted, key, nonce)
else:
tmp_path_file_decrypted = path_file
# Write metadata to file.
if not isinstance(media, Video):
self.metadata_write(media, tmp_path_file_decrypted)
return tmp_path_file_decrypted
def instantiate_media(
self,
session: Session,
media_type: type[MediaType.TRACK, MediaType.VIDEO, MediaType.ALBUM, MediaType.PLAYLIST, MediaType.MIX],
id_media: str,
) -> Track | Video:
if media_type == MediaType.TRACK:
media = Track(session, id_media)
elif media_type == MediaType.VIDEO:
media = Video(session, id_media)
elif media_type == MediaType.ALBUM:
media = Album(self.session, id_media)
elif media_type == MediaType.PLAYLIST:
media = Playlist(self.session, id_media)
elif media_type == MediaType.MIX:
media = Mix(self.session, id_media)
else:
raise MediaUnknown
return media
def item(
self,
path_base: str,
file_template: str,
fn_logger: Callable,
media: Track | Video = None,
media_id: str = None,
media_type: MediaType = None,
video_download: bool = True,
progress_gui: ProgressBars = None,
progress: Progress = None,
) -> (bool, str):
# If no media instance is provided, we need to create the media instance.
if media_id and media_type:
media = self.instantiate_media(self.session, media_type, media_id)
elif not media:
raise MediaMissing
# If video download is not allowed end here
if not video_download:
fn_logger.info(
f"Video downloads are deactivated (see settings). Skipping video: {name_builder_item(media)}"
)
return False, ""
# Create file name and path
file_name_relative = format_path_media(file_template, media)
path_file = os.path.abspath(os.path.normpath(os.path.join(path_base, file_name_relative)))
# Populate StreamManifest for further download.
if isinstance(media, Track):
stream = media.stream()
manifest: str = stream.manifest
mime_type: str = stream.manifest_mime_type
else:
manifest: str = media.get_url()
mime_type: str = StreamManifestMimeType.VIDEO.value
stream_manifest = self.stream_manifest_parse(manifest, mime_type)
# Sanitize final path_file to fit into OS boundaries.
path_file = path_file_sanitize(path_file + stream_manifest.file_extension, adapt=True)
# Compute if and how downloads need to be skipped.
if self.skip_existing.value:
extension_ignore = self.skip_existing == SkipExisting.ExtensionIgnore
# TODO: Check if extension is already in `path_file` or not.
download_skip = check_file_exists(path_file, extension_ignore=extension_ignore)
else:
download_skip = False
if not download_skip:
# Create a temp directory and file.
with tempfile.TemporaryDirectory(ignore_cleanup_errors=True) as tmp_path_dir:
tmp_path_file = os.path.join(tmp_path_dir, str(uuid4()) + stream_manifest.file_extension)
# Download media.
tmp_path_file = self._download(fn_logger, media, progress, progress_gui, stream_manifest, tmp_path_file)
if isinstance(media, Video) and self.settings.data.video_convert_mp4:
# TODO: Make optional.
# Convert `*.ts` file to `*.mp4` using ffmpeg
tmp_path_file = self._video_convert(tmp_path_file)
path_file = os.path.splitext(path_file)[0] + ".mp4"
# Move final file to the configured destination directory.
os.makedirs(os.path.dirname(path_file), exist_ok=True)
shutil.move(tmp_path_file, path_file)
else:
fn_logger.debug(f"Download skipped, since file exists: '{path_file}'")
return not download_skip, path_file
|
# TODO: Set appropriate client string and use it for video download.
# https://github.com/globocom/m3u8#using-different-http-clients
class RequestsClient:
def download(
self, uri: str, timeout: int = REQUESTS_TIMEOUT_SEC, headers: dict | None = None, verify_ssl: bool = True
):
if not headers:
headers = {}
o = requests.get(uri, timeout=timeout, headers=headers)
return o.text, o.url
class Download:
settings: Settings = None
session: Session = None
skip_existing: SkipExisting = False
def __init__(self, session: Session, skip_existing: SkipExisting = SkipExisting.Disabled):
self.settings = Settings()
self.session = session
self.skip_existing = skip_existing
def _download(
self,
fn_logger: Callable,
media: Track | Video,
progress: Progress,
progress_gui: ProgressBars,
stream_manifest: StreamManifest,
path_file: str,
):
media_name: str = name_builder_item(media)
# Set the correct progress output channel.
if progress_gui is None:
progress_stdout: bool = True
else:
progress_stdout: bool = False
# Send signal to GUI with media name
progress_gui.item_name.emit(media_name)
try:
# Compute total iterations for progress
urls_count: int = len(stream_manifest.urls)
if urls_count > 1:
progress_total: int = urls_count
block_size: int | None = None
else:
# Compute progress iterations based on the file size.
r = requests.get(stream_manifest.urls[0], stream=True, timeout=REQUESTS_TIMEOUT_SEC)
r.raise_for_status()
# Get file size and compute progress steps
total_size_in_bytes: int = int(r.headers.get("content-length", 0))
block_size: int | None = 4096
progress_total: float = total_size_in_bytes / block_size
# Create progress Task
p_task: TaskID = progress.add_task(
f"[blue]Item '{media_name[:30]}'",
total=progress_total,
visible=progress_stdout,
)
# Write content to file until progress is finished.
while not progress.tasks[p_task].finished:
with open(path_file, "wb") as f:
for url in stream_manifest.urls:
# Create the request object with stream=True, so the content won't be loaded into memory at once.
r = requests.get(url, stream=True, timeout=REQUESTS_TIMEOUT_SEC)
r.raise_for_status()
# Write the content to disk. If `chunk_size` is set to `None` the whole file will be written at once.
for data in r.iter_content(chunk_size=block_size):
f.write(data)
# Advance progress bar.
progress.advance(p_task)
# To send the progress to the GUI, we need to emit the percentage.
if not progress_stdout:
progress_gui.item.emit(progress.tasks[p_task].percentage)
except HTTPError as e:
# TODO: Handle Exception...
fn_logger(e)
# Check if file is encrypted.
needs_decryption = self.is_encrypted(stream_manifest.encryption_type)
if needs_decryption:
key, nonce = decrypt_security_token(stream_manifest.encryption_key)
tmp_path_file_decrypted = path_file + "_decrypted"
decrypt_file(path_file, tmp_path_file_decrypted, key, nonce)
else:
tmp_path_file_decrypted = path_file
# Write metadata to file.
if not isinstance(media, Video):
self.metadata_write(media, tmp_path_file_decrypted)
return tmp_path_file_decrypted
def instantiate_media(
self,
session: Session,
media_type: type[MediaType.TRACK, MediaType.VIDEO, MediaType.ALBUM, MediaType.PLAYLIST, MediaType.MIX],
id_media: str,
) -> Track | Video:
if media_type == MediaType.TRACK:
media = Track(session, id_media)
elif media_type == MediaType.VIDEO:
media = Video(session, id_media)
elif media_type == MediaType.ALBUM:
media = Album(self.session, id_media)
elif media_type == MediaType.PLAYLIST:
media = Playlist(self.session, id_media)
elif media_type == MediaType.MIX:
media = Mix(self.session, id_media)
else:
raise MediaUnknown
return media
def item(
self,
path_base: str,
file_template: str,
fn_logger: Callable,
media: Track | Video = None,
media_id: str = None,
media_type: MediaType = None,
video_download: bool = True,
progress_gui: ProgressBars = None,
progress: Progress = None,
) -> (bool, str):
# If no media instance is provided, we need to create the media instance.
if media_id and media_type:
media = self.instantiate_media(self.session, media_type, media_id)
elif not media:
raise MediaMissing
# If video download is not allowed end here
if not video_download:
fn_logger.info(
f"Video downloads are deactivated (see settings). Skipping video: {name_builder_item(media)}"
)
return False, ""
# Create file name and path
file_name_relative = format_path_media(file_template, media)
path_file = os.path.abspath(os.path.normpath(os.path.join(path_base, file_name_relative)))
# Populate StreamManifest for further download.
if isinstance(media, Track):
stream = media.stream()
manifest: str = stream.manifest
mime_type: str = stream.manifest_mime_type
else:
manifest: str = media.get_url()
mime_type: str = StreamManifestMimeType.VIDEO.value
stream_manifest = self.stream_manifest_parse(manifest, mime_type)
# Sanitize final path_file to fit into OS boundaries.
path_file = path_file_sanitize(path_file + stream_manifest.file_extension, adapt=True)
# Compute if and how downloads need to be skipped.
if self.skip_existing.value:
extension_ignore = self.skip_existing == SkipExisting.ExtensionIgnore
# TODO: Check if extension is already in `path_file` or not.
download_skip = check_file_exists(path_file, extension_ignore=extension_ignore)
else:
download_skip = False
if not download_skip:
# Create a temp directory and file.
with tempfile.TemporaryDirectory(ignore_cleanup_errors=True) as tmp_path_dir:
tmp_path_file = os.path.join(tmp_path_dir, str(uuid4()) + stream_manifest.file_extension)
# Download media.
tmp_path_file = self._download(fn_logger, media, progress, progress_gui, stream_manifest, tmp_path_file)
if isinstance(media, Video) and self.settings.data.video_convert_mp4:
# TODO: Make optional.
# Convert `*.ts` file to `*.mp4` using ffmpeg
tmp_path_file = self._video_convert(tmp_path_file)
path_file = os.path.splitext(path_file)[0] + ".mp4"
# Move final file to the configured destination directory.
os.makedirs(os.path.dirname(path_file), exist_ok=True)
shutil.move(tmp_path_file, path_file)
else:
fn_logger.debug(f"Download skipped, since file exists: '{path_file}'")
return not download_skip, path_file
| def cover_url(self, sid: str, dimension: CoverDimensions = CoverDimensions.Px320): | 2 | 2023-12-19 23:05:47+00:00 | 8k |
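The `Download._download` method in the row above streams each URL to disk in chunks while driving a progress bar. Purely as an illustration (not part of the dataset row), the same chunked-download pattern reduced to plain `requests`; `download_file` is a hypothetical helper name.

import requests

def download_file(url: str, path_file: str, chunk_size: int = 4096, timeout: int = 45) -> int:
    downloaded = 0
    with requests.get(url, stream=True, timeout=timeout) as r:
        r.raise_for_status()
        total = int(r.headers.get("content-length", 0))
        with open(path_file, "wb") as f:
            # stream=True keeps the body out of memory; iter_content yields fixed-size chunks.
            for chunk in r.iter_content(chunk_size=chunk_size):
                f.write(chunk)
                downloaded += len(chunk)
                if total:
                    print(f"progress: {downloaded / total:.1%}", end="\r")
    return downloaded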
smoores-dev/storyteller | storyteller/synchronize/sync.py | [
{
"identifier": "CACHE_DIR",
"path": "storyteller/synchronize/files.py",
"snippet": "CACHE_DIR = f\"{DATA_DIR}/cache\""
},
{
"identifier": "TEXT_DIR",
"path": "storyteller/synchronize/files.py",
"snippet": "TEXT_DIR = f\"{DATA_DIR}/assets/text\""
},
{
"identifier": "get_audio_chapter_filenames",
"path": "storyteller/synchronize/audio.py",
"snippet": "def get_audio_chapter_filenames(book_name: str):\n book_dir = get_audio_directory(book_name)\n dirname = get_chapters_path(book_dir)\n return sorted([str(Path(dirname, filename)) for filename in os.listdir(dirname)])"
},
{
"identifier": "get_transcriptions",
"path": "storyteller/synchronize/audio.py",
"snippet": "def get_transcriptions(book_name: str):\n audio_chapter_filenames = get_audio_chapter_filenames(book_name)\n transcription_filenames = [\n get_transcription_filename(chapter_filename)\n for chapter_filename in audio_chapter_filenames\n ]\n transcriptions: List[whisperx.types.AlignedTranscriptionResult] = []\n\n for transcription_filename in transcription_filenames:\n with open(transcription_filename, mode=\"r\") as transcription_file:\n transcription = json.load(transcription_file)\n transcriptions.append(transcription)\n\n return transcriptions"
},
{
"identifier": "SentenceRange",
"path": "storyteller/synchronize/epub.py",
"snippet": "class SentenceRange:\n id: int\n start: float\n end: float\n audiofile: str"
},
{
"identifier": "create_media_overlay",
"path": "storyteller/synchronize/epub.py",
"snippet": "def create_media_overlay(\n base_filename: str,\n chapter_filename: str,\n sentence_ranges: List[SentenceRange],\n):\n soup = BeautifulSoup(\n \"\"\"\n<smil xmlns=\"http://www.w3.org/ns/SMIL\" xmlns:epub=\"http://www.idpf.org/2007/ops\" version=\"3.0\">\n <body>\n </body>\n</smil>\n\"\"\",\n \"xml\",\n )\n\n seq = soup.new_tag(\"seq\", id=f\"{base_filename}_overlay\")\n seq[\"epub:textref\"] = f\"../{chapter_filename}\"\n seq[\"epub:type\"] = \"chapter\"\n soup.body.append(seq) # type: ignore\n for sentence_range in sentence_ranges:\n par = soup.new_tag(\"par\", id=f\"sentence{sentence_range.id}\")\n text = soup.new_tag(\n \"text\", src=f\"../{chapter_filename}#sentence{sentence_range.id}\"\n )\n audio = soup.new_tag(\n \"audio\",\n src=f\"../{get_epub_audio_filename(sentence_range.audiofile)}\",\n clipBegin=f\"{sentence_range.start}s\",\n clipEnd=f\"{sentence_range.end}s\",\n )\n par.append(text)\n par.append(\"\\n\")\n par.append(audio)\n par.append(\"\\n\")\n seq.append(par)\n seq.append(\"\\n\")\n return soup.encode(formatter=\"minimal\")"
},
{
"identifier": "get_chapter_sentences",
"path": "storyteller/synchronize/epub.py",
"snippet": "@cache\ndef get_chapter_sentences(chapter: epub.EpubHtml):\n soup = BeautifulSoup(chapter.get_body_content(), \"html.parser\")\n textblocks = soup.find_all(\n [\"p\", \"li\", \"blockquote\", \"h1\", \"h2\", \"h3\", \"h4\", \"h5\", \"h6\"]\n )\n\n return [\n re.sub(consecutivenewlines, \" \", sentence)\n for textblock in textblocks\n if isinstance(textblock, Tag)\n for sentence in sent_tokenize(textblock.get_text())\n ]"
},
{
"identifier": "get_chapter_text",
"path": "storyteller/synchronize/epub.py",
"snippet": "@cache\ndef get_chapter_text(chapter: epub.EpubHtml):\n soup = BeautifulSoup(chapter.get_body_content(), \"html.parser\")\n return re.sub(consecutivenewlines, \" \", soup.get_text())"
},
{
"identifier": "get_epub_audio_filename",
"path": "storyteller/synchronize/epub.py",
"snippet": "def get_epub_audio_filename(audio_filename: str) -> str:\n return f\"Audio/{os.path.basename(audio_filename)}\""
},
{
"identifier": "get_sentences_with_offsets",
"path": "storyteller/synchronize/epub.py",
"snippet": "def get_sentences_with_offsets(text: str):\n sentences = sent_tokenize(text)\n sentences_with_offsets: list[str] = []\n last_sentence_end = 0\n for sentence in sentences:\n sentence_start = text.find(sentence, last_sentence_end)\n if sentence_start > last_sentence_end:\n sentences_with_offsets.append(text[last_sentence_end:sentence_start])\n\n sentences_with_offsets.append(sentence)\n last_sentence_end = sentence_start + len(sentence)\n\n if len(text) > last_sentence_end:\n sentences_with_offsets.append(text[last_sentence_end:])\n\n return sentences_with_offsets"
},
{
"identifier": "read_epub",
"path": "storyteller/synchronize/epub.py",
"snippet": "def read_epub(book_name: str):\n book = epub.read_epub(get_epub_filepath(book_name))\n for item in book.get_items_of_type(ITEM_DOCUMENT):\n if not item.is_chapter():\n continue\n soup = BeautifulSoup(item.content)\n\n head: Union[Tag, None] = soup.find(\"head\") # type: ignore\n if head is not None:\n links = head.find_all(\"link\")\n for link in links:\n item.add_link(\n href=link[\"href\"], rel=\" \".join(link[\"rel\"]), type=link[\"type\"]\n )\n return book"
},
{
"identifier": "get_chapters",
"path": "storyteller/synchronize/epub.py",
"snippet": "def get_chapters(book: epub.EpubBook) -> List[epub.EpubHtml]:\n spine_ids = [item[0] for item in book.spine]\n chapters = [cast(epub.EpubHtml, book.get_item_with_id(id)) for id in spine_ids]\n return chapters"
},
{
"identifier": "tag_sentences",
"path": "storyteller/synchronize/epub.py",
"snippet": "def tag_sentences(chapter: epub.EpubHtml):\n content = cast(str, chapter.get_content())\n soup = BeautifulSoup(content, \"html.parser\")\n body_soup = soup.find(\"body\")\n if body_soup is None:\n return\n if isinstance(body_soup, NavigableString):\n return\n textblocks = body_soup.find_all(\n [\"p\", \"li\", \"blockquote\", \"h1\", \"h2\", \"h3\", \"h4\", \"h5\", \"h6\"]\n )\n start_id = 0\n for textblock in textblocks:\n if not isinstance(textblock, Tag):\n continue\n\n spans = get_textblock_spans(start_id, textblock)\n new_content = serialize_spans(soup, spans)\n textblock.clear()\n textblock.extend(new_content)\n\n try:\n start_id = get_last_span_id(spans) + 1\n except StopIteration:\n pass\n\n chapter.set_content(soup.encode())"
}
] | from dataclasses import dataclass
from itertools import groupby
from pathlib import Path
from typing import Any, Callable, Dict, List, TypedDict, Union, cast
from fuzzysearch import Match, find_near_matches
from ebooklib import epub
from mutagen.mp4 import MP4
from mutagen.mp3 import MP3
from .files import CACHE_DIR, TEXT_DIR
from .audio import (
get_audio_chapter_filenames,
get_transcriptions,
)
from .epub import (
SentenceRange,
create_media_overlay,
get_chapter_sentences,
get_chapter_text,
get_epub_audio_filename,
get_sentences_with_offsets,
read_epub,
get_chapters,
tag_sentences,
)
import json
import math
import os
import sys
import whisperx.types | 3,768 | count = sentence_range.id - last_sentence_range.id
diff = last_sentence_range.end - last_sentence_range.start
interpolated_length = diff / count
for i in range(1, count):
interpolated_sentence_range = SentenceRange(
last_sentence_range.id + i,
last_sentence_range.start + interpolated_length * i,
last_sentence_range.start + interpolated_length * (i + 1),
last_sentence_range.audiofile,
)
interpolated.append(interpolated_sentence_range)
interpolated.append(sentence_range)
return interpolated
def get_chapter_duration(sentence_ranges: List[SentenceRange]):
duration = 0
for _, file_group in groupby(sentence_ranges, key=lambda r: r.audiofile):
file_group_list = list(file_group)
duration += file_group_list[-1].end - file_group_list[0].start
return duration
@dataclass
class SyncedChapter:
chapter: epub.EpubHtml
sentence_ranges: List[SentenceRange]
audio: List[epub.EpubItem]
def sync_chapter(
start_sentence: int,
transcription: StorytellerTranscription,
chapter: epub.EpubHtml,
transcription_offset: int,
last_sentence_range: Union[SentenceRange, None],
):
chapter_sentences = get_chapter_sentences(chapter)
sentence_ranges = get_sentence_ranges(
start_sentence,
transcription,
chapter_sentences,
transcription_offset,
last_sentence_range,
)
sentence_ranges = interpolate_sentence_ranges(sentence_ranges)
tag_sentences(chapter)
chapter_filepath_length = len(chapter.file_name.split(os.path.sep)) - 1
relative_ups = "../" * chapter_filepath_length
chapter.add_link(
rel="stylesheet",
href=f"{relative_ups}Styles/storyteller-readaloud.css",
type="text/css",
)
audiofiles = set([sentence_range.audiofile for sentence_range in sentence_ranges])
audio_items = []
for audiofile in audiofiles:
epub_audio_filename = get_epub_audio_filename(audiofile)
audio_item = epub.EpubItem(
uid=epub_audio_filename,
file_name=epub_audio_filename,
content=open(audiofile, "rb").read(), # type: ignore
media_type="audio/mpeg",
)
audio_items.append(audio_item)
return SyncedChapter(
chapter=chapter,
sentence_ranges=sentence_ranges,
audio=audio_items,
)
def format_duration(duration: float):
hours = math.floor(duration / 3600)
minutes = math.floor(duration / 60 - hours * 60)
seconds = duration - minutes * 60 - hours * 3600
return f"{str(hours).zfill(2)}:{str(minutes).zfill(2)}:{round(seconds, 3)}"
def update_synced_chapter(book: epub.EpubBook, synced: SyncedChapter):
base_filename, _ = os.path.splitext(os.path.basename(synced.chapter.file_name))
media_overlay_item = epub.EpubSMIL(
uid=f"{base_filename}_overlay",
file_name=f"MediaOverlays/{base_filename}.smil",
content=create_media_overlay(
base_filename, synced.chapter.file_name, synced.sentence_ranges
),
)
synced.chapter.media_overlay = media_overlay_item.id
duration = get_chapter_duration(synced.sentence_ranges)
book.add_metadata(
None,
"meta",
format_duration(duration),
{"property": "media:duration", "refines": f"#{media_overlay_item.id}"},
)
for audio_item in synced.audio:
if book.get_item_with_id(audio_item.id) is None:
book.add_item(audio_item)
book.add_item(media_overlay_item)
return duration
def sync_book(
ebook_name: str,
audiobook_name: str,
on_progress: Callable[[float], None] | None = None,
):
|
class NullIO:
def __enter__(self):
self._original_stdout = sys.stdout
self._original_stderr = sys.stderr
sys.stdout = open(os.devnull, "w")
sys.stderr = open(os.devnull, "w")
def __exit__(self, exc_type, exc_val, exc_tb):
sys.stdout.close()
sys.stdout = self._original_stdout
sys.stderr.close()
sys.stderr = self._original_stderr
OFFSET_SEARCH_WINDOW_SIZE = 5000
def find_best_offset(
epub_sentences: list[str], transcription_text: str, last_match_offset: int
):
i = 0
while i < len(transcription_text):
start_sentence = 0
start_index = (last_match_offset + i) % len(transcription_text)
end_index = (start_index + OFFSET_SEARCH_WINDOW_SIZE) % len(transcription_text)
if end_index > start_index:
transcription_text_slice = transcription_text[start_index:end_index]
else:
transcription_text_slice = (
transcription_text[start_index:] + transcription_text[:end_index]
)
while start_sentence < len(epub_sentences):
query_string = " ".join(epub_sentences[start_sentence : start_sentence + 6])
with NullIO():
matches = find_near_matches(
query_string.lower(),
transcription_text_slice.lower(),
max_l_dist=math.floor(0.1 * len(query_string)),
)
matches = cast(List[Match], matches)
if len(matches) > 0:
return (start_sentence, matches[0].start + start_index)
start_sentence += 3
i += OFFSET_SEARCH_WINDOW_SIZE // 2
return (0, None)
class StorytellerTranscriptionSegment(whisperx.types.SingleAlignedSegment):
audiofile: str
class StorytellerTranscription(TypedDict):
segments: List[StorytellerTranscriptionSegment]
word_segments: List[whisperx.types.SingleWordSegment]
def concat_transcriptions(
transcriptions: List[whisperx.types.AlignedTranscriptionResult],
audiofiles: List[str],
):
result = StorytellerTranscription(segments=[], word_segments=[])
for transcription, audiofile in zip(transcriptions, audiofiles):
result["word_segments"].extend(transcription["word_segments"])
result["segments"].extend(
[
StorytellerTranscriptionSegment(**segment, audiofile=audiofile)
for segment in transcription["segments"]
]
)
return result
def get_transcription_text(transcription: StorytellerTranscription):
return " ".join([segment["text"] for segment in transcription["segments"]])
def find_timestamps(match_start_index: int, transcription: StorytellerTranscription):
s = 0
position = 0
while True:
while position + len(transcription["segments"][s]["text"]) < match_start_index: # type: ignore
position += len(transcription["segments"][s]["text"]) + 1 # type: ignore
s += 1
w = 0
segment = transcription["segments"][s]
while (
w < len(segment["words"])
and position + len(segment["words"][w]["word"]) <= match_start_index
):
position += len(segment["words"][w]["word"]) + 1
w += 1
if w >= len(segment["words"]):
s += 1
continue
break
start_word = segment["words"][w]
# If a segment only has one word, the start and
# end timestamps are only placed on the segment
if "start" in start_word:
return start_word["start"], segment["audiofile"]
return segment["start"], segment["audiofile"]
def get_window_index_from_offset(window: List[str], offset: int):
index = 0
while offset >= len(window[index]):
offset -= len(window[index])
index += 1
return index
def get_sentence_ranges(
start_sentence: int,
transcription: StorytellerTranscription,
sentences: List[str],
chapter_offset: int,
last_sentence_range: Union[SentenceRange, None],
):
sentence_ranges: List[SentenceRange] = []
transcription_text = get_transcription_text(transcription).lower()[chapter_offset:]
transcription_sentences = get_sentences_with_offsets(transcription_text)
transcription_window_index = 0
last_good_transcription_window = 0
not_found = 0
sentence_index = start_sentence
while sentence_index < len(sentences):
sentence = sentences[sentence_index]
transcription_window_list = transcription_sentences[
transcription_window_index : transcription_window_index + 10
]
transcription_window = "".join(transcription_window_list)
matches = find_near_matches(
sentence.strip().lower(),
transcription_window,
max_l_dist=math.floor(0.25 * len(sentence)),
)
matches = cast(List[Match], matches)
if len(matches) == 0:
sentence_index += 1
not_found += 1
if not_found == 3 or sentence_index == len(sentences) - 1:
transcription_window_index += 1
if transcription_window_index == last_good_transcription_window + 30:
transcription_window_index = last_good_transcription_window
not_found = 0
continue
sentence_index -= not_found
not_found = 0
continue
first_match = matches[0]
transcription_offset = (
len("".join(transcription_sentences[:transcription_window_index])) + 1
)
start, audiofile = find_timestamps(
first_match.start + transcription_offset + chapter_offset, transcription
)
if len(sentence_ranges) > 0:
last_audiofile = sentence_ranges[-1].audiofile
if audiofile == last_audiofile:
sentence_ranges[-1].end = start
else:
last_mp4 = (
MP4(last_audiofile)
if last_audiofile.endswith(".mp4")
else MP3(last_audiofile)
)
sentence_ranges[-1].end = last_mp4.info.length
start = 0
elif last_sentence_range is not None:
if audiofile == last_sentence_range.audiofile:
last_sentence_range.end = start
else:
last_mp4 = (
MP4(last_sentence_range.audiofile)
if last_sentence_range.audiofile.endswith(".mp4")
else MP3(last_sentence_range.audiofile)
)
last_sentence_range.end = last_mp4.info.length
start = 0
else:
start = 0
sentence_ranges.append(SentenceRange(sentence_index, start, start, audiofile))
not_found = 0
transcription_window_index = (
get_window_index_from_offset(transcription_window_list, first_match.start)
+ transcription_window_index
)
last_good_transcription_window = transcription_window_index
sentence_index += 1
return sentence_ranges
def interpolate_sentence_ranges(
sentence_ranges: List[SentenceRange],
) -> List[SentenceRange]:
interpolated: List[SentenceRange] = []
for sentence_range in sentence_ranges:
if len(interpolated) == 0:
interpolated.append(sentence_range)
continue
last_sentence_range = interpolated[-1]
count = sentence_range.id - last_sentence_range.id
diff = last_sentence_range.end - last_sentence_range.start
interpolated_length = diff / count
for i in range(1, count):
interpolated_sentence_range = SentenceRange(
last_sentence_range.id + i,
last_sentence_range.start + interpolated_length * i,
last_sentence_range.start + interpolated_length * (i + 1),
last_sentence_range.audiofile,
)
interpolated.append(interpolated_sentence_range)
interpolated.append(sentence_range)
return interpolated
def get_chapter_duration(sentence_ranges: List[SentenceRange]):
duration = 0
for _, file_group in groupby(sentence_ranges, key=lambda r: r.audiofile):
file_group_list = list(file_group)
duration += file_group_list[-1].end - file_group_list[0].start
return duration
@dataclass
class SyncedChapter:
chapter: epub.EpubHtml
sentence_ranges: List[SentenceRange]
audio: List[epub.EpubItem]
def sync_chapter(
start_sentence: int,
transcription: StorytellerTranscription,
chapter: epub.EpubHtml,
transcription_offset: int,
last_sentence_range: Union[SentenceRange, None],
):
chapter_sentences = get_chapter_sentences(chapter)
sentence_ranges = get_sentence_ranges(
start_sentence,
transcription,
chapter_sentences,
transcription_offset,
last_sentence_range,
)
sentence_ranges = interpolate_sentence_ranges(sentence_ranges)
tag_sentences(chapter)
chapter_filepath_length = len(chapter.file_name.split(os.path.sep)) - 1
relative_ups = "../" * chapter_filepath_length
chapter.add_link(
rel="stylesheet",
href=f"{relative_ups}Styles/storyteller-readaloud.css",
type="text/css",
)
audiofiles = set([sentence_range.audiofile for sentence_range in sentence_ranges])
audio_items = []
for audiofile in audiofiles:
epub_audio_filename = get_epub_audio_filename(audiofile)
audio_item = epub.EpubItem(
uid=epub_audio_filename,
file_name=epub_audio_filename,
content=open(audiofile, "rb").read(), # type: ignore
media_type="audio/mpeg",
)
audio_items.append(audio_item)
return SyncedChapter(
chapter=chapter,
sentence_ranges=sentence_ranges,
audio=audio_items,
)
def format_duration(duration: float):
hours = math.floor(duration / 3600)
minutes = math.floor(duration / 60 - hours * 60)
seconds = duration - minutes * 60 - hours * 3600
return f"{str(hours).zfill(2)}:{str(minutes).zfill(2)}:{round(seconds, 3)}"
def update_synced_chapter(book: epub.EpubBook, synced: SyncedChapter):
base_filename, _ = os.path.splitext(os.path.basename(synced.chapter.file_name))
media_overlay_item = epub.EpubSMIL(
uid=f"{base_filename}_overlay",
file_name=f"MediaOverlays/{base_filename}.smil",
content=create_media_overlay(
base_filename, synced.chapter.file_name, synced.sentence_ranges
),
)
synced.chapter.media_overlay = media_overlay_item.id
duration = get_chapter_duration(synced.sentence_ranges)
book.add_metadata(
None,
"meta",
format_duration(duration),
{"property": "media:duration", "refines": f"#{media_overlay_item.id}"},
)
for audio_item in synced.audio:
if book.get_item_with_id(audio_item.id) is None:
book.add_item(audio_item)
book.add_item(media_overlay_item)
return duration
def sync_book(
ebook_name: str,
audiobook_name: str,
on_progress: Callable[[float], None] | None = None,
): | book = read_epub(ebook_name) | 10 | 2023-12-15 16:07:12+00:00 | 8k |
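The sentence-alignment code in the row above leans on `fuzzysearch.find_near_matches` with a `max_l_dist` proportional to the sentence length. A small, self-contained illustration of that call (not part of the dataset row; the sample strings are made up):

import math
from fuzzysearch import find_near_matches

transcription = "it was the best of times it was the blurst of times"
sentence = "It was the worst of times."
max_dist = math.floor(0.25 * len(sentence))  # tolerate roughly 25% edit distance, as above
matches = find_near_matches(sentence.strip().lower(), transcription, max_l_dist=max_dist)
for m in matches:
    # Each Match carries start/end offsets into the haystack plus the Levenshtein distance.
    print(m.start, m.end, m.dist, transcription[m.start:m.end])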
zyrant/SPGroup3D | mmdet3d/core/visualizer/show_result.py | [
{
"identifier": "draw_camera_bbox3d_on_img",
"path": "mmdet3d/core/visualizer/image_vis.py",
"snippet": "def draw_camera_bbox3d_on_img(bboxes3d,\n raw_img,\n cam2img,\n img_metas,\n color=(0, 255, 0),\n thickness=1):\n \"\"\"Project the 3D bbox on 2D plane and draw on input image.\n\n Args:\n bboxes3d (:obj:`CameraInstance3DBoxes`, shape=[M, 7]):\n 3d bbox in camera coordinate system to visualize.\n raw_img (numpy.array): The numpy array of image.\n cam2img (dict): Camera intrinsic matrix,\n denoted as `K` in depth bbox coordinate system.\n img_metas (dict): Useless here.\n color (tuple[int], optional): The color to draw bboxes.\n Default: (0, 255, 0).\n thickness (int, optional): The thickness of bboxes. Default: 1.\n \"\"\"\n from mmdet3d.core.bbox import points_cam2img\n\n img = raw_img.copy()\n cam2img = copy.deepcopy(cam2img)\n corners_3d = bboxes3d.corners\n num_bbox = corners_3d.shape[0]\n points_3d = corners_3d.reshape(-1, 3)\n if not isinstance(cam2img, torch.Tensor):\n cam2img = torch.from_numpy(np.array(cam2img))\n\n assert (cam2img.shape == torch.Size([3, 3])\n or cam2img.shape == torch.Size([4, 4]))\n cam2img = cam2img.float().cpu()\n\n # project to 2d to get image coords (uv)\n uv_origin = points_cam2img(points_3d, cam2img)\n uv_origin = (uv_origin - 1).round()\n imgfov_pts_2d = uv_origin[..., :2].reshape(num_bbox, 8, 2).numpy()\n\n return plot_rect3d_on_img(img, num_bbox, imgfov_pts_2d, color, thickness)"
},
{
"identifier": "draw_depth_bbox3d_on_img",
"path": "mmdet3d/core/visualizer/image_vis.py",
"snippet": "def draw_depth_bbox3d_on_img(bboxes3d,\n raw_img,\n calibs,\n img_metas,\n color=(0, 255, 0),\n thickness=1):\n \"\"\"Project the 3D bbox on 2D plane and draw on input image.\n\n Args:\n bboxes3d (:obj:`DepthInstance3DBoxes`, shape=[M, 7]):\n 3d bbox in depth coordinate system to visualize.\n raw_img (numpy.array): The numpy array of image.\n calibs (dict): Camera calibration information, Rt and K.\n img_metas (dict): Used in coordinates transformation.\n color (tuple[int], optional): The color to draw bboxes.\n Default: (0, 255, 0).\n thickness (int, optional): The thickness of bboxes. Default: 1.\n \"\"\"\n from mmdet3d.core.bbox import points_cam2img\n from mmdet3d.models import apply_3d_transformation\n\n img = raw_img.copy()\n img_metas = copy.deepcopy(img_metas)\n corners_3d = bboxes3d.corners\n num_bbox = corners_3d.shape[0]\n points_3d = corners_3d.reshape(-1, 3)\n\n # first reverse the data transformations\n xyz_depth = apply_3d_transformation(\n points_3d, 'DEPTH', img_metas, reverse=True)\n\n # project to 2d to get image coords (uv)\n uv_origin = points_cam2img(xyz_depth,\n xyz_depth.new_tensor(img_metas['depth2img']))\n uv_origin = (uv_origin - 1).round()\n imgfov_pts_2d = uv_origin[..., :2].reshape(num_bbox, 8, 2).numpy()\n\n return plot_rect3d_on_img(img, num_bbox, imgfov_pts_2d, color, thickness)"
},
{
"identifier": "draw_lidar_bbox3d_on_img",
"path": "mmdet3d/core/visualizer/image_vis.py",
"snippet": "def draw_lidar_bbox3d_on_img(bboxes3d,\n raw_img,\n lidar2img_rt,\n img_metas,\n color=(0, 255, 0),\n thickness=1):\n \"\"\"Project the 3D bbox on 2D plane and draw on input image.\n\n Args:\n bboxes3d (:obj:`LiDARInstance3DBoxes`):\n 3d bbox in lidar coordinate system to visualize.\n raw_img (numpy.array): The numpy array of image.\n lidar2img_rt (numpy.array, shape=[4, 4]): The projection matrix\n according to the camera intrinsic parameters.\n img_metas (dict): Useless here.\n color (tuple[int], optional): The color to draw bboxes.\n Default: (0, 255, 0).\n thickness (int, optional): The thickness of bboxes. Default: 1.\n \"\"\"\n img = raw_img.copy()\n corners_3d = bboxes3d.corners\n num_bbox = corners_3d.shape[0]\n pts_4d = np.concatenate(\n [corners_3d.reshape(-1, 3),\n np.ones((num_bbox * 8, 1))], axis=-1)\n lidar2img_rt = copy.deepcopy(lidar2img_rt).reshape(4, 4)\n if isinstance(lidar2img_rt, torch.Tensor):\n lidar2img_rt = lidar2img_rt.cpu().numpy()\n pts_2d = pts_4d @ lidar2img_rt.T\n\n pts_2d[:, 2] = np.clip(pts_2d[:, 2], a_min=1e-5, a_max=1e5)\n pts_2d[:, 0] /= pts_2d[:, 2]\n pts_2d[:, 1] /= pts_2d[:, 2]\n imgfov_pts_2d = pts_2d[..., :2].reshape(num_bbox, 8, 2)\n\n return plot_rect3d_on_img(img, num_bbox, imgfov_pts_2d, color, thickness)"
}
] | from os import path as osp
from .image_vis import (draw_camera_bbox3d_on_img, draw_depth_bbox3d_on_img,
draw_lidar_bbox3d_on_img)
from .open3d_vis import Visualizer
from .open3d_vis import Visualizer
import mmcv
import numpy as np
import trimesh
import matplotlib.pyplot as plt | 4,274 | superpoints = map_color[superpoints]
superpoints = np.concatenate([points[:, :3], superpoints],
axis=1)
_write_obj(superpoints, osp.join(result_path, f'{filename}_superpoints.obj'))
if gt_corners is not None:
_write_oriented_bbox_v2(gt_corners, gt_labels,
osp.join(result_path, f'{filename}_gt.obj'))
if pred_corners is not None:
_write_oriented_bbox_v2(pred_corners, pred_labels,
osp.join(result_path, f'{filename}_pred.obj'))
def show_seg_result(points,
gt_seg,
pred_seg,
out_dir,
filename,
palette,
ignore_index=None,
show=False,
snapshot=False):
"""Convert results into format that is directly readable for meshlab.
Args:
points (np.ndarray): Points.
gt_seg (np.ndarray): Ground truth segmentation mask.
pred_seg (np.ndarray): Predicted segmentation mask.
out_dir (str): Path of output directory
filename (str): Filename of the current frame.
palette (np.ndarray): Mapping between class labels and colors.
ignore_index (int, optional): The label index to be ignored, e.g.
unannotated points. Defaults to None.
show (bool, optional): Visualize the results online. Defaults to False.
snapshot (bool, optional): Whether to save the online results.
Defaults to False.
"""
# we need 3D coordinates to visualize segmentation mask
if gt_seg is not None or pred_seg is not None:
assert points is not None, \
'3D coordinates are required for segmentation visualization'
# filter out ignored points
if gt_seg is not None and ignore_index is not None:
if points is not None:
points = points[gt_seg != ignore_index]
if pred_seg is not None:
pred_seg = pred_seg[gt_seg != ignore_index]
gt_seg = gt_seg[gt_seg != ignore_index]
if gt_seg is not None:
gt_seg_color = palette[gt_seg]
gt_seg_color = np.concatenate([points[:, :3], gt_seg_color], axis=1)
if pred_seg is not None:
pred_seg_color = palette[pred_seg]
pred_seg_color = np.concatenate([points[:, :3], pred_seg_color],
axis=1)
result_path = osp.join(out_dir, filename)
mmcv.mkdir_or_exist(result_path)
# online visualization of segmentation mask
# we show three masks in a row, scene_points, gt_mask, pred_mask
if show:
mode = 'xyzrgb' if points.shape[1] == 6 else 'xyz'
vis = Visualizer(points, mode=mode)
if gt_seg is not None:
vis.add_seg_mask(gt_seg_color)
if pred_seg is not None:
vis.add_seg_mask(pred_seg_color)
show_path = osp.join(result_path,
f'{filename}_online.png') if snapshot else None
vis.show(show_path)
if points is not None:
_write_obj(points, osp.join(result_path, f'{filename}_points.obj'))
if gt_seg is not None:
_write_obj(gt_seg_color, osp.join(result_path, f'{filename}_gt.obj'))
if pred_seg is not None:
_write_obj(pred_seg_color, osp.join(result_path,
f'{filename}_pred.obj'))
def show_multi_modality_result(img,
gt_bboxes,
pred_bboxes,
proj_mat,
out_dir,
filename,
box_mode='lidar',
img_metas=None,
show=False,
gt_bbox_color=(61, 102, 255),
pred_bbox_color=(241, 101, 72)):
"""Convert multi-modality detection results into 2D results.
Project the predicted 3D bbox to 2D image plane and visualize them.
Args:
img (np.ndarray): The numpy array of image in cv2 fashion.
gt_bboxes (:obj:`BaseInstance3DBoxes`): Ground truth boxes.
pred_bboxes (:obj:`BaseInstance3DBoxes`): Predicted boxes.
proj_mat (numpy.array, shape=[4, 4]): The projection matrix
according to the camera intrinsic parameters.
out_dir (str): Path of output directory.
filename (str): Filename of the current frame.
box_mode (str, optional): Coordinate system the boxes are in.
Should be one of 'depth', 'lidar' and 'camera'.
Defaults to 'lidar'.
img_metas (dict, optional): Used in projecting depth bbox.
Defaults to None.
show (bool, optional): Visualize the results online. Defaults to False.
gt_bbox_color (str or tuple(int), optional): Color of bbox lines.
The tuple of color should be in BGR order. Default: (255, 102, 61).
pred_bbox_color (str or tuple(int), optional): Color of bbox lines.
The tuple of color should be in BGR order. Default: (72, 101, 241).
"""
if box_mode == 'depth':
| # Copyright (c) OpenMMLab. All rights reserved.
def _write_obj(points, out_filename):
"""Write points into ``obj`` format for meshlab visualization.
Args:
points (np.ndarray): Points in shape (N, dim).
out_filename (str): Filename to be saved.
"""
N = points.shape[0]
fout = open(out_filename, 'w')
for i in range(N):
if points.shape[1] == 6:
c = points[i, 3:].astype(int)
fout.write(
'v %f %f %f %d %d %d\n' %
(points[i, 0], points[i, 1], points[i, 2], c[0], c[1], c[2]))
else:
fout.write('v %f %f %f\n' %
(points[i, 0], points[i, 1], points[i, 2]))
fout.close()
def _write_oriented_bbox(scene_bbox, out_filename):
"""Export oriented (around Z axis) scene bbox to meshes.
Args:
scene_bbox(list[ndarray] or ndarray): xyz pos of center and
3 lengths (x_size, y_size, z_size) and heading angle around Z axis.
Y forward, X right, Z upward. heading angle of positive X is 0,
heading angle of positive Y is 90 degrees.
out_filename(str): Filename.
"""
def heading2rotmat(heading_angle):
rotmat = np.zeros((3, 3))
rotmat[2, 2] = 1
cosval = np.cos(heading_angle)
sinval = np.sin(heading_angle)
rotmat[0:2, 0:2] = np.array([[cosval, -sinval], [sinval, cosval]])
return rotmat
def convert_oriented_box_to_trimesh_fmt(box):
ctr = box[:3]
lengths = box[3:6]
trns = np.eye(4)
trns[0:3, 3] = ctr
trns[3, 3] = 1.0
trns[0:3, 0:3] = heading2rotmat(box[6])
box_trimesh_fmt = trimesh.creation.box(lengths, trns)
return box_trimesh_fmt
if len(scene_bbox) == 0:
scene_bbox = np.zeros((1, 7))
scene = trimesh.scene.Scene()
for box in scene_bbox:
scene.add_geometry(convert_oriented_box_to_trimesh_fmt(box))
mesh_list = trimesh.util.concatenate(scene.dump())
# save to obj file
trimesh.io.export.export_mesh(mesh_list, out_filename, file_type='obj')
return
def _write_oriented_bbox_v2(corners, labels, out_filename):
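    """Export oriented boxes given by their corners to a colored ``obj`` mesh.

    Args:
        corners (np.ndarray): Box corners in shape (N, 8, 3).
        labels (np.ndarray): Box labels, used to pick a fixed color per class.
        out_filename (str): Filename to be saved.
    """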
colors = np.multiply([
plt.cm.get_cmap('nipy_spectral', 19)((i * 5 + 11) % 18 + 1)[:3] for i in range(18)
], 255).astype(np.uint8).tolist()
with open(out_filename, 'w') as file:
for i, (corner, label) in enumerate(zip(corners, labels)):
c = colors[label]
for p in corner:
file.write(f'v {p[0]} {p[1]} {p[2]} {c[0]} {c[1]} {c[2]}\n')
j = i * 8 + 1
for k in [[0, 1, 2, 3], [4, 5, 6, 7], [0, 1, 5, 4],
[2, 3, 7, 6], [3, 0, 4, 7], [1, 2, 6, 5]]:
file.write('f')
for l in k:
file.write(f' {j + l}')
file.write('\n')
def show_result(points,
gt_bboxes,
pred_bboxes,
out_dir,
filename,
show=False,
snapshot=False,
pred_labels=None):
"""Convert results into format that is directly readable for meshlab.
Args:
points (np.ndarray): Points.
gt_bboxes (np.ndarray): Ground truth boxes.
pred_bboxes (np.ndarray): Predicted boxes.
out_dir (str): Path of output directory
filename (str): Filename of the current frame.
show (bool, optional): Visualize the results online. Defaults to False.
snapshot (bool, optional): Whether to save the online results.
Defaults to False.
pred_labels (np.ndarray, optional): Predicted labels of boxes.
Defaults to None.
"""
result_path = osp.join(out_dir, filename)
mmcv.mkdir_or_exist(result_path)
if show:
vis = Visualizer(points)
if pred_bboxes is not None:
if pred_labels is None:
vis.add_bboxes(bbox3d=pred_bboxes)
else:
palette = np.random.randint(
0, 255, size=(pred_labels.max() + 1, 3)) / 256
labelDict = {}
for j in range(len(pred_labels)):
i = int(pred_labels[j].numpy())
if labelDict.get(i) is None:
labelDict[i] = []
labelDict[i].append(pred_bboxes[j])
for i in labelDict:
vis.add_bboxes(
bbox3d=np.array(labelDict[i]),
bbox_color=palette[i],
points_in_box_color=palette[i])
if gt_bboxes is not None:
vis.add_bboxes(bbox3d=gt_bboxes, bbox_color=(0, 0, 1))
show_path = osp.join(result_path,
f'{filename}_online.png') if snapshot else None
vis.show(show_path)
if points is not None:
_write_obj(points, osp.join(result_path, f'{filename}_points.obj'))
if gt_bboxes is not None:
# bottom center to gravity center
gt_bboxes[..., 2] += gt_bboxes[..., 5] / 2
_write_oriented_bbox(gt_bboxes,
osp.join(result_path, f'{filename}_gt.obj'))
if pred_bboxes is not None:
# bottom center to gravity center
pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
_write_oriented_bbox(pred_bboxes,
osp.join(result_path, f'{filename}_pred.obj'))
def show_result_v2(points,
gt_corners,
gt_labels,
pred_corners,
pred_labels,
out_dir,
filename):
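    """Write points and oriented boxes (given by corner coordinates and labels)
    into ``obj`` files for meshlab visualization."""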
result_path = osp.join(out_dir, filename)
mmcv.mkdir_or_exist(result_path)
if points is not None:
_write_obj(points, osp.join(result_path, f'{filename}_points.obj'))
if gt_corners is not None:
_write_oriented_bbox_v2(gt_corners, gt_labels,
osp.join(result_path, f'{filename}_gt.obj'))
if pred_corners is not None:
_write_oriented_bbox_v2(pred_corners, pred_labels,
osp.join(result_path, f'{filename}_pred.obj'))
def show_result_v2_with_superpoint(points,
superpoints,
gt_corners,
gt_labels,
pred_corners,
pred_labels,
out_dir,
filename):
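    """Same as ``show_result_v2``, but additionally writes the superpoints,
    each colored randomly, into an ``obj`` file."""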
result_path = osp.join(out_dir, filename)
mmcv.mkdir_or_exist(result_path)
if points is not None:
_write_obj(points, osp.join(result_path, f'{filename}_points.obj'))
if superpoints is not None:
map_color = []
for i in range(superpoints.max()+1):
r = np.random.rand(1, 1)
g = np.random.rand(1, 1)
b = np.random.rand(1, 1)
xx = np.concatenate((r, g, b), axis=1) * 255.
map_color.append(xx)
map_color = np.concatenate(map_color, axis=0)
superpoints = map_color[superpoints]
superpoints = np.concatenate([points[:, :3], superpoints],
axis=1)
_write_obj(superpoints, osp.join(result_path, f'{filename}_superpoints.obj'))
if gt_corners is not None:
_write_oriented_bbox_v2(gt_corners, gt_labels,
osp.join(result_path, f'{filename}_gt.obj'))
if pred_corners is not None:
_write_oriented_bbox_v2(pred_corners, pred_labels,
osp.join(result_path, f'{filename}_pred.obj'))
def show_seg_result(points,
gt_seg,
pred_seg,
out_dir,
filename,
palette,
ignore_index=None,
show=False,
snapshot=False):
"""Convert results into format that is directly readable for meshlab.
Args:
points (np.ndarray): Points.
gt_seg (np.ndarray): Ground truth segmentation mask.
pred_seg (np.ndarray): Predicted segmentation mask.
out_dir (str): Path of output directory
filename (str): Filename of the current frame.
palette (np.ndarray): Mapping between class labels and colors.
ignore_index (int, optional): The label index to be ignored, e.g.
unannotated points. Defaults to None.
show (bool, optional): Visualize the results online. Defaults to False.
snapshot (bool, optional): Whether to save the online results.
Defaults to False.
"""
# we need 3D coordinates to visualize segmentation mask
if gt_seg is not None or pred_seg is not None:
assert points is not None, \
'3D coordinates are required for segmentation visualization'
# filter out ignored points
if gt_seg is not None and ignore_index is not None:
if points is not None:
points = points[gt_seg != ignore_index]
if pred_seg is not None:
pred_seg = pred_seg[gt_seg != ignore_index]
gt_seg = gt_seg[gt_seg != ignore_index]
if gt_seg is not None:
gt_seg_color = palette[gt_seg]
gt_seg_color = np.concatenate([points[:, :3], gt_seg_color], axis=1)
if pred_seg is not None:
pred_seg_color = palette[pred_seg]
pred_seg_color = np.concatenate([points[:, :3], pred_seg_color],
axis=1)
result_path = osp.join(out_dir, filename)
mmcv.mkdir_or_exist(result_path)
# online visualization of segmentation mask
# we show three masks in a row, scene_points, gt_mask, pred_mask
if show:
mode = 'xyzrgb' if points.shape[1] == 6 else 'xyz'
vis = Visualizer(points, mode=mode)
if gt_seg is not None:
vis.add_seg_mask(gt_seg_color)
if pred_seg is not None:
vis.add_seg_mask(pred_seg_color)
show_path = osp.join(result_path,
f'{filename}_online.png') if snapshot else None
vis.show(show_path)
if points is not None:
_write_obj(points, osp.join(result_path, f'{filename}_points.obj'))
if gt_seg is not None:
_write_obj(gt_seg_color, osp.join(result_path, f'{filename}_gt.obj'))
if pred_seg is not None:
_write_obj(pred_seg_color, osp.join(result_path,
f'{filename}_pred.obj'))
def show_multi_modality_result(img,
gt_bboxes,
pred_bboxes,
proj_mat,
out_dir,
filename,
box_mode='lidar',
img_metas=None,
show=False,
gt_bbox_color=(61, 102, 255),
pred_bbox_color=(241, 101, 72)):
"""Convert multi-modality detection results into 2D results.
Project the predicted 3D bbox to 2D image plane and visualize them.
Args:
img (np.ndarray): The numpy array of image in cv2 fashion.
gt_bboxes (:obj:`BaseInstance3DBoxes`): Ground truth boxes.
pred_bboxes (:obj:`BaseInstance3DBoxes`): Predicted boxes.
proj_mat (numpy.array, shape=[4, 4]): The projection matrix
according to the camera intrinsic parameters.
out_dir (str): Path of output directory.
filename (str): Filename of the current frame.
box_mode (str, optional): Coordinate system the boxes are in.
Should be one of 'depth', 'lidar' and 'camera'.
Defaults to 'lidar'.
img_metas (dict, optional): Used in projecting depth bbox.
Defaults to None.
show (bool, optional): Visualize the results online. Defaults to False.
gt_bbox_color (str or tuple(int), optional): Color of bbox lines.
            The tuple of color should be in BGR order. Default: (61, 102, 255).
pred_bbox_color (str or tuple(int), optional): Color of bbox lines.
            The tuple of color should be in BGR order. Default: (241, 101, 72).
"""
if box_mode == 'depth': | draw_bbox = draw_depth_bbox3d_on_img | 1 | 2023-12-21 12:50:35+00:00 | 8k |
jdejaegh/irm-kmi-ha | custom_components/irm_kmi/coordinator.py | [
{
"identifier": "IrmKmiApiClient",
"path": "custom_components/irm_kmi/api.py",
"snippet": "class IrmKmiApiClient:\n \"\"\"API client for IRM KMI weather data\"\"\"\n COORD_DECIMALS = 6\n\n def __init__(self, session: aiohttp.ClientSession) -> None:\n self._session = session\n self._base_url = \"https://app.meteo.be/services/appv4/\"\n\n async def get_forecasts_coord(self, coord: dict) -> dict:\n \"\"\"Get forecasts for given city.\"\"\"\n assert 'lat' in coord\n assert 'long' in coord\n coord['lat'] = round(coord['lat'], self.COORD_DECIMALS)\n coord['long'] = round(coord['long'], self.COORD_DECIMALS)\n\n response = await self._api_wrapper(params={\"s\": \"getForecasts\", \"k\": _api_key(\"getForecasts\")} | coord)\n return await response.json()\n\n async def get_image(self, url, params: dict | None = None) -> bytes:\n \"\"\"Get the image at the specified url with the parameters\"\"\"\n r: ClientResponse = await self._api_wrapper(base_url=url, params={} if params is None else params)\n return await r.read()\n\n async def _api_wrapper(\n self,\n params: dict,\n base_url: str | None = None,\n path: str = \"\",\n method: str = \"get\",\n data: dict | None = None,\n headers: dict | None = None,\n ) -> any:\n \"\"\"Get information from the API.\"\"\"\n\n try:\n async with async_timeout.timeout(10):\n response = await self._session.request(\n method=method,\n url=f\"{self._base_url if base_url is None else base_url}{path}\",\n headers=headers,\n json=data,\n params=params\n )\n response.raise_for_status()\n return response\n\n except asyncio.TimeoutError as exception:\n raise IrmKmiApiCommunicationError(\"Timeout error fetching information\") from exception\n except (aiohttp.ClientError, socket.gaierror) as exception:\n raise IrmKmiApiCommunicationError(\"Error fetching information\") from exception\n except Exception as exception: # pylint: disable=broad-except\n raise IrmKmiApiError(f\"Something really wrong happened! {exception}\") from exception"
},
{
"identifier": "IrmKmiApiError",
"path": "custom_components/irm_kmi/api.py",
"snippet": "class IrmKmiApiError(Exception):\n \"\"\"Exception to indicate a general API error.\"\"\""
},
{
"identifier": "CONF_DARK_MODE",
"path": "custom_components/irm_kmi/const.py",
"snippet": "CONF_DARK_MODE: Final = \"dark_mode\""
},
{
"identifier": "CONF_STYLE",
"path": "custom_components/irm_kmi/const.py",
"snippet": "CONF_STYLE: Final = \"style\""
},
{
"identifier": "DOMAIN",
"path": "custom_components/irm_kmi/const.py",
"snippet": "DOMAIN: Final = 'irm_kmi'"
},
{
"identifier": "IRM_KMI_TO_HA_CONDITION_MAP",
"path": "custom_components/irm_kmi/const.py",
"snippet": "IRM_KMI_TO_HA_CONDITION_MAP: Final = {\n (0, 'd'): ATTR_CONDITION_SUNNY,\n (0, 'n'): ATTR_CONDITION_CLEAR_NIGHT,\n (1, 'd'): ATTR_CONDITION_SUNNY,\n (1, 'n'): ATTR_CONDITION_CLEAR_NIGHT,\n (2, 'd'): ATTR_CONDITION_LIGHTNING_RAINY,\n (2, 'n'): ATTR_CONDITION_LIGHTNING_RAINY,\n (3, 'd'): ATTR_CONDITION_PARTLYCLOUDY,\n (3, 'n'): ATTR_CONDITION_PARTLYCLOUDY,\n (4, 'd'): ATTR_CONDITION_POURING,\n (4, 'n'): ATTR_CONDITION_POURING,\n (5, 'd'): ATTR_CONDITION_LIGHTNING_RAINY,\n (5, 'n'): ATTR_CONDITION_LIGHTNING_RAINY,\n (6, 'd'): ATTR_CONDITION_POURING,\n (6, 'n'): ATTR_CONDITION_POURING,\n (7, 'd'): ATTR_CONDITION_LIGHTNING_RAINY,\n (7, 'n'): ATTR_CONDITION_LIGHTNING_RAINY,\n (8, 'd'): ATTR_CONDITION_SNOWY_RAINY,\n (8, 'n'): ATTR_CONDITION_SNOWY_RAINY,\n (9, 'd'): ATTR_CONDITION_SNOWY_RAINY,\n (9, 'n'): ATTR_CONDITION_SNOWY_RAINY,\n (10, 'd'): ATTR_CONDITION_LIGHTNING_RAINY,\n (10, 'n'): ATTR_CONDITION_LIGHTNING_RAINY,\n (11, 'd'): ATTR_CONDITION_SNOWY,\n (11, 'n'): ATTR_CONDITION_SNOWY,\n (12, 'd'): ATTR_CONDITION_SNOWY,\n (12, 'n'): ATTR_CONDITION_SNOWY,\n (13, 'd'): ATTR_CONDITION_LIGHTNING_RAINY,\n (13, 'n'): ATTR_CONDITION_LIGHTNING_RAINY,\n (14, 'd'): ATTR_CONDITION_CLOUDY,\n (14, 'n'): ATTR_CONDITION_CLOUDY,\n (15, 'd'): ATTR_CONDITION_CLOUDY,\n (15, 'n'): ATTR_CONDITION_CLOUDY,\n (16, 'd'): ATTR_CONDITION_POURING,\n (16, 'n'): ATTR_CONDITION_POURING,\n (17, 'd'): ATTR_CONDITION_LIGHTNING_RAINY,\n (17, 'n'): ATTR_CONDITION_LIGHTNING_RAINY,\n (18, 'd'): ATTR_CONDITION_RAINY,\n (18, 'n'): ATTR_CONDITION_RAINY,\n (19, 'd'): ATTR_CONDITION_POURING,\n (19, 'n'): ATTR_CONDITION_POURING,\n (20, 'd'): ATTR_CONDITION_SNOWY_RAINY,\n (20, 'n'): ATTR_CONDITION_SNOWY_RAINY,\n (21, 'd'): ATTR_CONDITION_EXCEPTIONAL,\n (21, 'n'): ATTR_CONDITION_EXCEPTIONAL,\n (22, 'd'): ATTR_CONDITION_SNOWY,\n (22, 'n'): ATTR_CONDITION_SNOWY,\n (23, 'd'): ATTR_CONDITION_SNOWY,\n (23, 'n'): ATTR_CONDITION_SNOWY,\n (24, 'd'): ATTR_CONDITION_FOG,\n (24, 'n'): ATTR_CONDITION_FOG,\n (25, 'd'): ATTR_CONDITION_FOG,\n (25, 'n'): ATTR_CONDITION_FOG,\n (26, 'd'): ATTR_CONDITION_FOG,\n (26, 'n'): ATTR_CONDITION_FOG,\n (27, 'd'): ATTR_CONDITION_EXCEPTIONAL,\n (27, 'n'): ATTR_CONDITION_EXCEPTIONAL\n}"
},
{
"identifier": "LANGS",
"path": "custom_components/irm_kmi/const.py",
"snippet": "LANGS: Final = ['en', 'fr', 'nl', 'de']"
},
{
"identifier": "MAP_WARNING_ID_TO_SLUG",
"path": "custom_components/irm_kmi/const.py",
"snippet": "MAP_WARNING_ID_TO_SLUG: Final = {\n 0: 'wind',\n 1: 'rain',\n 2: 'ice_or_snow',\n 3: 'thunder',\n 7: 'fog',\n 9: 'cold',\n 12: 'thunder_wind_rain',\n 13: 'thunderstorm_strong_gusts',\n 14: 'thunderstorm_large_rainfall',\n 15: 'storm_surge',\n 17: 'coldspell'}"
},
{
"identifier": "OPTION_STYLE_SATELLITE",
"path": "custom_components/irm_kmi/const.py",
"snippet": "OPTION_STYLE_SATELLITE: Final = 'satellite_style'"
},
{
"identifier": "OUT_OF_BENELUX",
"path": "custom_components/irm_kmi/const.py",
"snippet": "OUT_OF_BENELUX: Final = [\"außerhalb der Benelux (Brussels)\",\n \"Hors de Belgique (Bxl)\",\n \"Outside the Benelux (Brussels)\",\n \"Buiten de Benelux (Brussel)\"]"
},
{
"identifier": "STYLE_TO_PARAM_MAP",
"path": "custom_components/irm_kmi/const.py",
"snippet": "STYLE_TO_PARAM_MAP: Final = {\n OPTION_STYLE_STD: 1,\n OPTION_STYLE_CONTRAST: 2,\n OPTION_STYLE_YELLOW_RED: 3,\n OPTION_STYLE_SATELLITE: 4\n}"
},
{
"identifier": "AnimationFrameData",
"path": "custom_components/irm_kmi/data.py",
"snippet": "class AnimationFrameData(TypedDict, total=False):\n \"\"\"Holds one single frame of the radar camera, along with the timestamp of the frame\"\"\"\n time: datetime | None\n image: bytes | None\n value: float | None\n position: float | None\n position_higher: float | None\n position_lower: float | None"
},
{
"identifier": "CurrentWeatherData",
"path": "custom_components/irm_kmi/data.py",
"snippet": "class CurrentWeatherData(TypedDict, total=False):\n \"\"\"Class to hold the currently observable weather at a given location\"\"\"\n condition: str | None\n temperature: float | None\n wind_speed: float | None\n wind_gust_speed: float | None\n wind_bearing: float | str | None\n uv_index: float | None\n pressure: float | None"
},
{
"identifier": "IrmKmiForecast",
"path": "custom_components/irm_kmi/data.py",
"snippet": "class IrmKmiForecast(Forecast):\n \"\"\"Forecast class with additional attributes for IRM KMI\"\"\"\n\n # TODO: add condition_2 as well and evolution to match data from the API?\n # TODO: remove the _fr and _nl to have only one 'text' attribute\n text_fr: str | None\n text_nl: str | None"
},
{
"identifier": "ProcessedCoordinatorData",
"path": "custom_components/irm_kmi/data.py",
"snippet": "class ProcessedCoordinatorData(TypedDict, total=False):\n \"\"\"Data class that will be exposed to the entities consuming data from an IrmKmiCoordinator\"\"\"\n current_weather: CurrentWeatherData\n hourly_forecast: List[Forecast] | None\n daily_forecast: List[IrmKmiForecast] | None\n animation: RadarAnimationData\n warnings: List[WarningData] | None"
},
{
"identifier": "RadarAnimationData",
"path": "custom_components/irm_kmi/data.py",
"snippet": "class RadarAnimationData(TypedDict, total=False):\n \"\"\"Holds frames and additional data for the animation to be rendered\"\"\"\n sequence: List[AnimationFrameData] | None\n most_recent_image_idx: int | None\n hint: str | None\n unit: str | None\n location: bytes | None\n svg_still: bytes | None\n svg_animated: bytes | None"
},
{
"identifier": "WarningData",
"path": "custom_components/irm_kmi/data.py",
"snippet": "class WarningData(TypedDict, total=False):\n \"\"\"Holds data about a specific warning\"\"\"\n slug: str\n id: int\n level: int\n friendly_name: str\n text: str\n starts_at: datetime\n ends_at: datetime"
},
{
"identifier": "RainGraph",
"path": "custom_components/irm_kmi/rain_graph.py",
"snippet": "class RainGraph:\n def __init__(self,\n animation_data: RadarAnimationData,\n background_image_path: str,\n background_size: (int, int),\n dark_mode: bool = False,\n tz: str = 'UTC',\n svg_width: float = 640,\n inset: float = 20,\n graph_height: float = 150,\n top_text_space: float = 30,\n top_text_y_pos: float = 20,\n bottom_text_space: float = 50,\n bottom_text_y_pos: float = 218,\n auto=True\n ):\n\n self._animation_data: RadarAnimationData = animation_data\n self._background_image_path: str = background_image_path\n self._background_size: (int, int) = background_size\n self._dark_mode: bool = dark_mode\n self._tz = pytz.timezone(tz)\n self._svg_width: float = svg_width\n self._inset: float = inset\n self._graph_height: float = graph_height\n self._top_text_space: float = top_text_space + background_size[1]\n self._top_text_y_pos: float = top_text_y_pos + background_size[1]\n self._bottom_text_space: float = bottom_text_space\n self._bottom_text_y_pos: float = bottom_text_y_pos + background_size[1]\n\n self._frame_count: int = len(self._animation_data['sequence'])\n self._graph_width: float = self._svg_width - 2 * self._inset\n self._graph_bottom: float = self._top_text_space + self._graph_height\n self._svg_height: float = self._graph_height + self._top_text_space + self._bottom_text_space\n self._interval_width: float = self._graph_width / self._frame_count\n self._offset: float = self._inset + self._interval_width / 2\n\n if not (0 <= self._top_text_y_pos <= self._top_text_space):\n raise ValueError(\"It must hold that 0 <= top_text_y_pos <= top_text_space\")\n\n if not (self._graph_bottom <= self._bottom_text_y_pos <= self._graph_bottom + self._bottom_text_space):\n raise ValueError(\"bottom_text_y_pos must be below the graph\")\n\n self._dwg: Drawing = Drawing(size=(self._svg_width, self._svg_height), profile='full')\n self._dwg_save: Drawing\n self._dwg_animated: Drawing\n self._dwg_still: Drawing\n\n if auto:\n self.draw_svg_frame()\n self.draw_hour_bars()\n self.draw_chances_path()\n self.draw_data_line()\n self.write_hint()\n self.insert_background()\n self._dwg_save = copy.deepcopy(self._dwg)\n\n self.draw_current_fame_line()\n self.draw_description_text()\n self.insert_cloud_layer()\n self.draw_location()\n self._dwg_animated = self._dwg\n\n self._dwg = self._dwg_save\n idx = self._animation_data['most_recent_image_idx']\n self.draw_current_fame_line(idx)\n self.draw_description_text(idx)\n self.insert_cloud_layer(idx)\n self.draw_location()\n self._dwg_still = self._dwg\n\n def draw_svg_frame(self):\n \"\"\"Create the global area to draw the other items\"\"\"\n self._dwg.embed_font(name=\"Roboto Medium\", filename='custom_components/irm_kmi/resources/roboto_medium.ttf')\n self._dwg.embed_stylesheet(\"\"\"\n .roboto {\n font-family: \"Roboto Medium\";\n }\n \"\"\")\n\n fill_color = '#393C40' if self._dark_mode else '#385E95'\n self._dwg.add(self._dwg.rect(insert=(0, 0),\n size=(self._svg_width, self._svg_height),\n rx=None, ry=None,\n fill=fill_color, stroke='none'))\n\n def draw_description_text(self, idx: int | None = None):\n \"\"\"For every frame write the amount of precipitation and the time at the top of the graph.\n If idx is set, only do it for the given idx\"\"\"\n\n times = [e['time'].astimezone(tz=self._tz).strftime('%H:%M') for e in\n self._animation_data['sequence']]\n rain_levels = [f\"{e['value']}{self._animation_data['unit']}\" for e in self._animation_data['sequence']]\n\n if idx is not None:\n time = times[idx]\n rain_level = rain_levels[idx]\n\n 
paragraph = self._dwg.add(self._dwg.g(class_=\"roboto\", ))\n\n self.write_time_and_rain(paragraph, rain_level, time)\n return\n\n for i in range(self._frame_count):\n time = times[i]\n rain_level = rain_levels[i]\n\n paragraph = self._dwg.add(self._dwg.g(class_=\"roboto\", ))\n\n values = ['hidden'] * self._frame_count\n values[i] = 'visible'\n\n paragraph.add(Animate(\n attributeName=\"visibility\",\n values=\";\".join(values),\n dur=f\"{self._frame_count * 0.3}s\",\n begin=\"0s\",\n repeatCount=\"indefinite\"\n ))\n\n self.write_time_and_rain(paragraph, rain_level, time)\n\n def write_time_and_rain(self, paragraph, rain_level, time):\n \"\"\"Using the paragraph object, write the time and rain level data\"\"\"\n paragraph.add(self._dwg.text(f\"{time}\", insert=(self._offset, self._top_text_y_pos),\n text_anchor=\"start\",\n font_size=\"16px\",\n fill=\"white\",\n stroke='none'))\n paragraph.add(self._dwg.text(f\"{rain_level}\", insert=(self._svg_width / 2, self._top_text_y_pos),\n text_anchor=\"middle\",\n font_size=\"16px\",\n fill=\"white\",\n stroke='none'))\n\n def write_hint(self):\n \"\"\"Add the hint text at the bottom of the graph\"\"\"\n paragraph = self._dwg.add(self._dwg.g(class_=\"roboto\", ))\n\n hint = self._animation_data['hint']\n\n paragraph.add(self._dwg.text(f\"{hint}\", insert=(self._svg_width / 2, self._bottom_text_y_pos),\n text_anchor=\"middle\",\n font_size=\"16px\",\n fill=\"white\",\n stroke='none'))\n\n def draw_chances_path(self):\n \"\"\"Draw the prevision margin area around the main forecast line\"\"\"\n list_lower_points = []\n list_higher_points = []\n\n rain_list: List[AnimationFrameData] = self._animation_data['sequence']\n graph_rect_left = self._offset\n graph_rect_top = self._top_text_space\n\n for i in range(len(rain_list)):\n position_higher = rain_list[i]['position_higher']\n if position_higher is not None:\n list_higher_points.append((graph_rect_left, graph_rect_top + (\n 1.0 - position_higher) * self._graph_height))\n graph_rect_left += self._interval_width\n\n graph_rect_right = graph_rect_left - self._interval_width\n for i in range(len(rain_list) - 1, -1, -1):\n position_lower = rain_list[i]['position_lower']\n if position_lower is not None:\n list_lower_points.append((graph_rect_right, graph_rect_top + (\n 1.0 - position_lower) * self._graph_height))\n graph_rect_right -= self._interval_width\n\n if list_higher_points and list_lower_points:\n self.draw_chance_precip(list_higher_points, list_lower_points)\n\n def draw_chance_precip(self, list_higher_points: List, list_lower_points: List):\n \"\"\"Draw the blue solid line representing the actual rain forecast\"\"\"\n precip_higher_chance_path = self._dwg.path(fill='#63c8fa', stroke='none', opacity=.3)\n\n list_higher_points[-1] = tuple(list(list_higher_points[-1]) + ['last'])\n\n self.set_curved_path(precip_higher_chance_path, list_higher_points + list_lower_points)\n self._dwg.add(precip_higher_chance_path)\n\n @staticmethod\n def set_curved_path(path, points):\n \"\"\"Pushes points on the path by creating a nice curve between them\"\"\"\n if len(points) < 2:\n return\n\n path.push('M', *points[0])\n\n for i in range(1, len(points)):\n x_mid = (points[i - 1][0] + points[i][0]) / 2\n y_mid = (points[i - 1][1] + points[i][1]) / 2\n\n path.push('Q', points[i - 1][0], points[i - 1][1], x_mid, y_mid)\n if points[i][-1] == 'last' or points[i - 1][-1] == 'last':\n path.push('Q', points[i][0], points[i][1], points[i][0], points[i][1])\n\n path.push('Q', points[-1][0], points[-1][1], points[-1][0], 
points[-1][1])\n\n def draw_data_line(self):\n \"\"\"Draw the main data line for the rain forecast\"\"\"\n rain_list: List[AnimationFrameData] = self._animation_data['sequence']\n graph_rect_left = self._offset\n graph_rect_top = self._top_text_space\n\n entry_list = []\n\n for i in range(len(rain_list)):\n position = rain_list[i]['position']\n entry_list.append(\n (graph_rect_left,\n graph_rect_top + (1.0 - position) * self._graph_height))\n graph_rect_left += self._interval_width\n data_line_path = self._dwg.path(fill='none', stroke='#63c8fa', stroke_width=2)\n self.set_curved_path(data_line_path, entry_list)\n self._dwg.add(data_line_path)\n\n def draw_hour_bars(self):\n \"\"\"Draw the small bars at the bottom to represent the time\"\"\"\n hour_bar_height = 8\n horizontal_inset = self._offset\n\n for (i, rain_item) in enumerate(self._animation_data['sequence']):\n time_image = rain_item['time'].astimezone(tz=self._tz)\n is_hour_bar = time_image.minute == 0\n\n x_position = horizontal_inset\n if i == self._animation_data['most_recent_image_idx']:\n self._dwg.add(self._dwg.line(start=(x_position, self._top_text_space),\n end=(x_position, self._graph_bottom),\n stroke='white',\n opacity=0.5,\n stroke_dasharray=4))\n\n self._dwg.add(self._dwg.line(start=(x_position, self._graph_bottom - hour_bar_height),\n end=(x_position, self._graph_bottom),\n stroke='white' if is_hour_bar else 'lightgrey',\n opacity=0.9 if is_hour_bar else 0.7))\n\n if is_hour_bar:\n graph_rect_center_x = x_position\n graph_rect_center_y = self._graph_bottom + 18\n\n paragraph = self._dwg.add(self._dwg.g(class_=\"roboto\", ))\n paragraph.add(self._dwg.text(f\"{time_image.hour}h\", insert=(graph_rect_center_x, graph_rect_center_y),\n text_anchor=\"middle\",\n font_size=\"16px\",\n fill=\"white\",\n stroke='none'))\n\n horizontal_inset += self._interval_width\n\n self._dwg.add(self._dwg.line(start=(self._offset, self._graph_bottom),\n end=(self._graph_width + self._interval_width / 2, self._graph_bottom),\n stroke='white'))\n\n def draw_current_fame_line(self, idx: int | None = None):\n \"\"\"Draw a solid white line on the timeline at the position of the given frame index\"\"\"\n x_position = self._offset if idx is None else self._offset + idx * self._interval_width\n now = self._dwg.add(self._dwg.line(start=(x_position, self._top_text_space),\n end=(x_position, self._graph_bottom),\n id='now',\n stroke='white',\n opacity=1,\n stroke_width=2))\n if idx is not None:\n return\n now.add(self._dwg.animateTransform(\"translate\", \"transform\",\n id=\"now\",\n from_=f\"{self._offset} 0\",\n to=f\"{self._graph_width - self._offset} 0\",\n dur=f\"{self._frame_count * 0.3}s\",\n repeatCount=\"indefinite\"))\n\n def get_svg_string(self, still_image: bool = False) -> bytes:\n return self._dwg_still.tostring().encode() if still_image else self._dwg_animated.tostring().encode()\n\n def insert_background(self):\n with open(self._background_image_path, 'rb') as f:\n png_data = base64.b64encode(f.read()).decode('utf-8')\n image = self._dwg.image(\"data:image/png;base64,\" + png_data, insert=(0, 0), size=self._background_size)\n self._dwg.add(image)\n\n def insert_cloud_layer(self, idx: int | None = None):\n imgs = [e['image'] for e in self._animation_data['sequence']]\n\n if idx is not None:\n img = imgs[idx]\n png_data = base64.b64encode(img).decode('utf-8')\n image = self._dwg.image(\"data:image/png;base64,\" + png_data, insert=(0, 0), size=self._background_size)\n self._dwg.add(image)\n return\n\n for i, img in enumerate(imgs):\n 
png_data = base64.b64encode(img).decode('utf-8')\n image = self._dwg.image(\"data:image/png;base64,\" + png_data, insert=(0, 0), size=self._background_size)\n self._dwg.add(image)\n\n values = ['hidden'] * self._frame_count\n values[i] = 'visible'\n\n image.add(Animate(\n attributeName=\"visibility\",\n values=\";\".join(values),\n dur=f\"{self._frame_count * 0.3}s\",\n begin=\"0s\",\n repeatCount=\"indefinite\"\n ))\n\n def draw_location(self):\n img = self._animation_data['location']\n png_data = base64.b64encode(img).decode('utf-8')\n image = self._dwg.image(\"data:image/png;base64,\" + png_data, insert=(0, 0), size=self._background_size)\n self._dwg.add(image)\n\n def get_dwg(self):\n return copy.deepcopy(self._dwg)"
},
{
"identifier": "disable_from_config",
"path": "custom_components/irm_kmi/utils.py",
"snippet": "def disable_from_config(hass: HomeAssistant, config_entry: ConfigEntry):\n modify_from_config(hass, config_entry.entry_id, False)"
},
{
"identifier": "get_config_value",
"path": "custom_components/irm_kmi/utils.py",
"snippet": "def get_config_value(config_entry: ConfigEntry, key: str) -> Any:\n if config_entry.options and key in config_entry.options:\n return config_entry.options[key]\n return config_entry.data[key]"
}
] | import asyncio
import logging
import async_timeout
import pytz
from datetime import datetime, timedelta
from typing import Any, List, Tuple
from homeassistant.components.weather import Forecast
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_LATITUDE, ATTR_LONGITUDE, CONF_ZONE
from homeassistant.core import HomeAssistant
from homeassistant.helpers import issue_registry
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.update_coordinator import (DataUpdateCoordinator,
UpdateFailed)
from .api import IrmKmiApiClient, IrmKmiApiError
from .const import CONF_DARK_MODE, CONF_STYLE, DOMAIN
from .const import IRM_KMI_TO_HA_CONDITION_MAP as CDT_MAP
from .const import LANGS
from .const import MAP_WARNING_ID_TO_SLUG as SLUG_MAP
from .const import OPTION_STYLE_SATELLITE, OUT_OF_BENELUX, STYLE_TO_PARAM_MAP
from .data import (AnimationFrameData, CurrentWeatherData, IrmKmiForecast,
ProcessedCoordinatorData, RadarAnimationData, WarningData)
from .rain_graph import RainGraph
from .utils import disable_from_config, get_config_value | 6,533 | """DataUpdateCoordinator for the IRM KMI integration."""
_LOGGER = logging.getLogger(__name__)
class IrmKmiCoordinator(DataUpdateCoordinator):
"""Coordinator to update data from IRM KMI"""
def __init__(self, hass: HomeAssistant, entry: ConfigEntry):
"""Initialize the coordinator."""
super().__init__(
hass,
_LOGGER,
# Name of the data. For logging purposes.
name="IRM KMI weather",
# Polling interval. Will only be polled if there are subscribers.
update_interval=timedelta(minutes=7),
)
self._api_client = IrmKmiApiClient(session=async_get_clientsession(hass))
self._zone = get_config_value(entry, CONF_ZONE)
| """DataUpdateCoordinator for the IRM KMI integration."""
_LOGGER = logging.getLogger(__name__)
class IrmKmiCoordinator(DataUpdateCoordinator):
"""Coordinator to update data from IRM KMI"""
def __init__(self, hass: HomeAssistant, entry: ConfigEntry):
"""Initialize the coordinator."""
super().__init__(
hass,
_LOGGER,
# Name of the data. For logging purposes.
name="IRM KMI weather",
# Polling interval. Will only be polled if there are subscribers.
update_interval=timedelta(minutes=7),
)
self._api_client = IrmKmiApiClient(session=async_get_clientsession(hass))
self._zone = get_config_value(entry, CONF_ZONE) | self._dark_mode = get_config_value(entry, CONF_DARK_MODE) | 2 | 2023-12-17 16:35:01+00:00 | 8k |
v3ucn/Bert-vits2-V2.2 | webui.py | [
{
"identifier": "split_by_language",
"path": "tools/sentence.py",
"snippet": "def split_by_language(text: str, target_languages: list = None) -> list:\n pattern = (\n r\"[\\!\\\"\\#\\$\\%\\&\\'\\(\\)\\*\\+\\,\\-\\.\\/\\:\\;\\<\\>\\=\\?\\@\\[\\]\\{\\}\\\\\\\\\\^\\_\\`\"\n r\"\\!?\\。"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」\"\n r\"『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘\\'\\‛\\“\\”\\„\\‟…‧﹏.]+\"\n )\n sentences = re.split(pattern, text)\n\n pre_lang = \"\"\n start = 0\n end = 0\n sentences_list = []\n\n if target_languages is not None:\n sorted_target_languages = sorted(target_languages)\n if sorted_target_languages in [[\"en\", \"zh\"], [\"en\", \"ja\"], [\"en\", \"ja\", \"zh\"]]:\n new_sentences = []\n for sentence in sentences:\n new_sentences.extend(split_alpha_nonalpha(sentence))\n sentences = new_sentences\n\n for sentence in sentences:\n if check_is_none(sentence):\n continue\n\n lang = classify_language(sentence, target_languages)\n\n end += text[end:].index(sentence)\n if pre_lang != \"\" and pre_lang != lang:\n sentences_list.append((text[start:end], pre_lang))\n start = end\n end += len(sentence)\n pre_lang = lang\n sentences_list.append((text[start:], pre_lang))\n\n return sentences_list"
},
{
"identifier": "infer",
"path": "infer.py",
"snippet": "def get_net_g(model_path: str, version: str, device: str, hps):\ndef get_text(text, language_str, hps, device):\ndef infer(\n text,\n emotion,\n sdp_ratio,\n noise_scale,\n noise_scale_w,\n length_scale,\n sid,\n language,\n hps,\n net_g,\n device,\n reference_audio=None,\n skip_start=False,\n skip_end=False,\n):\ndef infer_multilang(\n text,\n sdp_ratio,\n noise_scale,\n noise_scale_w,\n length_scale,\n sid,\n language,\n hps,\n net_g,\n device,\n reference_audio=None,\n emotion=None,\n skip_start=False,\n skip_end=False,\n):"
},
{
"identifier": "config",
"path": "config.py",
"snippet": "class Resample_config:\nclass Preprocess_text_config:\nclass Bert_gen_config:\nclass Emo_gen_config:\nclass Train_ms_config:\nclass Webui_config:\nclass Server_config:\nclass Translate_config:\nclass Config:\n def __init__(self, in_dir: str, out_dir: str, sampling_rate: int = 44100):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n transcription_path: str,\n cleaned_path: str,\n train_path: str,\n val_path: str,\n config_path: str,\n val_per_lang: int = 5,\n max_val_total: int = 10000,\n clean: bool = True,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n num_processes: int = 2,\n device: str = \"cuda\",\n use_multi_device: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n num_processes: int = 2,\n device: str = \"cuda\",\n use_multi_device: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n env: Dict[str, any],\n base: Dict[str, any],\n model: str,\n num_workers: int,\n spec_cache: bool,\n keep_ckpts: int,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n device: str,\n model: str,\n config_path: str,\n language_identification_library: str,\n port: int = 7860,\n share: bool = False,\n debug: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self, models: List[Dict[str, any]], port: int = 5000, device: str = \"cuda\"\n ):\n def from_dict(cls, data: Dict[str, any]):\n def __init__(self, app_key: str, secret_key: str):\n def from_dict(cls, data: Dict[str, any]):\n def __init__(self, config_path: str):"
},
{
"identifier": "translate",
"path": "tools/translate.py",
"snippet": "def translate(Sentence: str, to_Language: str = \"jp\", from_Language: str = \"\"):\n \"\"\"\n :param Sentence: 待翻译语句\n :param from_Language: 待翻译语句语言\n :param to_Language: 目标语言\n :return: 翻译后语句 出错时返回None\n\n 常见语言代码:中文 zh 英语 en 日语 jp\n \"\"\"\n appid = config.translate_config.app_key\n key = config.translate_config.secret_key\n if appid == \"\" or key == \"\":\n return \"请开发者在config.yml中配置app_key与secret_key\"\n url = \"https://fanyi-api.baidu.com/api/trans/vip/translate\"\n texts = Sentence.splitlines()\n outTexts = []\n for t in texts:\n if t != \"\":\n # 签名计算 参考文档 https://api.fanyi.baidu.com/product/113\n salt = str(random.randint(1, 100000))\n signString = appid + t + salt + key\n hs = hashlib.md5()\n hs.update(signString.encode(\"utf-8\"))\n signString = hs.hexdigest()\n if from_Language == \"\":\n from_Language = \"auto\"\n headers = {\"Content-Type\": \"application/x-www-form-urlencoded\"}\n payload = {\n \"q\": t,\n \"from\": from_Language,\n \"to\": to_Language,\n \"appid\": appid,\n \"salt\": salt,\n \"sign\": signString,\n }\n # 发送请求\n try:\n response = requests.post(\n url=url, data=payload, headers=headers, timeout=3\n )\n response = response.json()\n if \"trans_result\" in response.keys():\n result = response[\"trans_result\"][0]\n if \"dst\" in result.keys():\n dst = result[\"dst\"]\n outTexts.append(dst)\n except Exception:\n return Sentence\n else:\n outTexts.append(t)\n return \"\\n\".join(outTexts)"
}
] | import os
import logging
import re_matching
import torch
import utils
import gradio as gr
import webbrowser
import numpy as np
import librosa
from tools.sentence import split_by_language
from infer import infer, latest_version, get_net_g, infer_multilang
from config import config
from tools.translate import translate | 3,828 | net_g=net_g,
device=device,
skip_start=skip_start,
skip_end=skip_end,
)
audio_list_sent.append(audio)
silence = np.zeros((int)(44100 * interval_between_sent))
audio_list_sent.append(silence)
if (interval_between_para - interval_between_sent) > 0:
silence = np.zeros(
(int)(44100 * (interval_between_para - interval_between_sent))
)
audio_list_sent.append(silence)
audio16bit = gr.processing_utils.convert_to_16_bit_wav(
np.concatenate(audio_list_sent)
            )  # normalize the volume over the complete sentence
audio_list.append(audio16bit)
audio_concat = np.concatenate(audio_list)
return ("Success", (44100, audio_concat))
def tts_fn(
text: str,
speaker,
sdp_ratio,
noise_scale,
noise_scale_w,
length_scale,
language,
reference_audio,
emotion,
prompt_mode,
):
if prompt_mode == "Audio prompt":
if reference_audio == None:
return ("Invalid audio prompt", None)
else:
reference_audio = load_audio(reference_audio)[1]
else:
reference_audio = None
audio_list = []
if language == "mix":
bool_valid, str_valid = re_matching.validate_text(text)
if not bool_valid:
return str_valid, (
hps.data.sampling_rate,
np.concatenate([np.zeros(hps.data.sampling_rate // 2)]),
)
result = []
for slice in re_matching.text_matching(text):
_speaker = slice.pop()
temp_contant = []
temp_lang = []
for lang, content in slice:
if "|" in content:
temp = []
temp_ = []
for i in content.split("|"):
if i != "":
temp.append([i])
temp_.append([lang])
else:
temp.append([])
temp_.append([])
temp_contant += temp
temp_lang += temp_
else:
if len(temp_contant) == 0:
temp_contant.append([])
temp_lang.append([])
temp_contant[-1].append(content)
temp_lang[-1].append(lang)
for i, j in zip(temp_lang, temp_contant):
result.append([*zip(i, j), _speaker])
for i, one in enumerate(result):
skip_start = i != 0
skip_end = i != len(result) - 1
_speaker = one.pop()
idx = 0
while idx < len(one):
text_to_generate = []
lang_to_generate = []
while True:
lang, content = one[idx]
temp_text = [content]
if len(text_to_generate) > 0:
text_to_generate[-1] += [temp_text.pop(0)]
lang_to_generate[-1] += [lang]
if len(temp_text) > 0:
text_to_generate += [[i] for i in temp_text]
lang_to_generate += [[lang]] * len(temp_text)
if idx + 1 < len(one):
idx += 1
else:
break
skip_start = (idx != 0) and skip_start
skip_end = (idx != len(one) - 1) and skip_end
print(text_to_generate, lang_to_generate)
audio_list.extend(
generate_audio_multilang(
text_to_generate,
sdp_ratio,
noise_scale,
noise_scale_w,
length_scale,
_speaker,
lang_to_generate,
reference_audio,
emotion,
skip_start,
skip_end,
)
)
idx += 1
elif language.lower() == "auto":
for idx, slice in enumerate(text.split("|")):
if slice == "":
continue
skip_start = idx != 0
skip_end = idx != len(text.split("|")) - 1
| # flake8: noqa: E402
logging.getLogger("numba").setLevel(logging.WARNING)
logging.getLogger("markdown_it").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("matplotlib").setLevel(logging.WARNING)
logging.basicConfig(
level=logging.INFO, format="| %(name)s | %(levelname)s | %(message)s"
)
logger = logging.getLogger(__name__)
net_g = None
device = config.webui_config.device
if device == "mps":
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
def generate_audio(
slices,
sdp_ratio,
noise_scale,
noise_scale_w,
length_scale,
speaker,
language,
reference_audio,
emotion,
skip_start=False,
skip_end=False,
):
audio_list = []
# silence = np.zeros(hps.data.sampling_rate // 2, dtype=np.int16)
with torch.no_grad():
for idx, piece in enumerate(slices):
skip_start = (idx != 0) and skip_start
skip_end = (idx != len(slices) - 1) and skip_end
audio = infer(
piece,
reference_audio=reference_audio,
emotion=emotion,
sdp_ratio=sdp_ratio,
noise_scale=noise_scale,
noise_scale_w=noise_scale_w,
length_scale=length_scale,
sid=speaker,
language=language,
hps=hps,
net_g=net_g,
device=device,
skip_start=skip_start,
skip_end=skip_end,
)
audio16bit = gr.processing_utils.convert_to_16_bit_wav(audio)
audio_list.append(audio16bit)
            # audio_list.append(silence)  # append the silence to the list
return audio_list
def generate_audio_multilang(
slices,
sdp_ratio,
noise_scale,
noise_scale_w,
length_scale,
speaker,
language,
reference_audio,
emotion,
skip_start=False,
skip_end=False,
):
audio_list = []
# silence = np.zeros(hps.data.sampling_rate // 2, dtype=np.int16)
with torch.no_grad():
for idx, piece in enumerate(slices):
skip_start = (idx != 0) and skip_start
skip_end = (idx != len(slices) - 1) and skip_end
audio = infer_multilang(
piece,
reference_audio=reference_audio,
emotion=emotion,
sdp_ratio=sdp_ratio,
noise_scale=noise_scale,
noise_scale_w=noise_scale_w,
length_scale=length_scale,
sid=speaker,
language=language[idx],
hps=hps,
net_g=net_g,
device=device,
skip_start=skip_start,
skip_end=skip_end,
)
audio16bit = gr.processing_utils.convert_to_16_bit_wav(audio)
audio_list.append(audio16bit)
# audio_list.append(silence) # 将静音添加到列表中
return audio_list
def tts_split(
text: str,
speaker,
sdp_ratio,
noise_scale,
noise_scale_w,
length_scale,
language,
cut_by_sent,
interval_between_para,
interval_between_sent,
reference_audio,
emotion,
):
if language == "mix":
return ("invalid", None)
while text.find("\n\n") != -1:
text = text.replace("\n\n", "\n")
para_list = re_matching.cut_para(text)
audio_list = []
if not cut_by_sent:
for idx, p in enumerate(para_list):
skip_start = idx != 0
skip_end = idx != len(para_list) - 1
audio = infer(
p,
reference_audio=reference_audio,
emotion=emotion,
sdp_ratio=sdp_ratio,
noise_scale=noise_scale,
noise_scale_w=noise_scale_w,
length_scale=length_scale,
sid=speaker,
language=language,
hps=hps,
net_g=net_g,
device=device,
skip_start=skip_start,
skip_end=skip_end,
)
audio16bit = gr.processing_utils.convert_to_16_bit_wav(audio)
audio_list.append(audio16bit)
silence = np.zeros((int)(44100 * interval_between_para), dtype=np.int16)
audio_list.append(silence)
else:
for idx, p in enumerate(para_list):
skip_start = idx != 0
skip_end = idx != len(para_list) - 1
audio_list_sent = []
sent_list = re_matching.cut_sent(p)
for idx, s in enumerate(sent_list):
skip_start = (idx != 0) and skip_start
skip_end = (idx != len(sent_list) - 1) and skip_end
audio = infer(
s,
reference_audio=reference_audio,
emotion=emotion,
sdp_ratio=sdp_ratio,
noise_scale=noise_scale,
noise_scale_w=noise_scale_w,
length_scale=length_scale,
sid=speaker,
language=language,
hps=hps,
net_g=net_g,
device=device,
skip_start=skip_start,
skip_end=skip_end,
)
audio_list_sent.append(audio)
silence = np.zeros((int)(44100 * interval_between_sent))
audio_list_sent.append(silence)
if (interval_between_para - interval_between_sent) > 0:
silence = np.zeros(
(int)(44100 * (interval_between_para - interval_between_sent))
)
audio_list_sent.append(silence)
audio16bit = gr.processing_utils.convert_to_16_bit_wav(
np.concatenate(audio_list_sent)
            )  # normalize the volume over the complete sentence
audio_list.append(audio16bit)
audio_concat = np.concatenate(audio_list)
return ("Success", (44100, audio_concat))
def tts_fn(
text: str,
speaker,
sdp_ratio,
noise_scale,
noise_scale_w,
length_scale,
language,
reference_audio,
emotion,
prompt_mode,
):
if prompt_mode == "Audio prompt":
if reference_audio == None:
return ("Invalid audio prompt", None)
else:
reference_audio = load_audio(reference_audio)[1]
else:
reference_audio = None
audio_list = []
if language == "mix":
bool_valid, str_valid = re_matching.validate_text(text)
if not bool_valid:
return str_valid, (
hps.data.sampling_rate,
np.concatenate([np.zeros(hps.data.sampling_rate // 2)]),
)
result = []
for slice in re_matching.text_matching(text):
_speaker = slice.pop()
temp_contant = []
temp_lang = []
for lang, content in slice:
if "|" in content:
temp = []
temp_ = []
for i in content.split("|"):
if i != "":
temp.append([i])
temp_.append([lang])
else:
temp.append([])
temp_.append([])
temp_contant += temp
temp_lang += temp_
else:
if len(temp_contant) == 0:
temp_contant.append([])
temp_lang.append([])
temp_contant[-1].append(content)
temp_lang[-1].append(lang)
for i, j in zip(temp_lang, temp_contant):
result.append([*zip(i, j), _speaker])
for i, one in enumerate(result):
skip_start = i != 0
skip_end = i != len(result) - 1
_speaker = one.pop()
idx = 0
while idx < len(one):
text_to_generate = []
lang_to_generate = []
while True:
lang, content = one[idx]
temp_text = [content]
if len(text_to_generate) > 0:
text_to_generate[-1] += [temp_text.pop(0)]
lang_to_generate[-1] += [lang]
if len(temp_text) > 0:
text_to_generate += [[i] for i in temp_text]
lang_to_generate += [[lang]] * len(temp_text)
if idx + 1 < len(one):
idx += 1
else:
break
skip_start = (idx != 0) and skip_start
skip_end = (idx != len(one) - 1) and skip_end
print(text_to_generate, lang_to_generate)
audio_list.extend(
generate_audio_multilang(
text_to_generate,
sdp_ratio,
noise_scale,
noise_scale_w,
length_scale,
_speaker,
lang_to_generate,
reference_audio,
emotion,
skip_start,
skip_end,
)
)
idx += 1
elif language.lower() == "auto":
for idx, slice in enumerate(text.split("|")):
if slice == "":
continue
skip_start = idx != 0
skip_end = idx != len(text.split("|")) - 1 | sentences_list = split_by_language( | 0 | 2023-12-18 04:54:46+00:00 | 8k |
d-krupke/CP-SAT-Log-Analyzer | tests/test_examples.py | [
{
"identifier": "LogParser",
"path": "cpsat_log_parser/parser.py",
"snippet": "class LogParser:\n def __init__(self, log: typing.Union[str, typing.List[str]]) -> None:\n self.comments, log_without_comments = self._extract_comments(log)\n self.blocks = self.parse_blocks(log_without_comments)\n\n def parse_blocks(\n self, log: typing.Union[str, typing.List[str]]\n ) -> typing.List[LogBlock]:\n \"\"\"\n Parse a log into its blocks.\n \"\"\"\n blocks = []\n sub_parser = ALL_BLOCKS\n for data in _split_log(log):\n for parser in sub_parser:\n if parser.matches(data):\n blocks.append(parser(data))\n break\n else:\n raise ValueError(f\"Could not parse data: {data}\")\n return blocks\n\n def _extract_comments(\n self, log: typing.Union[str, typing.List[str]]\n ) -> typing.Tuple[typing.List[str], typing.List[str]]:\n \"\"\"\n Extract the comments from a log.\n \"\"\"\n if isinstance(log, str):\n log = log.split(\"\\n\")\n if not isinstance(log, list):\n raise TypeError(\"log must be a list or a string\")\n comments = []\n data = []\n for line in log:\n if line.startswith(\"//\"):\n comments.append(line[2:].strip())\n else:\n data.append(line)\n return comments, data\n\n def get_block_of_type(self, block_type: typing.Type[LogBlock]) -> LogBlock:\n for block in self.blocks:\n if isinstance(block, block_type):\n return block\n raise KeyError(f\"Could not find block '{block_type.__name__}'\")"
},
{
"identifier": "SearchProgressBlock",
"path": "cpsat_log_parser/blocks/search_progress.py",
"snippet": "class SearchProgressBlock(LogBlock):\n def __init__(self, lines: typing.List[str]) -> None:\n lines = [line.strip() for line in lines if line.strip()]\n if not lines:\n raise ValueError(\"No lines to parse\")\n if not self.matches(lines):\n raise ValueError(\"Lines do not match SearchProgressBlock\")\n self.lines = lines\n\n @staticmethod\n def matches(lines: typing.List[str]) -> bool:\n if not lines:\n return False\n return lines[0].strip().lower().startswith(\"Starting search\".lower())\n\n def _parse_events(\n self,\n ) -> typing.List[typing.Union[BoundEvent, ObjEvent, ModelEvent]]:\n \"\"\"\n Parse the log file into a list of BoundEvent and ObjEvent.\n \"\"\"\n events = []\n for line in self.lines:\n obj_event = ObjEvent.parse(line)\n if obj_event:\n events.append(obj_event)\n continue\n bound_event = BoundEvent.parse(line)\n if bound_event:\n events.append(bound_event)\n continue\n model_event = ModelEvent.parse(line)\n if model_event:\n events.append(model_event)\n continue\n return events\n\n def get_presolve_time(self) -> float:\n # first line looks like this \"Starting search at 16.74s with 24 workers.\"\n m = re.match(\n r\"Starting [Ss]earch at (?P<time>\\d+\\.\\d+s) with \\d+ workers.\",\n self.lines[0],\n )\n if m:\n return parse_time(m.group(\"time\"))\n raise ValueError(f\"Could not parse presolve time from '{self.lines[0]}'\")\n\n def get_title(self) -> str:\n return \"Search progress:\"\n\n def get_help(self) -> typing.Optional[str]:\n return \"\"\"\nThe search progress log is an essential element of the overall log, crucial for identifying performance bottlenecks. It clearly demonstrates the solver's progression over time and pinpoints where it faces significant challenges. It is important to discern whether the upper or lower bounds are causing issues, or if the solver initially finds a near-optimal solution but struggles to minimize a small remaining gap.\n\nThe structure of the log entries is standardized as follows:\n\n`EVENT NAME\\t|\\tTIME\\t|\\tBEST SOLUTION\\t|\\tRANGE OF THE SEARCH\\t|\\tCOMMENT`\n\nFor instance, an event marked `#2` indicates the discovery of the second solution. Here, you will observe an improvement in the `BEST SOLUTION` metric. A notation like `best:16` confirms that the solver has found a solution with a value of 16.\n\nAn event with `#Bound` denotes an enhancement in the bound, as seen by a reduction in the `RANGE OF THE SEARCH`. A detail such as `next:[7,14]` signifies that the solver is now focused on finding a solution valued between 7 and 14.\n\nThe `COMMENT` section provides essential information about the strategies that led to these improvements.\n\nEvents labeled `#Model` signal modifications to the model, such as fixing certain variables.\n\nTo fully grasp the nuances, zooming into the plot is necessary, especially since the initial values can be quite large. 
A thorough examination of which sections of the process converge quickest is crucial for a comprehensive understanding.\n \"\"\"\n\n def gap_as_plotly(self) -> typing.Optional[go.Figure]:\n gap_events = [\n e\n for e in self._parse_events()\n if isinstance(e, BoundEvent) or isinstance(e, ObjEvent)\n ]\n\n def is_valid_gap(gap):\n if gap is None:\n return False\n if not math.isfinite(gap):\n return False\n return True\n\n gaps = [(e.time, e.get_gap()) for e in gap_events if is_valid_gap(e.get_gap())]\n fig = go.Figure()\n if not gap_events:\n return None\n # add gaps\n fig.add_trace(\n go.Scatter(\n x=[t for t, _ in gaps],\n y=[gap for _, gap in gaps],\n mode=\"lines+markers\",\n line=dict(color=\"purple\"),\n name=\"Gap\",\n hovertext=[e.msg for e in gap_events],\n )\n )\n # make the x-axis start at 0\n fig.update_xaxes(range=[0, 1.01 * gaps[-1][0]])\n max_gap = max(gap for _, gap in gaps if gap is not None)\n # make the y-axis start at 0\n fig.update_yaxes(range=[-1, min(300, 1.01 * max_gap)])\n fig.update_layout(\n title=\"Optimality Gap\",\n xaxis_title=\"Time (s)\",\n yaxis_title=\"Gap (%)\",\n legend_title=\"Legend\",\n font=dict(family=\"Courier New, monospace\", size=18, color=\"RebeccaPurple\"),\n )\n return fig\n\n def model_changes_as_plotly(self) -> typing.Optional[go.Figure]:\n \"\"\"\n Plot the model changes in percent over time.\n \"\"\"\n model_events = [e for e in self._parse_events() if isinstance(e, ModelEvent)]\n fig = go.Figure()\n if not model_events:\n return None\n # add number of vars\n fig.add_trace(\n go.Scatter(\n x=[e.time for e in model_events],\n y=[100 * (e.vars_remaining / e.vars) for e in model_events],\n mode=\"lines+markers\",\n line=dict(color=\"green\"),\n name=\"Variables\",\n hovertext=[e.msg for e in model_events],\n )\n )\n # add number of constraints\n fig.add_trace(\n go.Scatter(\n x=[e.time for e in model_events],\n y=[100 * (e.constr_remaining / e.constr) for e in model_events],\n mode=\"lines+markers\",\n line=dict(color=\"orange\"),\n name=\"Constraints\",\n hovertext=[e.msg for e in model_events],\n )\n )\n # make the x-axis start at 0\n fig.update_xaxes(range=[0, 1.01 * model_events[-1].time])\n # make the y-axis range from 0 to 100\n fig.update_yaxes(range=[0, 101])\n fig.update_layout(\n title=\"Model changes\",\n xaxis_title=\"Time (s)\",\n yaxis_title=\"Remaining (%)\",\n legend_title=\"Legend\",\n font=dict(family=\"Courier New, monospace\", size=18, color=\"RebeccaPurple\"),\n )\n return fig\n\n def as_plotly(self) -> typing.Optional[go.Figure]:\n \"\"\"\n Plot the progress of the solver.\n \"\"\"\n events = self._parse_events()\n obj_events = [e for e in events if isinstance(e, ObjEvent)]\n bound_events = [e for e in events if isinstance(e, BoundEvent)]\n fig = go.Figure()\n if not obj_events and not bound_events:\n return None\n max_time = max([e.time for e in bound_events + obj_events])\n\n # make sure that both bounds and objs have a value at max_time\n if obj_events and obj_events[-1].time < max_time:\n if bound_events[-1].obj is None:\n # Should nearly never happen\n obj_events.append(\n ObjEvent(\n time=max_time,\n obj=obj_events[-1].obj,\n bound=bound_events[-1].bound,\n msg=\"\",\n )\n )\n else:\n obj_events.append(\n ObjEvent(\n time=max_time,\n obj=bound_events[-1].obj,\n bound=bound_events[-1].bound,\n msg=\"\",\n )\n )\n if bound_events and bound_events[-1].time < max_time:\n bound_events.append(\n BoundEvent(\n time=max_time,\n obj=obj_events[-1].obj,\n bound=obj_events[-1].bound,\n msg=\"\",\n )\n )\n\n # plot the bounds 
over time. Add the comment as hover text\n fig.add_trace(\n go.Scatter(\n x=[b.time for b in bound_events],\n y=[b.bound for b in bound_events],\n mode=\"lines+markers\",\n line=dict(color=\"cyan\"),\n name=\"Bound\",\n hovertext=[b.msg for b in bound_events],\n )\n )\n\n # plot the objective values over time. Add the comment as hover text\n fig.add_trace(\n go.Scatter(\n x=[o.time for o in obj_events],\n y=[o.obj for o in obj_events],\n mode=\"lines+markers\",\n line=dict(color=\"red\"),\n name=\"Objective\",\n hovertext=[o.msg for o in obj_events],\n )\n )\n\n # make the x-axis start at 0\n fig.update_xaxes(range=[0, 1.01 * max_time])\n fig.update_layout(\n title=\"Search progress\",\n xaxis_title=\"Time (s)\",\n yaxis_title=\"Objective\",\n legend_title=\"Legend\",\n font=dict(family=\"Courier New, monospace\", size=18, color=\"RebeccaPurple\"),\n )\n return fig"
},
{
"identifier": "SolverBlock",
"path": "cpsat_log_parser/blocks/solver.py",
"snippet": "class SolverBlock(LogBlock):\n def __init__(self, lines: typing.List[str]) -> None:\n super().__init__(lines)\n\n def _parse_parameters(self, line: str) -> typing.Dict:\n \"\"\"\n\n The parameters line can look like this:\n \"Parameters: log_search_progress: true use_timetabling_in_no_overlap_2d: true use_energetic_reasoning_in_no_overlap_2d: true use_pairwise_reasoning_in_no_overlap_2d: true\"\n \"\"\"\n parameters = {}\n line = line[len(\"Parameters:\") :]\n for match in re.finditer(r\"(?P<key>\\w+): (?P<value>[^ ]+)\", line):\n parameters[match.group(\"key\")] = match.group(\"value\")\n return parameters\n\n def get_title(self) -> str:\n return \"Solver Information\"\n\n def get_help(self) -> str:\n return \"\"\"This block contains basic information about the solver.\n As CP-SAT is still under active development and makes serious improvements with every release, it is important to know which version of the solver was used.\n The number of workers, i.e., the level of parallelism, is also important to know.\n CP-SAT is a portfolio solver and the higher the number of workers, the more strategies are used.\n You can find an overview of the different tiers activated by the number of workers in the [CP-SAT documentation](https://github.com/google/or-tools/blob/main/ortools/sat/docs/troubleshooting.md#improving-performance-with-multiple-workers).\n While you should be careful with tinkering with the parameters (they have sensible defaults), it is still good to know which parameters were used.\n All of these information are actually already shown in the overview.\n \"\"\"\n\n @staticmethod\n def matches(lines: typing.List[str]) -> bool:\n if not lines:\n return False\n return lines[0].strip().startswith(\"Starting CP-SAT solver\")\n\n def get_parameters(self) -> typing.Dict:\n for line in self.lines:\n if line.startswith(\"Parameters:\"):\n return self._parse_parameters(line)\n raise ValueError(\"No parameters found\")\n\n def get_number_of_workers(self) -> int:\n # the line looks like this: \"Setting number of workers to 24\"\n for line in self.lines:\n if line.startswith(\"Setting number of workers to\"):\n return int(line.strip().split(\" \")[-1])\n # If `num_search_workers` is set, the number of workers is not shown in the log.\n if \"num_search_workers\" in self.get_parameters():\n return int(self.get_parameters()[\"num_search_workers\"])\n raise ValueError(\"No number of workers found\")\n\n def get_version(self) -> str:\n # the line looks like this: \"Starting CP-SAT solver v9.7.2996\"\n for line in self.lines:\n if line.startswith(\"Starting CP-SAT solver\"):\n return line.strip().split(\" \")[-1]\n raise ValueError(\"No version found\")\n\n def get_parsed_version(self) -> typing.Tuple[int, int, int]:\n # the line looks like this: \"Starting CP-SAT solver v9.7.2996\"\n version = self.get_version()[1:]\n major, minor, patch = version.split(\".\")\n return int(major), int(minor), int(patch)"
},
{
"identifier": "ResponseBlock",
"path": "cpsat_log_parser/blocks/solver_response.py",
"snippet": "class ResponseBlock(LogBlock):\n def __init__(self, lines: typing.List[str]) -> None:\n super().__init__(lines)\n\n @staticmethod\n def matches(lines: typing.List[str]) -> bool:\n if not lines:\n return False\n return lines[0].startswith(\"CpSolverResponse\")\n\n def get_title(self) -> str:\n return \"CpSolverResponse\"\n\n def to_dict(self) -> dict:\n d = {}\n for line in self.lines:\n if line.startswith(\"CpSolverResponse\"):\n continue\n key, value = line.split(\":\")\n key = key.strip()\n value = value.strip()\n if key == \"status\":\n value = value.split(\" \")[0]\n d[key] = value\n return d\n\n def get_gap(self):\n vals = self.to_dict()\n try:\n obj = float(vals[\"objective\"])\n bound = float(vals[\"best_bound\"])\n except TypeError:\n return None\n except ValueError:\n return None\n return 100 * (abs(obj - bound) / max(1, abs(obj)))\n\n def to_pandas(self) -> pd.DataFrame:\n return pd.DataFrame([self.to_dict()])\n\n def get_help(self) -> typing.Optional[str]:\n return \"\"\"\n This final block of the log contains a summary by the solver.\n Here you find the most important information, such as how successful the search was.\n\n You can find the original documentation [here](https://github.com/google/or-tools/blob/8768ed7a43f8899848effb71295a790f3ecbe2f2/ortools/sat/cp_model.proto#L720).\n \"\"\""
}
] | import os
import sys
from cpsat_log_parser import LogParser
from cpsat_log_parser.blocks import (
SearchProgressBlock,
SolverBlock,
ResponseBlock,
) | 3,942 |
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
EXAMPLE_DIR = os.path.join(os.path.dirname(__file__), "../example_logs")
def test_all_examples():
for file in os.listdir(EXAMPLE_DIR):
if file.endswith(".txt"):
with open(os.path.join(EXAMPLE_DIR, file)) as f:
print(f"Testing {file}")
data = f.read()
|
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
EXAMPLE_DIR = os.path.join(os.path.dirname(__file__), "../example_logs")
def test_all_examples():
for file in os.listdir(EXAMPLE_DIR):
if file.endswith(".txt"):
with open(os.path.join(EXAMPLE_DIR, file)) as f:
print(f"Testing {file}")
data = f.read() | parser = LogParser(data) | 0 | 2023-12-18 09:18:19+00:00 | 8k |
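A minimal, self-contained sketch (not part of the dataset row above) of two pieces of logic from its snippets: the `Parameters:` regex used by `SolverBlock._parse_parameters` and the gap formula used by `ResponseBlock.get_gap`. The sample log line and the objective/bound values below are invented for illustration.

import re

# Invented line in the format SolverBlock._parse_parameters expects.
line = "Parameters: log_search_progress: true num_search_workers: 8"
params = {
    m.group("key"): m.group("value")
    for m in re.finditer(r"(?P<key>\w+): (?P<value>[^ ]+)", line[len("Parameters:"):])
}
print(params)  # {'log_search_progress': 'true', 'num_search_workers': '8'}

# Gap formula from ResponseBlock.get_gap, applied to made-up values.
obj, bound = 105.0, 100.0
gap = 100 * (abs(obj - bound) / max(1, abs(obj)))
print(f"gap = {gap:.2f}%")  # gap = 4.76%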
KatantDev/YMdantic | ymdantic/models/tracks/track.py | [
{
"identifier": "DeprecatedMixin",
"path": "ymdantic/mixins.py",
"snippet": "class DeprecatedMixin:\n \"\"\"Миксин, удаляющий устаревшие поля из модели.\"\"\"\n\n @model_validator(mode=\"before\")\n def remove_deprecated(cls, obj: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"\n Удаляет устаревшие поля из модели.\n\n :param obj: Словарь с данными модели.\n :return: Словарь с данными модели без устаревших полей.\n \"\"\"\n obj.pop(\"substituted\", None)\n obj.pop(\"deprecation\", None)\n obj.pop(\"decomposed\", None)\n if obj.get(\"version\") is not None:\n obj[\"title\"] += f\" ({obj.get('version')})\"\n obj.pop(\"version\")\n return obj"
},
{
"identifier": "Artist",
"path": "ymdantic/models/artists/artist.py",
"snippet": "class Artist(YMBaseModel, DeprecatedMixin):\n \"\"\"Pydantic модель, представляющая информацию об артисте.\"\"\"\n\n id: int\n # Уникальный идентификатор артиста.\n name: str\n # Имя артиста.\n various: bool\n # Флаг, указывающий, является ли артист группой.\n composer: bool\n # Флаг, указывающий, является ли артист композитором.\n genres: List[str]\n # Жанры треков артиста.\n disclaimers: List[Literal[\"\"]] # TODO: Проверить, что тут может быть.\n # Список отказов от ответственности артиста.\n cover: Optional[Cover] = None\n # Обложка артиста.\n\n @model_validator(mode=\"before\")\n def validate_genres(cls, artist: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"\n Этот метод класса конвертирует жанры в данных об артисте в новый вид.\n\n Он проверяет, присутствует ли ключ 'genre' в словаре альбома. Если\n он присутствует, он присваивает список, содержащий жанр,\n ключу 'genres' словаря альбома. Если ключ 'genre' отсутствует,\n он присваивает пустой список ключу 'genres'.\n\n :param artist: Словарь, содержащий информацию об артисте.\n :return: Словарь, содержащий информацию об артисте с конвертированными\n жанрами.\n \"\"\"\n genre = artist.get(\"genre\")\n artist.pop(\"genre\", None)\n artist[\"genres\"] = [genre] if genre else []\n return artist\n\n def get_cover_image_url(self, size: str = \"200x200\") -> Optional[HttpUrl]:\n \"\"\"\n Возвращает URL изображения обложки артиста с заданным размером.\n\n :param size: Размер изображения.\n :return: URL изображения обложки артиста с заданным размером.\n \"\"\"\n if self.cover is None:\n return None\n return self.cover.get_image_url(size)"
},
{
"identifier": "YMBaseModel",
"path": "ymdantic/models/base.py",
"snippet": "class YMBaseModel(BaseModel, ClientMixin):\n \"\"\"Базовая Pydantic модель для всех будущих моделей.\"\"\"\n\n model_config = ConfigDict(\n alias_generator=to_camel,\n populate_by_name=True,\n extra=\"forbid\",\n )"
},
{
"identifier": "ChartPosition",
"path": "ymdantic/models/chart_position.py",
"snippet": "class ChartPosition(YMBaseModel):\n \"\"\"Pydantic модель, представляющая позицию трека в чарте.\"\"\"\n\n position: int\n # Позиция трека в чарте.\n progress: Literal[\"same\", \"up\", \"down\", \"new\"]\n # Информация о том, как изменилась позиция трека за последнее время.\n listeners: int\n # Количество слушателей трека на прошлой неделе.\n shift: int\n # Количество позиций, на которое изменилась позиция трека за последнее время.\n bg_color: Optional[str] = None\n # Цвет фона позиции трека."
},
{
"identifier": "R128",
"path": "ymdantic/models/tracks/r128.py",
"snippet": "class R128(YMBaseModel):\n \"\"\"\n Pydantic модель, представляющая данные нормализации громкости по стандарту EBU R128.\n\n Стандарт EBU R128 используется для измерения воспринимаемой громкости аудиоконтента.\n \"\"\"\n\n i: float\n # Значение интегрированной громкости в LUFS (единицы громкости\n # относительно полной шкалы). Это общая громкость всего аудиофрагмента.\n tp: float\n # Истинный пиковый уровень в dBTP (децибелы истинного пика).\n # Это самый высокий уровень в аудиоконтенте, учитывающий межсемпловые пики."
},
{
"identifier": "Fade",
"path": "ymdantic/models/tracks/fade.py",
"snippet": "class Fade(YMBaseModel):\n \"\"\"Pydantic модель, представляющая информацию о постепенном переходе в треке.\"\"\"\n\n in_start: float\n # Время в секундах, когда начинается постепенное увеличение громкости.\n in_stop: float\n # Время в секундах, когда заканчивается постепенное увеличение громкости.\n out_start: float\n # Время в секундах, когда начинается постепенное уменьшение громкости.\n out_stop: float\n # Время в секундах, когда заканчивается постепенное уменьшение громкости."
},
{
"identifier": "DerivedColors",
"path": "ymdantic/models/tracks/derived_colors.py",
"snippet": "class DerivedColors(YMBaseModel):\n \"\"\"Pydantic модель, представляющая производные цвета обложки альбома.\"\"\"\n\n average: str\n # Средний цвет обложки в формате HEX.\n wave_text: str\n # Цвет текста волновой формы в формате HEX.\n mini_player: str\n # Цвет мини-плеера в формате HEX.\n accent: str\n # Акцентный цвет в формате HEX."
},
{
"identifier": "TrackAlbum",
"path": "ymdantic/models/tracks/album.py",
"snippet": "class TrackAlbum(BaseAlbum):\n \"\"\"Pydantic модель, представляющая информацию об альбоме с текущим треком.\"\"\"\n\n start_date: Optional[date] = None\n # Дата начала альбома.\n track_position: Optional[TrackPosition] = None\n # Позиция трека в альбоме (если есть)."
},
{
"identifier": "LyricsInfo",
"path": "ymdantic/models/tracks/lyrics_info.py",
"snippet": "class LyricsInfo(YMBaseModel):\n \"\"\"\n Pydantic модель, представляющая информацию о наличии текста песни.\n\n Наличие текста и синхронизированных текстов песни.\n \"\"\"\n\n has_available_sync_lyrics: bool\n # Флаг, указывающий на наличие синхронизированных текстов песни.\n has_available_text_lyrics: bool\n # Флаг, указывающий на наличие текста песни."
},
{
"identifier": "Major",
"path": "ymdantic/models/tracks/major.py",
"snippet": "class Major(YMBaseModel):\n \"\"\"Pydantic модель, представляющая основную информацию о лейбле трека.\"\"\"\n\n id: int\n # Уникальный идентификатор лейбла.\n name: str\n # Название лейбла."
},
{
"identifier": "DownloadInfo",
"path": "ymdantic/models/tracks/download_info.py",
"snippet": "class DownloadInfo(YMBaseModel):\n \"\"\"Pydantic модель, представляющая информацию о скачивании трека.\"\"\"\n\n codec: CodecType\n # Кодек трека. Возможные значения: \"mp3\", \"aac\".\n gain: bool\n # Флаг для нормализации громкости трека (видимо).\n preview: bool\n # Доступно ли предварительное прослушивание трека.\n download_info_url: HttpUrl\n # Ссылка на S3-хранилище с данными для формирования ссылки на скачивание трека.\n direct: bool\n # Является ли ссылка на S3-хранилище прямой ссылкой на скачивание трека.\n bitrate_in_kbps: int\n # Битрейт трека в кбит/с."
},
{
"identifier": "DownloadInfoDirect",
"path": "ymdantic/models/tracks/download_info.py",
"snippet": "class DownloadInfoDirect(DownloadInfo):\n direct_url_info: S3FileUrl\n\n @property\n def direct_url(self) -> HttpUrl:\n \"\"\"\n Генерирует прямой URL для скачивания трека.\n\n Этот метод возвращает URL, сформированный на основе информации о прямом URL,\n хранящейся в атрибуте 'direct_url_info' экземпляра.\n\n :return: Прямой URL для скачивания трека.\n \"\"\"\n return self.direct_url_info.url"
}
] | from typing import List, Optional, Literal
from pydantic import HttpUrl
from ymdantic.mixins import DeprecatedMixin
from ymdantic.models.artists import Artist
from ymdantic.models.base import YMBaseModel
from ymdantic.models.chart_position import ChartPosition
from ymdantic.models.tracks.r128 import R128
from ymdantic.models.tracks.fade import Fade
from ymdantic.models.tracks.derived_colors import DerivedColors
from ymdantic.models.tracks.album import TrackAlbum
from ymdantic.models.tracks.lyrics_info import LyricsInfo
from ymdantic.models.tracks.major import Major
from ymdantic.models.tracks.download_info import DownloadInfo, DownloadInfoDirect | 5,019 | og_image: Optional[str] = None
# OG изображение трека (если есть). OG изображение - это изображение,
# которое отображается при публикации ссылки на трек.
derived_colors: Optional[DerivedColors] = None
# Производные цвета трека (если есть). Производные цвета - это цвета,
# которые были получены из обложки трека.
clip_ids: Optional[List[int]] = None
# Идентификаторы клипов трека. Клип - это видео, которое относится к треку.
content_warning: Optional[str] = None
# Предупреждение о содержании трека (если есть).
is_suitable_for_children: Optional[bool] = None
# Подходит ли трек для детей (если есть).
background_video_uri: Optional[HttpUrl] = None
# URI фонового видео трека (если есть). Фоновое видео - это видео,
# которое отображается вместо обложки трека.
player_id: Optional[str] = None
# Идентификатор плеера трека (если есть). Плеер требуется для
# отображения фонового видео.
best: Optional[bool] = None
# Является ли трек лучшим (поле доступно при получении альбома с треками
# `get_album_with_tracks`).
@property
def artists_names(self) -> Optional[str]:
"""
Получает имена артистов трека.
:return: Имена артистов трека.
"""
if not self.artists:
return None
return ", ".join(artist.name for artist in self.artists)
def get_cover_image_url(self, size: str = "200x200") -> Optional[HttpUrl]:
"""
Получает URL изображения обложки.
:param size: Размер изображения обложки в пикселях.
По умолчанию 200x200.
:return: URL изображения обложки.
"""
if self.cover_uri is None:
return None
return HttpUrl(f"https://{self.cover_uri.replace('%%', size)}")
def get_og_image_url(self, size: str = "200x200") -> Optional[HttpUrl]:
"""
Получает URL изображения обложки.
:param size: Размер изображения обложки в пикселях.
По умолчанию 200x200.
:return: URL изображения обложки.
"""
if self.og_image is None:
return None
return HttpUrl(f"https://{self.og_image.replace('%%', size)}")
class UnavailableTrack(BaseTrack):
"""
Pydantic модель, представляющая недоступный трек.
В случае, если трек недоступен, то его нельзя скачать и прослушать.
Большинство полей, такие как: `storage_dir`, `available_for_options`,
`duration_ms`, `preview_duration_ms`, `file_size` и `lyrics_info` по
сути своей бесполезны для недоступных вида треков и зачастую
отсутствуют. Но по какой-то причине в некоторых треках они всё же есть.
"""
available: Literal[False]
# Доступность трека. В данном случае трек недоступен.
error: Optional[Literal["no-rights"]] = None
# Ошибка, связанная с треком. В данном случае может быть ошибка
# "no-rights", что означает отсутствие прав на трек.
title: Optional[str] = None
# Название трека. В данном случае название может отсутствовать
# (возникает очень редко).
track_sharing_flag: Optional[str] = None
# Флаг, указывающий на возможность делиться треком. В данном случае
# может отсутствовать (возникает очень редко).
storage_dir: Optional[str] = None
# Директория хранения трека. У недоступных треков почти всегда равна
# пустой строке или отсутствует.
available_for_options: Optional[AvailableForOptions] = None
# Доступные опции для трека. В данном случае опции могут отсутствовать.
duration_ms: Optional[int] = None
# Длительность трека в миллисекундах. В данном случае длительность может
# отсутствовать.
preview_duration_ms: Optional[int] = None
# Длительность предпросмотра трека в миллисекундах. В данном случае
# длительность предпросмотра может отсутствовать.
file_size: Optional[int] = None
# Размер файла трека. В данном случае размер файла может отсутствовать.
lyrics_info: Optional[LyricsInfo] = None
# Информация о тексте песни. В данном случае информация о тексте песни
# может отсутствовать.
class Track(BaseTrack):
available: Literal[True]
# Доступность трека. В данном случае трек доступен.
title: str
# Название трека.
track_sharing_flag: str
# Флаг, указывающий на возможность делиться треком.
storage_dir: str
# Директория хранения трека.
lyrics_info: LyricsInfo
# Информация о тексте песни.
duration_ms: int
# Длительность трека в миллисекундах.
preview_duration_ms: int
# Длительность предпросмотра трека в миллисекундах.
file_size: Literal[0]
# Размер файла трека. Всегда равен 0, видимо старая заглушка.
available_for_options: AvailableForOptions
# Доступные опции для трека.
chart: Optional[ChartPosition] = None
# Информация о чарте, если трек входит в чарт.
|
AvailableForOptions = List[Literal["bookmate"]]
TrackSource = Literal["OWN", "OWN_REPLACED_TO_UGC"]
class BaseTrack(YMBaseModel, DeprecatedMixin):
"""Pydantic модель, представляющая базовую информацию о любом треке."""
type: Literal["music", "asmr", "audiobook", "noise", "fairy-tale"]
# Тип трека.
id: str
# Идентификатор трека. Идентификатор трека - это уникальный
# идентификатор, по которому можно получить трек.
real_id: str
# Реальный идентификатор трека. Заглушка для замещенных треков.
available: bool
# Доступность трека. В данном случае трек недоступен. Это влияет на то,
# можно ли скачать и прослушать трек.
available_for_premium_users: bool
# Доступность трека для премиум пользователей.
available_full_without_permission: bool
# Полная доступность трека без разрешения.
disclaimers: List[Literal["modal"]]
# Список отказов от ответственности трека.
artists: List[Artist]
# Список артистов трека. Может быть пустым.
albums: List[TrackAlbum]
# Список альбомов трека. Может быть пустым.
lyrics_available: bool
# Доступность текста песни. Если текст песни доступен, то можно получить
# текст песни по данным из LyricsInfo.
remember_position: bool
# Запоминать ли позицию трека. В типе "music" зачастую равен False.
# В основном используется для подкастов, комментариев и аудиокниг.
track_source: TrackSource
# Источник трека
major: Optional[Major] = None
# Лейбл трека (если есть)
r128: Optional[R128] = None
# Значение R128 трека (если есть). R128 - это стандарт, который
# определяет уровень громкости аудио.
fade: Optional[Fade] = None
# Значение затухания трека (если есть). Затухание - это изменение
# громкости аудио на определенном участке.
cover_uri: Optional[str] = None
# URI обложки трека (если есть).
og_image: Optional[str] = None
# OG изображение трека (если есть). OG изображение - это изображение,
# которое отображается при публикации ссылки на трек.
derived_colors: Optional[DerivedColors] = None
# Производные цвета трека (если есть). Производные цвета - это цвета,
# которые были получены из обложки трека.
clip_ids: Optional[List[int]] = None
# Идентификаторы клипов трека. Клип - это видео, которое относится к треку.
content_warning: Optional[str] = None
# Предупреждение о содержании трека (если есть).
is_suitable_for_children: Optional[bool] = None
# Подходит ли трек для детей (если есть).
background_video_uri: Optional[HttpUrl] = None
# URI фонового видео трека (если есть). Фоновое видео - это видео,
# которое отображается вместо обложки трека.
player_id: Optional[str] = None
# Идентификатор плеера трека (если есть). Плеер требуется для
# отображения фонового видео.
best: Optional[bool] = None
# Является ли трек лучшим (поле доступно при получении альбома с треками
# `get_album_with_tracks`).
@property
def artists_names(self) -> Optional[str]:
"""
Получает имена артистов трека.
:return: Имена артистов трека.
"""
if not self.artists:
return None
return ", ".join(artist.name for artist in self.artists)
def get_cover_image_url(self, size: str = "200x200") -> Optional[HttpUrl]:
"""
Получает URL изображения обложки.
:param size: Размер изображения обложки в пикселях.
По умолчанию 200x200.
:return: URL изображения обложки.
"""
if self.cover_uri is None:
return None
return HttpUrl(f"https://{self.cover_uri.replace('%%', size)}")
def get_og_image_url(self, size: str = "200x200") -> Optional[HttpUrl]:
"""
Получает URL изображения обложки.
:param size: Размер изображения обложки в пикселях.
По умолчанию 200x200.
:return: URL изображения обложки.
"""
if self.og_image is None:
return None
return HttpUrl(f"https://{self.og_image.replace('%%', size)}")
class UnavailableTrack(BaseTrack):
"""
Pydantic модель, представляющая недоступный трек.
В случае, если трек недоступен, то его нельзя скачать и прослушать.
Большинство полей, такие как: `storage_dir`, `available_for_options`,
`duration_ms`, `preview_duration_ms`, `file_size` и `lyrics_info` по
сути своей бесполезны для недоступных вида треков и зачастую
отсутствуют. Но по какой-то причине в некоторых треках они всё же есть.
"""
available: Literal[False]
# Доступность трека. В данном случае трек недоступен.
error: Optional[Literal["no-rights"]] = None
# Ошибка, связанная с треком. В данном случае может быть ошибка
# "no-rights", что означает отсутствие прав на трек.
title: Optional[str] = None
# Название трека. В данном случае название может отсутствовать
# (возникает очень редко).
track_sharing_flag: Optional[str] = None
# Флаг, указывающий на возможность делиться треком. В данном случае
# может отсутствовать (возникает очень редко).
storage_dir: Optional[str] = None
# Директория хранения трека. У недоступных треков почти всегда равна
# пустой строке или отсутствует.
available_for_options: Optional[AvailableForOptions] = None
# Доступные опции для трека. В данном случае опции могут отсутствовать.
duration_ms: Optional[int] = None
# Длительность трека в миллисекундах. В данном случае длительность может
# отсутствовать.
preview_duration_ms: Optional[int] = None
# Длительность предпросмотра трека в миллисекундах. В данном случае
# длительность предпросмотра может отсутствовать.
file_size: Optional[int] = None
# Размер файла трека. В данном случае размер файла может отсутствовать.
lyrics_info: Optional[LyricsInfo] = None
# Информация о тексте песни. В данном случае информация о тексте песни
# может отсутствовать.
class Track(BaseTrack):
available: Literal[True]
# Доступность трека. В данном случае трек доступен.
title: str
# Название трека.
track_sharing_flag: str
# Флаг, указывающий на возможность делиться треком.
storage_dir: str
# Директория хранения трека.
lyrics_info: LyricsInfo
# Информация о тексте песни.
duration_ms: int
# Длительность трека в миллисекундах.
preview_duration_ms: int
# Длительность предпросмотра трека в миллисекундах.
file_size: Literal[0]
# Размер файла трека. Всегда равен 0, видимо старая заглушка.
available_for_options: AvailableForOptions
# Доступные опции для трека.
chart: Optional[ChartPosition] = None
# Информация о чарте, если трек входит в чарт.
| async def get_download_info(self) -> List[DownloadInfo]: | 10 | 2023-12-21 21:24:10+00:00 | 8k |
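A small hedged sketch (not part of the row above) of the cover-URL convention used by `BaseTrack.get_cover_image_url` and `get_og_image_url`: the stored URI contains a `%%` placeholder that is replaced by the requested size before the scheme is prepended. The URI below is a made-up placeholder, not real data.

def build_cover_url(cover_uri: str, size: str = "200x200") -> str:
    # Mirrors BaseTrack.get_cover_image_url: swap the %% size placeholder, add the scheme.
    return f"https://{cover_uri.replace('%%', size)}"

print(build_cover_url("avatars.example.net/get-music-content/abc/%%", "400x400"))
# -> https://avatars.example.net/get-music-content/abc/400x400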
MMC-K/multimodal_understanding | text_generated_image_to_image_retriever/text_to_image_retrieval.py | [
{
"identifier": "FaissScorerExhaustiveGPU",
"path": "index_scorer.py",
"snippet": "class FaissScorerExhaustiveGPU(object):\n _NEED_TO_SET_CANDIDATES=False\n\n def __init__(self, \n fvec_root,\n nprobe=1,\n gpu=0,\n **kwargs,\n ) -> None:\n self.gpu = gpu\n\n self.fpath_list = list(sorted(list(glob.glob(os.path.join(fvec_root, \"*\")))))\n self.index = self.load_index(gpu)\n self.index.nprobe = nprobe\n\n def load_index(self, fvec_root, gpu=0):\n # gpu resources\n res = faiss.StandardGpuResources()\n\n logger.info('loading fvecs...')\n data = [fvecs_read(path) for path in self.fpath_list]\n d = data[0].shape[-1]\n \n logger.info('vector dim: {}'.format(d))\n index_flat = faiss.IndexFlatIP(d)\n index = faiss.index_cpu_to_gpu(res, gpu, index_flat)\n logger.info('adding index...')\n for ds in data:\n index.add(ds)\n \n return index\n \n def get_topk(self, query_vec, k=4):\n return self.index.search(query_vec, k)"
},
{
"identifier": "FaissScorerExhaustiveMultiGPU",
"path": "index_scorer.py",
"snippet": "class FaissScorerExhaustiveMultiGPU(object):\n _NEED_TO_SET_CANDIDATES=False\n\n def __init__(self, \n fvec_root,\n nprobe=1,\n gpu_list=None,\n **kwargs,\n ) -> None:\n self.fpath_list = list(sorted(list(glob.glob(os.path.join(fvec_root, \"*\")))))\n\n self.gpu_list = gpu_list\n if self.gpu_list is None:\n self.gpu_list = list(range(faiss.get_num_gpus()))\n\n self.index = self.load_index(fvec_root)\n self.index.nprobe = nprobe\n\n\n def load_index(self, fvec_root):\n\n logger.info('loading fvecs...')\n logger.info(self.fpath_list)\n data = [fvecs_read(path) for path in self.fpath_list]\n data = np.concatenate(tuple(data), axis=0)\n d = data.shape[-1]\n \n logger.info('vector dim: {}'.format(d))\n index_flat = faiss.IndexFlatIP(d)\n gmco = faiss.GpuMultipleClonerOptions()\n gmco.shard = True\n index = faiss.index_cpu_to_gpus_list(index_flat, gmco, self.gpu_list)\n\n logger.info('adding index...')\n index.add(data)\n \n return index\n \n def get_topk(self, query_vec, k=4):\n return self.index.search(query_vec, k)"
},
{
"identifier": "FaissScorer",
"path": "index_scorer.py",
"snippet": "class FaissScorer(FaissScorerBase):\n\n def __init__(self, \n index_path,\n fvec_root=\"\",\n proportion_for_training=1.0,\n index_str=\"IVF65536,Flat\",\n nprobe=4,\n **kwargs,\n ) -> None:\n super(FaissScorer, self).__init__(fvec_root)\n \n self.index_path=index_path\n self.proportion_for_training = proportion_for_training\n \n self.index = self.load_index(index_str)\n self.index.nprobe = nprobe\n\n def load_index(self, index_str=\"IVF65536,Flat\"):\n if not os.path.isfile(self.index_path):\n data = self.load_data(self.proportion_for_training)\n d = data.shape[-1]\n index = faiss.index_factory(d, index_str, faiss.METRIC_INNER_PRODUCT)\n logger.info('training index...')\n index.train(data)\n logger.info('loading fvecs...')\n data = self.load_data()\n logger.info('adding index...')\n index.add(data)\n faiss.write_index(index, self.index_path)\n \n return faiss.read_index(self.index_path)\n \n def get_topk(self, query_vec, k=4):\n return self.index.search(query_vec, k)"
},
{
"identifier": "DatasetForImages",
"path": "data_utils.py",
"snippet": "class DatasetForImages(Dataset):\n def __init__(\n self,\n file_path: str,\n image_tokenizer: ViTFeatureExtractor,\n shard_idx: int=0,\n num_shards: int=1,\n image_root_dir=None,\n ):\n super().__init__()\n self.file_path = file_path\n self.image_tokenizer = image_tokenizer\n self.image_root_dir=image_root_dir\n\n logger.info(\"loading dataset...\")\n\n self.data = [\n item for item in csv.DictReader(\n open(file_path, \"r\"), \n delimiter=\"\\t\", \n quoting=csv.QUOTE_NONE, \n fieldnames=['path', 'image_url']\n )\n ]\n\n self.shard_idx = shard_idx\n if num_shards > 1:\n self.data = get_list(self.data, num_shards, shard_idx)\n\n logger.info(\"{} examples was loaded.\".format(len(self.data)))\n\n def __getitem__(self, index):\n sample = self.data[index]\n\n path = sample[\"path\"]\n if self.image_root_dir is not None:\n path = os.path.join(self.image_root_dir, path)\n\n image = Image.open(path).convert(\"RGB\")\n\n image_feature = self.image_tokenizer(images=image, return_tensors=\"pt\")\n\n return {\n \"pixel_values\": image_feature[\"pixel_values\"],\n }\n\n def __len__(self):\n return len(self.data)\n\n def get_collate_fn(self):\n def collate_fn(samples):\n if len(samples) == 0:\n return {}\n return {\n \"pixel_values\": default_collate([s[\"pixel_values\"][0] for s in samples])\n }\n return collate_fn"
},
{
"identifier": "VisionT5SimpleBiEncoder",
"path": "modeling_encoder.py",
"snippet": "class VisionT5SimpleBiEncoder(BiEncoderBase):\n _ENCODER_TYPE='biencoder'\n\n def __init__(self,\n args=None,\n vision_encoder=None,\n language_encoder=None):\n super(VisionT5SimpleBiEncoder, self).__init__(\n args=args,\n vision_encoder=vision_encoder,\n language_encoder=language_encoder\n )\n\n def load_weight_from_args(self, args):\n self.vision_encoder = ViTModel.from_pretrained(args.vision_model)\n self.language_encoder = T5EncoderSimple.from_pretrained(args.language_model)\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n root_path = args[0]\n \n enc_path_q = os.path.join(root_path, \"vision\")\n args_q = copy.deepcopy(list(args))\n args_q[0] = enc_path_q\n vision_encoder = ViTModel.from_pretrained(*tuple(args_q), **kwargs)\n\n enc_path_k = os.path.join(root_path, \"language\")\n args_k = copy.deepcopy(list(args))\n args_k[0] = enc_path_k\n language_encoder = T5EncoderSimple.from_pretrained(*tuple(args_k), **kwargs)\n\n return cls(vision_encoder=vision_encoder, language_encoder=language_encoder)"
},
{
"identifier": "VisionT5MeanBiEncoder",
"path": "modeling_encoder.py",
"snippet": "class VisionT5MeanBiEncoder(BiEncoderBase):\n _ENCODER_TYPE='biencoder'\n\n def __init__(self,\n args=None,\n vision_encoder=None,\n language_encoder=None):\n super(VisionT5MeanBiEncoder, self).__init__(\n args=args,\n vision_encoder=vision_encoder,\n language_encoder=language_encoder\n )\n\n def load_weight_from_args(self, args):\n self.vision_encoder = ViTModel.from_pretrained(args.vision_model)\n self.language_encoder = T5EncoderMean.from_pretrained(args.language_model)\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n root_path = args[0]\n \n enc_path_q = os.path.join(root_path, \"vision\")\n args_q = copy.deepcopy(list(args))\n args_q[0] = enc_path_q\n vision_encoder = ViTModel.from_pretrained(*tuple(args_q), **kwargs)\n\n enc_path_k = os.path.join(root_path, \"language\")\n args_k = copy.deepcopy(list(args))\n args_k[0] = enc_path_k\n language_encoder = T5EncoderMean.from_pretrained(*tuple(args_k), **kwargs)\n\n return cls(vision_encoder=vision_encoder, language_encoder=language_encoder)"
},
{
"identifier": "VisionT5SimpleBiEncoderHN",
"path": "modeling_encoder.py",
"snippet": "class VisionT5SimpleBiEncoderHN(BiEncoderBaseHN):\n _ENCODER_TYPE='biencoder'\n\n def __init__(self,\n args=None,\n vision_encoder=None,\n language_encoder=None):\n super(VisionT5SimpleBiEncoderHN, self).__init__(\n args=args,\n vision_encoder=vision_encoder,\n language_encoder=language_encoder\n )\n\n def load_weight_from_args(self, args):\n self.vision_encoder = ViTModel.from_pretrained(args.vision_model)\n self.language_encoder = T5EncoderSimple.from_pretrained(args.language_model)\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n root_path = args[0]\n \n enc_path_q = os.path.join(root_path, \"vision\")\n args_q = copy.deepcopy(list(args))\n args_q[0] = enc_path_q\n vision_encoder = ViTModel.from_pretrained(*tuple(args_q), **kwargs)\n\n enc_path_k = os.path.join(root_path, \"language\")\n args_k = copy.deepcopy(list(args))\n args_k[0] = enc_path_k\n language_encoder = T5EncoderSimple.from_pretrained(*tuple(args_k), **kwargs)\n\n return cls(vision_encoder=vision_encoder, language_encoder=language_encoder)"
},
{
"identifier": "VisionT5MeanBiEncoderHN",
"path": "modeling_encoder.py",
"snippet": "class VisionT5MeanBiEncoderHN(BiEncoderBaseHN):\n _ENCODER_TYPE='biencoder'\n\n def __init__(self,\n args=None,\n vision_encoder=None,\n language_encoder=None):\n super(VisionT5MeanBiEncoderHN, self).__init__(\n args=args,\n vision_encoder=vision_encoder,\n language_encoder=language_encoder\n )\n\n def load_weight_from_args(self, args):\n self.vision_encoder = ViTModel.from_pretrained(args.vision_model)\n self.language_encoder = T5EncoderMean.from_pretrained(args.language_model)\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n root_path = args[0]\n \n enc_path_q = os.path.join(root_path, \"vision\")\n args_q = copy.deepcopy(list(args))\n args_q[0] = enc_path_q\n vision_encoder = ViTModel.from_pretrained(*tuple(args_q), **kwargs)\n\n enc_path_k = os.path.join(root_path, \"language\")\n args_k = copy.deepcopy(list(args))\n args_k[0] = enc_path_k\n language_encoder = T5EncoderMean.from_pretrained(*tuple(args_k), **kwargs)\n\n return cls(vision_encoder=vision_encoder, language_encoder=language_encoder)"
},
{
"identifier": "create_directory_info",
"path": "training_retriever.py",
"snippet": "def create_directory_info(args, create_dir=True):\n\n model_dir = os.path.join(args.output_dir, \"{}-{}-{}\".format(\n args.model_cls.replace('/', '_'), \n args.vision_model.replace('/', '_'), \n args.language_model.replace('/', '_')))\n if args.dir_suffix is not None:\n model_dir = '_'.join([model_dir, args.dir_suffix])\n weights_dir = os.path.join(model_dir, \"weights\")\n logs_dir = os.path.join(model_dir, \"logs\")\n\n path_info = {\n 'model_dir': model_dir,\n 'weights_dir': weights_dir,\n 'logs_dir': logs_dir,\n }\n\n if create_dir:\n for k, v in path_info.items():\n create_dir_if_not_exist(v)\n\n path_info['best_model_path'] = os.path.join(weights_dir, \"best_model.pth\")\n path_info['ckpt_path'] = os.path.join(weights_dir, \"checkpoint.pth\")\n return path_info"
},
{
"identifier": "MODEL_CLS",
"path": "training_retriever.py",
"snippet": "MODEL_CLS = {\n \"VisionT5SimpleBiEncoder\": {\n \"model_cls\": VisionT5SimpleBiEncoder,\n },\n \"VisionT5MeanBiEncoder\": {\n \"model_cls\": VisionT5MeanBiEncoder,\n },\n \"VisionT5SimpleBiEncoderHN\": {\n \"model_cls\": VisionT5SimpleBiEncoderHN,\n },\n \"VisionT5MeanBiEncoderHN\": {\n \"model_cls\": VisionT5MeanBiEncoderHN,\n },\n}"
}
] | import argparse
import sys
import os
import csv
import time
import json
import shutil
import logging
import hashlib
import functools
import numpy as np
import tqdm
import torch
import torch.nn.functional as F
import torch.distributed as dist
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.parallel
from numpy.core.numeric import indices
from torch.utils.data import DataLoader
from torch.nn import CrossEntropyLoss
from torch import optim
from torch.nn.parallel import DistributedDataParallel as DDP
from transformers import AutoTokenizer, ViTFeatureExtractor
from torch.utils.tensorboard import SummaryWriter
from index_scorer import FaissScorerExhaustiveGPU, FaissScorerExhaustiveMultiGPU, FaissScorer
from data_utils import DatasetForImages
from modeling_encoder import (
VisionT5SimpleBiEncoder,
VisionT5MeanBiEncoder,
VisionT5SimpleBiEncoderHN,
VisionT5MeanBiEncoderHN,
)
from training_retriever import (
create_directory_info,
MODEL_CLS) | 4,376 | # See the License for the specific language governing permissions and
# limitations under the License.
sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/../")
logger = logging.getLogger(__name__)
faiss_scorer = None
image_tokenizer = None
text_tokenizer = None
model = None
ref_data = None
def retrieve_image_with_text(text_query_list, FVECS_DIR="result/simple_query_ko/fvecs", HF_PATH="result/simple_query_ko/hf_model", MARKDOWN_OUT="result/simple_query_ko/md"):
global faiss_scorer, image_tokenizer, model, ref_data
parser = argparse.ArgumentParser()
# data
parser.add_argument("--data_path",
default="cc12m_filtered.tsv", type=str)
# parser.add_argument("--query_path",
# default="query.json", type=str)
parser.add_argument("--fvecs_dir",
default=None, type=str)
parser.add_argument("--index_path",
default=None, type=str)
parser.add_argument("--index_str",
default="IVF65536,Flat", type=str)
parser.add_argument("--nprobe",
default=4, type=int)
# model
parser.add_argument("--vision_model",
default="google/vit-base-patch16-384", type=str)
parser.add_argument("--language_model",
default="KETI-AIR/ke-t5-base", type=str)
parser.add_argument("--model_cls", default="VisionT5MeanBiEncoder",
choices=["VisionT5SimpleBiEncoder",
"VisionT5MeanBiEncoder"],
type=str, help="model class")
parser.add_argument("--dir_suffix",
default=None, type=str)
parser.add_argument("--output_dir",
default="output", type=str)
parser.add_argument("--markdown_out",
default="md", type=str)
# resume
parser.add_argument("--hf_path", default=None, type=str,
help="path to score huggingface model")
parser.add_argument("--topk", default=10,
type=int, help="top k")
parser.add_argument("--image_size", default=180,
type=int, help="image size for html formatting")
# default settings for training, evaluation
parser.add_argument("--batch_size", default=16,
type=int, help="mini batch size")
parser.add_argument("--num_workers", default=0, type=int,
help="number of workers")
# distributed setting
parser.add_argument("--local_rank", type=int, default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument("--local_world_size", type=int, default=1,
help="The size of the local worker group.")
parser.add_argument("--rank", type=int, default=0,
help="The rank of the worker within a worker group.")
parser.add_argument("--world_size", type=int, default=1,
help="world size. (num_nodes*num_dev_per_node)")
parser.add_argument("--distributed", action='store_true',
help="is distributed training")
parser.add_argument('--model_gpu',
default=0, type=int)
parser.add_argument('--scorer_gpus', nargs="+",
default=[0, 1, 2, 3], type=int)
# --data_path ../kfashion_images_group.tsv --fvecs_dir result/simple_query_ko/fvecs --hf_path result/simple_query_ko/hf_model --query_path query.json --markdown_out result/simple_query_ko/md --model_cls VisionT5MeanBiEncoder
args = parser.parse_args(["--data_path", "../kfashion_images_group.tsv",\
"--fvecs_dir", FVECS_DIR, \
"--hf_path", HF_PATH,\
"--markdown_out", MARKDOWN_OUT,\
"--model_cls", "VisionT5MeanBiEncoder",\
"--scorer_gpus", "0"])
print(args.scorer_gpus)
print(args.fvecs_dir)
path_info = create_directory_info(args, create_dir=False)
if args.fvecs_dir is None:
args.fvecs_dir = os.path.join(path_info["model_dir"], "fvecs")
if args.hf_path.lower()=='default':
args.hf_path = os.path.join(path_info["model_dir"], "hf")
model_device = torch.device('cuda:{}'.format(args.model_gpu))
if faiss_scorer is None:
faiss_scorer = FaissScorerExhaustiveMultiGPU(
fvec_root=args.fvecs_dir,
gpu_list=args.scorer_gpus
)
# get model class
| # Copyright 2022 san kim
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/../")
logger = logging.getLogger(__name__)
faiss_scorer = None
image_tokenizer = None
text_tokenizer = None
model = None
ref_data = None
def retrieve_image_with_text(text_query_list, FVECS_DIR="result/simple_query_ko/fvecs", HF_PATH="result/simple_query_ko/hf_model", MARKDOWN_OUT="result/simple_query_ko/md"):
global faiss_scorer, image_tokenizer, model, ref_data
parser = argparse.ArgumentParser()
# data
parser.add_argument("--data_path",
default="cc12m_filtered.tsv", type=str)
# parser.add_argument("--query_path",
# default="query.json", type=str)
parser.add_argument("--fvecs_dir",
default=None, type=str)
parser.add_argument("--index_path",
default=None, type=str)
parser.add_argument("--index_str",
default="IVF65536,Flat", type=str)
parser.add_argument("--nprobe",
default=4, type=int)
# model
parser.add_argument("--vision_model",
default="google/vit-base-patch16-384", type=str)
parser.add_argument("--language_model",
default="KETI-AIR/ke-t5-base", type=str)
parser.add_argument("--model_cls", default="VisionT5MeanBiEncoder",
choices=["VisionT5SimpleBiEncoder",
"VisionT5MeanBiEncoder"],
type=str, help="model class")
parser.add_argument("--dir_suffix",
default=None, type=str)
parser.add_argument("--output_dir",
default="output", type=str)
parser.add_argument("--markdown_out",
default="md", type=str)
# resume
parser.add_argument("--hf_path", default=None, type=str,
help="path to score huggingface model")
parser.add_argument("--topk", default=10,
type=int, help="top k")
parser.add_argument("--image_size", default=180,
type=int, help="image size for html formatting")
# default settings for training, evaluation
parser.add_argument("--batch_size", default=16,
type=int, help="mini batch size")
parser.add_argument("--num_workers", default=0, type=int,
help="number of workers")
# distributed setting
parser.add_argument("--local_rank", type=int, default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument("--local_world_size", type=int, default=1,
help="The size of the local worker group.")
parser.add_argument("--rank", type=int, default=0,
help="The rank of the worker within a worker group.")
parser.add_argument("--world_size", type=int, default=1,
help="world size. (num_nodes*num_dev_per_node)")
parser.add_argument("--distributed", action='store_true',
help="is distributed training")
parser.add_argument('--model_gpu',
default=0, type=int)
parser.add_argument('--scorer_gpus', nargs="+",
default=[0, 1, 2, 3], type=int)
# --data_path ../kfashion_images_group.tsv --fvecs_dir result/simple_query_ko/fvecs --hf_path result/simple_query_ko/hf_model --query_path query.json --markdown_out result/simple_query_ko/md --model_cls VisionT5MeanBiEncoder
args = parser.parse_args(["--data_path", "../kfashion_images_group.tsv",\
"--fvecs_dir", FVECS_DIR, \
"--hf_path", HF_PATH,\
"--markdown_out", MARKDOWN_OUT,\
"--model_cls", "VisionT5MeanBiEncoder",\
"--scorer_gpus", "0"])
print(args.scorer_gpus)
print(args.fvecs_dir)
path_info = create_directory_info(args, create_dir=False)
if args.fvecs_dir is None:
args.fvecs_dir = os.path.join(path_info["model_dir"], "fvecs")
if args.hf_path.lower()=='default':
args.hf_path = os.path.join(path_info["model_dir"], "hf")
model_device = torch.device('cuda:{}'.format(args.model_gpu))
if faiss_scorer is None:
faiss_scorer = FaissScorerExhaustiveMultiGPU(
fvec_root=args.fvecs_dir,
gpu_list=args.scorer_gpus
)
# get model class | model_cls_cfg = MODEL_CLS[args.model_cls] | 9 | 2023-12-18 10:37:51+00:00 | 8k |
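A minimal CPU-only sketch (not part of the row above) of the exhaustive inner-product search performed by `FaissScorerExhaustiveMultiGPU.get_topk`; the real scorer loads *.fvecs shards and shards an `IndexFlatIP` across GPUs, while the dimensions and vectors here are random stand-ins.

import numpy as np
import faiss

d, n, k = 128, 1000, 4                       # hypothetical embedding dim, corpus size, top-k
xb = np.random.rand(n, d).astype("float32")  # stand-in for image embeddings read from fvecs files
xq = np.random.rand(2, d).astype("float32")  # stand-in for encoded text queries

index = faiss.IndexFlatIP(d)                 # exhaustive inner-product index, as in the scorers above
index.add(xb)
scores, indices = index.search(xq, k)        # same call signature as get_topk(query_vec, k)
print(scores.shape, indices.shape)           # (2, 4) (2, 4)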
CASIA-IVA-Lab/FLAP | lib/prune.py | [
{
"identifier": "WrappedGPT",
"path": "lib/layerwrapper.py",
"snippet": "class WrappedGPT:\n \"\"\"\n This class wraps a GPT layer for specific operations.\n \"\"\"\n\n def __init__(self, layer, layer_id=0, layer_name=\"none\"):\n self.layer = layer\n self.dev = self.layer.weight.device\n self.rows = layer.weight.data.shape[0]\n self.columns = layer.weight.data.shape[1]\n\n self.scaler_row = torch.zeros((self.columns), device=self.dev)\n self.nsamples = 0\n\n self.layer_id = layer_id \n self.layer_name = layer_name\n\n def add_batch(self, inp, out):\n if len(inp.shape) == 2:\n inp = inp.unsqueeze(0)\n tmp = inp.shape[0]\n if isinstance(self.layer, nn.Linear):\n if len(inp.shape) == 3:\n inp = inp.reshape((-1, inp.shape[-1]))\n inp = inp.t()\n \n self.scaler_row *= self.nsamples / (self.nsamples+tmp)\n self.nsamples += tmp\n\n inp = inp.type(torch.float32)\n self.scaler_row += torch.norm(inp, p=2, dim=1) ** 2 / self.nsamples\n \n def free(self):\n self.scaler_row = None\n torch.cuda.empty_cache()"
},
{
"identifier": "BiasGPT",
"path": "lib/layerwrapper.py",
"snippet": "class BiasGPT:\n \"\"\"\n This class wraps a GPT layer for specific operations.\n \"\"\"\n def __init__(self, layer, metric):\n self.layer = layer\n self.dev = self.layer.weight.device\n self.out_dim = layer.weight.data.shape[0]\n self.in_dim = layer.weight.data.shape[1]\n self.type = metric\n self.nsamples = 0\n\n self.baseline_inp = torch.zeros((self.in_dim), device=self.dev)\n if self.type == \"WIFN\":\n self.scaler_inp = torch.zeros((self.in_dim), device=self.dev)\n else: \n self.fluc_inp = torch.zeros((self.in_dim), device=self.dev)\n\n def add_batch(self, inp, out):\n if len(inp.shape) == 2:\n inp = inp.unsqueeze(0)\n batch_size = inp.shape[0]\n if isinstance(self.layer, nn.Linear):\n if len(inp.shape) == 3:\n inp = inp.reshape((-1, inp.shape[-1]))\n inp = inp.t() # (dim, seqlen)\n\n old_baseline_inp = self.baseline_inp\n self.baseline_inp *= self.nsamples / (self.nsamples + batch_size)\n self.baseline_inp += torch.mean(inp, dim=1) / (self.nsamples + batch_size)\n if self.type == \"WIFN\":\n inp = inp.type(torch.float32)\n self.scaler_inp *= self.nsamples / (self.nsamples + batch_size)\n self.scaler_inp += torch.norm(inp, p=2, dim=1) ** 2 / (self.nsamples + batch_size)\n else:\n if self.nsamples == 0:\n self.fluc_inp = 0\n else:\n self.fluc_inp *= (self.nsamples - 1) / (self.nsamples + batch_size - 1)\n self.fluc_inp += torch.sum((inp - self.baseline_inp.unsqueeze(1)) * (inp - old_baseline_inp.unsqueeze(1)), dim=1) / (self.nsamples + batch_size) # a²+b²+c²...没开根号\n\n self.nsamples += batch_size\n\n \n def free(self):\n self.baseline_inp = None\n if hasattr(self, 'fluc_inp'):\n self.fluc_inp = None\n if hasattr(self, 'scaler_inp'):\n self.scaler_inp = None\n torch.cuda.empty_cache() "
},
{
"identifier": "get_loaders",
"path": "lib/data.py",
"snippet": "def get_loaders(name='wikitext2', nsamples=128, seed=0, seqlen=2048, tokenizer=None):\n \"\"\"\n Select the appropriate loader based on dataset name.\n\n Args:\n name (str): The name of the dataset ('wikitext2', 'c4', or 'ptb').\n nsamples (int): Number of samples to generate from the training set.\n seed (int): Random seed for reproducibility.\n seqlen (int): Sequence length for generated samples.\n tokenizer (Tokenizer): Tokenizer instance for encoding texts.\n\n Returns:\n tuple: A tuple containing trainloader (list of input and target pairs) and encoded validation/test set.\n \"\"\"\n # Determine which dataset to use based on 'name' parameter and return corresponding loader\n if 'wikitext2' in name:\n return get_wikitext2(nsamples, seed, seqlen, tokenizer)\n elif \"c4\" in name:\n return get_c4(nsamples, seed, seqlen, tokenizer)\n elif \"ptb\" in name:\n return get_ptb(nsamples, seed, seqlen, tokenizer)"
}
] | import torch
import torch.nn as nn
import math
from .layerwrapper import WrappedGPT, BiasGPT
from .data import get_loaders
from tqdm import tqdm | 4,665 | for name in wrapped_layers:
handles.append(subset[name].register_forward_hook(add_batch(name)))
for j in range(args.nsamples):
with torch.no_grad():
outs[j] = layer(inps[j].unsqueeze(0), attention_mask=attention_mask, position_ids=position_ids)[0]
for h in handles:
h.remove()
for name in subset:
if name == 'self_attn.o_proj':
W_metric = metrics[args.metrics](wrapped_layers, subset, name) ** 2
if args.structure == "UL-UM":
W_metric = W_metric.reshape(-1, 128).sum(dim=1)
thresh = torch.sort(W_metric.cuda())[0][int(args.pruning_ratio*layer.self_attn.num_heads)].cpu()
W_mask = (W_metric>=thresh)
attn_mask.append(W_mask)
elif args.structure == "UL-MM":
W_metric = W_metric.reshape(-1, 128).sum(dim=1)
thresh = torch.sort(W_metric.cuda())[0][args.remove_heads // len(layers)].cpu()
W_mask = (W_metric>=thresh)
attn_mask.append(W_mask)
else:
attn_metric_list.append(W_metric.cpu())
attn_baseline_inp_list.append(wrapped_layers[name].baseline_inp.type(torch.half))
else:
W_metric = metrics[args.metrics](wrapped_layers, subset, name)
if args.structure == "UL-UM":
thresh = torch.sort(W_metric.cuda())[0][int(W_metric.numel()*args.pruning_ratio)].cpu()
W_mask = (W_metric>=thresh)
mlp_mask.append(W_mask)
elif args.structure == "UL-MM":
thresh = torch.sort(W_metric.cuda())[0][cal_remove_neuron(args, model)].cpu()
W_mask = (W_metric>=thresh)
mlp_mask.append(W_mask)
else:
mlp_metric_list.append(W_metric.cpu())
mlp_baseline_inp_list.append(wrapped_layers[name].baseline_inp.type(torch.half))
wrapped_layers[name].free()
inps, outs = outs, inps # Use the original output as input to the next layer
torch.cuda.empty_cache()
standarlization = lambda x: (x - torch.mean(x, axis=1, keepdim=True)) / torch.std(x, axis=1, keepdim=True)
if args.structure in ["AL-MM", "AL-AM"]:
attn_metric = torch.stack(attn_metric_list)
attn_metric = standarlization(attn_metric)
attn_metric = attn_metric.reshape(len(layers), -1, 128).mean(dim=2)
mlp_metric = torch.stack(mlp_metric_list)
mlp_metric = standarlization(mlp_metric)
if args.structure == "AL-MM":
sorted_attn = torch.sort(attn_metric.view(-1), descending=True)[0]
attn_thres = sorted_attn[-int(args.remove_heads)]
attn_mask = (attn_metric > attn_thres) # 1 means retain
sorted_mlp = torch.sort(mlp_metric.view(-1), descending=True)[0]
mlp_thres = sorted_mlp[-cal_remove_neuron(args, model)]
mlp_mask = (mlp_metric > mlp_thres)
else:
prune_metric = torch.cat([attn_metric.view(-1), mlp_metric.view(-1)])
sorted_prune, indices = torch.sort(prune_metric, descending=True)
compression_weight = torch.ones_like(indices)
compression_weight[indices < attn_metric.numel()] = 512.0 / 3
threshold = sorted_prune[torch.argmin(torch.abs(torch.cumsum(compression_weight, 0) - torch.sum(compression_weight)*(1 - args.pruning_ratio)))]
attn_mask = (attn_metric > threshold)
mlp_mask = (mlp_metric > threshold)
else:
attn_mask = torch.stack(attn_mask)
mlp_mask = torch.stack(mlp_mask)
for idx in range(len(layers)):
if f"model.layers.{i}" in getattr(model, 'hf_device_map', {}):
compress(model.model.layers[idx], attn_mask[idx], None, attn_baseline_inp_list[idx], None, model.hf_device_map[f"model.layers.{idx}"], unstr=args.unstr)
else:
compress(model.model.layers[idx], attn_mask[idx], None, attn_baseline_inp_list[idx], None, device, unstr=args.unstr)
if f"model.layers.{i}" in getattr(model, 'hf_device_map', {}):
compress(model.model.layers[idx], None, mlp_mask[idx], None, mlp_baseline_inp_list[idx], model.hf_device_map[f"model.layers.{idx}"], unstr=args.unstr)
else:
compress(model.model.layers[idx], None, mlp_mask[idx], None, mlp_baseline_inp_list[idx], device, unstr=args.unstr)
model.config.use_cache = use_cache
torch.cuda.empty_cache()
def prune_wanda_sp(args, model, tokenizer, device=torch.device("cuda:0")):
"""
Wanda on structured pruning.
Args:
args (object): Command line arguments parsed via argparse.
model (nn.Module): PyTorch model to prune.
tokenizer (Tokenizer): Tokenizer associated with the model.
device (torch.device, optional): Device to move tensors to. Defaults to CUDA device 0.
"""
use_cache = model.config.use_cache
model.config.use_cache = False
print("loading calibdation data")
dataloader, _ = get_loaders("c4",nsamples=128,seed=args.seed,seqlen=model.seqlen,tokenizer=tokenizer)
print("dataset loading complete")
with torch.no_grad():
inps, outs, attention_mask, position_ids = prepare_calibration_input(model, dataloader, device)
layers = model.model.layers
for i in range(len(layers)):
layer = layers[i]
subset = {}
subset.update({'self_attn.o_proj': find_layers(layer)['self_attn.o_proj']})
subset.update({'mlp.down_proj': find_layers(layer)['mlp.down_proj']})
if f"model.layers.{i}" in getattr(model, 'hf_device_map', {}): ## handle the case for llama-30B and llama-65B, when the device map has multiple GPUs;
dev = model.hf_device_map[f"model.layers.{i}"]
inps, outs, attention_mask, position_ids = inps.to(dev), outs.to(dev), attention_mask.to(dev), position_ids.to(dev)
wrapped_layers = {}
for name in subset:
|
# create a dictionary to map the method name to the function
"""
'IFV': Input Feature Variance
'WIFV': Weighted Input Feature Variance
'WIFN': Weighted Input Feature Norm
"""
metrics = {
'IFV': lambda wrapped_layers, subset, name: wrapped_layers[name].fluc_inp,
'WIFV': lambda wrapped_layers, subset, name: wrapped_layers[name].fluc_inp * torch.sum(subset[name].weight.data.pow(2), dim=0),
'WIFN': lambda wrapped_layers, subset, name: (torch.abs(subset[name].weight.data) * torch.sqrt(wrapped_layers[name].scaler_inp.reshape((1,-1)))).mean(axis=0),
}
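# --- Illustrative note (not in the original lib/prune.py): how the WIFV entry above is
# computed for a single layer. BiasGPT.fluc_inp holds a per-input-channel fluctuation
# statistic, and it is weighted by the squared column norm of the layer weight.
# Tiny made-up example:
#   W        = torch.tensor([[1., 0., 2.], [0., 3., 1.]])   # shape (out=2, in=3)
#   fluc_inp = torch.tensor([0.5, 0.1, 2.0])                # one value per input channel
#   wifv     = fluc_inp * torch.sum(W.pow(2), dim=0)        # [0.5*1, 0.1*9, 2.0*5] = [0.5, 0.9, 10.0]
# Channels that combine large weight energy with unstable inputs score highest and are retained.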
def find_layers(module, layers=[nn.Linear], name=''):
"""
Recursively find the layers of a certain type in a module.
Args:
module (nn.Module): PyTorch module.
layers (list): List of layer types to find.
name (str): Name of the module.
Returns:
dict: Dictionary of layers of the given type(s) within the module.
"""
if type(module) in layers:
return {name: module}
res = {}
for name1, child in module.named_children():
res.update(find_layers(
child, layers=layers, name=name + '.' + name1 if name != '' else name1
))
return res
def check_sparsity(model):
"""
Check the sparsity of the weights in different layers of the model.
Args:
model (nn.Module): The model to check.
Returns:
float: Ratio of the count of non-zero weights to total parameters in the model.
"""
use_cache = model.config.use_cache
model.config.use_cache = False
layers = model.model.layers
intermediate_size = model.config.intermediate_size
hidden_size = model.config.hidden_size
count = 0
total_params = 0
for i in range(len(layers)):
layer = layers[i]
subset = find_layers(layer)
sub_count = 0
sub_params = 0
for name in subset:
W = subset[name].weight.data
sub_count += W.numel()
count += W.numel()
if 'self_attn' in name:
total_params += hidden_size * hidden_size
sub_params += hidden_size * hidden_size
else:
total_params += hidden_size * intermediate_size
sub_params += hidden_size * intermediate_size
if subset[name].bias is not None:
count += subset[name].bias.data.numel()
sub_count += subset[name].bias.data.numel()
print(f"layer {i} sparsity {float(sub_count)/sub_params:.6f}")
model.config.use_cache = use_cache
return float(count)/total_params
def prepare_calibration_input(model, dataloader, device):
"""
Prepare inputs for model calibration.
Args:
model (nn.Module): The model to prepare inputs for.
dataloader (DataLoader): DataLoader object to fetch input data.
device (torch.device): Device on which the model is loaded.
Returns:
inps (torch.Tensor): Input tensor for calibration.
outs (torch.Tensor): Output tensor for calibration.
attention_mask (torch.Tensor): Attention mask tensor.
position_ids (torch.Tensor): Position IDs tensor.
"""
use_cache = model.config.use_cache
model.config.use_cache = False
layers = model.model.layers
if "model.embed_tokens" in getattr(model, 'hf_device_map', {}):
device = model.hf_device_map["model.embed_tokens"]
dtype = next(iter(model.parameters())).dtype
inps = torch.zeros((2048, model.seqlen, model.config.hidden_size), dtype=dtype, device=device)
inps.requires_grad = False
cache = {'i': 0, 'attention_mask': None, "position_ids": None}
class Catcher(nn.Module):
def __init__(self, module):
super().__init__()
self.module = module
def forward(self, inp, **kwargs):
inps[cache['i']] = inp
cache['i'] += 1
cache['attention_mask'] = kwargs['attention_mask']
cache['position_ids'] = kwargs['position_ids']
raise ValueError
layers[0] = Catcher(layers[0])
for batch in dataloader:
try:
model(batch[0].to(device))
except ValueError:
pass
layers[0] = layers[0].module
outs = torch.zeros_like(inps)
attention_mask = cache['attention_mask']
position_ids = cache['position_ids']
model.config.use_cache = use_cache
return inps, outs, attention_mask, position_ids
def compress(layer, attn_mask, mlp_mask, attn_mean_inp, mlp_mean_inp, device, bias=True, unstr=False):
"""
Compress a model layer by masking or pruning based on the given masks.
Args:
layer (nn.Module): The model layer to compress.
attn_mask (torch.Tensor): The mask to apply to the attention weights.
mlp_mask (torch.Tensor): The mask to apply to the MLP weights.
attn_mean_inp (torch.Tensor): The mean attention input.
mlp_mean_inp (torch.Tensor): The mean MLP input.
device (torch.device): Device on which the model is loaded.
bias (bool, optional): Whether to consider bias while compressing. Defaults to True.
unstr (bool, optional): If True, only mask without real pruning. Defaults to False.
Returns:
None: This function modifies the layer in-place and doesn't return anything.
"""
if unstr: # Only mask, do not really prune
# Attention Weight Masking
if attn_mask is not None:
retain_heads = torch.count_nonzero(attn_mask)
attn_mask = attn_mask.repeat_interleave(128)
# Apply the mask to the query, key and value projection weights
layer.self_attn.q_proj.weight.data *= attn_mask.unsqueeze(-1).to(device)
layer.self_attn.k_proj.weight.data *= attn_mask.unsqueeze(-1).to(device)
layer.self_attn.v_proj.weight.data *= attn_mask.unsqueeze(-1).to(device)
output_weight = layer.self_attn.o_proj.weight.data
if bias:
# Add the additional bias to compensate for the loss
output_bias = ((attn_mean_inp * ~attn_mask.to(device)) @ output_weight.T)
# Note: the weight data is masked, but the weight tensor shape remains unchanged
if bias:
layer.self_attn.o_proj.bias.data = output_bias
layer.self_attn.o_proj.weight.data = output_weight
# MLP Weight Masking
if mlp_mask is not None:
# Apply the mask to the up and gate projection weights
layer.mlp.up_proj.weight.data *= mlp_mask.unsqueeze(-1).to(device)
layer.mlp.gate_proj.weight.data *= mlp_mask.unsqueeze(-1).to(device)
output_weight = layer.mlp.down_proj.weight.data
if bias:
# Add the additional bias to compensate for the loss
output_bias = ((mlp_mean_inp * ~mlp_mask.to(device)) @ output_weight.T)
# Note: the weight data is masked, but the weight tensor shape remains unchanged
if bias:
layer.mlp.down_proj.bias.data = output_bias
layer.mlp.down_proj.weight.data = output_weight
else:
# Real Pruning
# Attention Weight Pruning
if attn_mask is not None:
retain_heads = torch.count_nonzero(attn_mask)
attn_mask = attn_mask.repeat_interleave(128)
# Prune the query, key and value projection weights
# We reduce the size of the weights based on the attention mask
layer.self_attn.q_proj.weight.data = layer.self_attn.q_proj.weight.data[torch.where(attn_mask)[0]]
layer.self_attn.k_proj.weight.data = layer.self_attn.k_proj.weight.data[torch.where(attn_mask)[0]]
layer.self_attn.v_proj.weight.data = layer.self_attn.v_proj.weight.data[torch.where(attn_mask)[0]]
# Update output dimensions of q, k, v projections based on remaining heads
layer.self_attn.q_proj.out_features = attn_mask.sum().item()
layer.self_attn.k_proj.out_features = attn_mask.sum().item()
layer.self_attn.v_proj.out_features = attn_mask.sum().item()
output_weight = layer.self_attn.o_proj.weight.data
if bias:
# Add the additional bias to compensate for the loss
output_bias = ((attn_mean_inp * ~attn_mask.to(device)) @ output_weight.T)
# Prune the output projection weight
output_weight = layer.self_attn.o_proj.weight.data[:, torch.where(attn_mask)[0]]
# Update layer configurations for the new output shape after pruning
layer.self_attn.num_heads = retain_heads
layer.self_attn.hidden_size = retain_heads * 128
if bias:
# Re-initialize the Linear layer with new shape and bias
layer.self_attn.o_proj.in_features = attn_mask.sum().item()
# layer.self_attn.o_proj = torch.nn.Linear(in_features=output_weight.shape[1], out_features=output_weight.shape[0], bias=True).to(device)
layer.self_attn.o_proj.bias.data = output_bias
# Assign the pruned weights
layer.self_attn.o_proj.weight.data = output_weight
# MLP Weight Pruning
if mlp_mask is not None:
# Prune the up and gate projection weights
layer.mlp.up_proj.weight.data = layer.mlp.up_proj.weight.data[torch.where(mlp_mask)[0]]
layer.mlp.gate_proj.weight.data = layer.mlp.gate_proj.weight.data[torch.where(mlp_mask)[0]]
# Update output dimensions of up and gate projections based on the mlp mask
layer.mlp.up_proj.out_features = mlp_mask.sum().item()
layer.mlp.gate_proj.out_features = mlp_mask.sum().item()
output_weight = layer.mlp.down_proj.weight.data
layer.mlp.intermediate_size = mlp_mask.sum().item()
if bias:
# Add the additional bias to compensate for the loss
output_bias = ((mlp_mean_inp * ~mlp_mask.to(device)) @ output_weight.T)
# Prune the down projection weight
output_weight = layer.mlp.down_proj.weight.data[:, torch.where(mlp_mask)[0]]
if bias:
# Re-initialize the Linear layer with new shape and bias
layer.mlp.down_proj.in_features = mlp_mask.sum().item()
# layer.mlp.down_proj = torch.nn.Linear(in_features=output_weight.shape[1], out_features=output_weight.shape[0], bias=True).to(device)
layer.mlp.down_proj.bias.data = output_bias
# Assign the pruned weights
layer.mlp.down_proj.weight.data = output_weight
# Explicitly empty the CUDA cache to clean up some memory
torch.cuda.empty_cache()
def cal_remove_neuron(args, model):
intermediate_size = model.config.intermediate_size
hidden_size = model.config.hidden_size
num_layers = model.config.num_hidden_layers
if args.structure == "UL-MM":
remove_params = args.pruning_ratio * (intermediate_size * hidden_size * 3 + hidden_size * hidden_size * 4)
remove_head_params = hidden_size * 4 * (args.remove_heads // num_layers) * 128
return int((remove_params - remove_head_params) / (hidden_size * 3))
else:
remove_params = num_layers * args.pruning_ratio * (intermediate_size * hidden_size * 3 + hidden_size * hidden_size * 4)
remove_head_params = hidden_size * 4 * args.remove_heads * 128
return int((remove_params - remove_head_params) / (hidden_size * 3))
def prune_flap(args, model, tokenizer, device=torch.device("cuda:0")):
"""
Our FLAP Pruning.
Args:
args (object): Command line arguments parsed via argparse.
model (nn.Module): PyTorch model to prune.
tokenizer (Tokenizer): Tokenizer associated with the model.
device (torch.device, optional): Device to move tensors to. Defaults to CUDA device 0.
"""
use_cache = model.config.use_cache
model.config.use_cache = False
print("loading calibdation data")
dataloader, _ = get_loaders("wikitext2", nsamples=args.nsamples,seed=args.seed,seqlen=model.seqlen,tokenizer=tokenizer)
print("dataset loading complete")
with torch.no_grad():
inps, outs, attention_mask, position_ids = prepare_calibration_input(model, dataloader, device)
layers = model.model.layers
attn_metric_list, mlp_metric_list = [], []
attn_baseline_inp_list, mlp_baseline_inp_list = [], []
attn_mask, mlp_mask = [], []
# Split into sub-problems, separate statistics for each module
for i in tqdm(range(len(layers)), desc="Processing layers"):
layer = layers[i]
subset = {}
subset.update({'self_attn.o_proj': find_layers(layer)['self_attn.o_proj']})
subset.update({'mlp.down_proj': find_layers(layer)['mlp.down_proj']})
if f"model.layers.{i}" in getattr(model, 'hf_device_map', {}): ## handle the case for llama-30B and llama-65B, when the device map has multiple GPUs;
dev = model.hf_device_map[f"model.layers.{i}"]
inps, outs, attention_mask, position_ids = inps.to(dev), outs.to(dev), attention_mask.to(dev), position_ids.to(dev)
wrapped_layers = {}
for name in subset:
wrapped_layers[name] = BiasGPT(subset[name], args.metrics)
def add_batch(name):
def tmp(_, inp, out):
wrapped_layers[name].add_batch(inp[0].data, out.data)
return tmp
handles = []
for name in wrapped_layers:
handles.append(subset[name].register_forward_hook(add_batch(name)))
for j in range(args.nsamples):
with torch.no_grad():
outs[j] = layer(inps[j].unsqueeze(0), attention_mask=attention_mask, position_ids=position_ids)[0]
for h in handles:
h.remove()
for name in subset:
if name == 'self_attn.o_proj':
W_metric = metrics[args.metrics](wrapped_layers, subset, name) ** 2
if args.structure == "UL-UM":
W_metric = W_metric.reshape(-1, 128).sum(dim=1)
thresh = torch.sort(W_metric.cuda())[0][int(args.pruning_ratio*layer.self_attn.num_heads)].cpu()
W_mask = (W_metric>=thresh)
attn_mask.append(W_mask)
elif args.structure == "UL-MM":
W_metric = W_metric.reshape(-1, 128).sum(dim=1)
thresh = torch.sort(W_metric.cuda())[0][args.remove_heads // len(layers)].cpu()
W_mask = (W_metric>=thresh)
attn_mask.append(W_mask)
else:
attn_metric_list.append(W_metric.cpu())
attn_baseline_inp_list.append(wrapped_layers[name].baseline_inp.type(torch.half))
else:
W_metric = metrics[args.metrics](wrapped_layers, subset, name)
if args.structure == "UL-UM":
thresh = torch.sort(W_metric.cuda())[0][int(W_metric.numel()*args.pruning_ratio)].cpu()
W_mask = (W_metric>=thresh)
mlp_mask.append(W_mask)
elif args.structure == "UL-MM":
thresh = torch.sort(W_metric.cuda())[0][cal_remove_neuron(args, model)].cpu()
W_mask = (W_metric>=thresh)
mlp_mask.append(W_mask)
else:
mlp_metric_list.append(W_metric.cpu())
mlp_baseline_inp_list.append(wrapped_layers[name].baseline_inp.type(torch.half))
wrapped_layers[name].free()
inps, outs = outs, inps # Use the original output as input to the next layer
torch.cuda.empty_cache()
standardization = lambda x: (x - torch.mean(x, axis=1, keepdim=True)) / torch.std(x, axis=1, keepdim=True)
if args.structure in ["AL-MM", "AL-AM"]:
attn_metric = torch.stack(attn_metric_list)
attn_metric = standardization(attn_metric)
attn_metric = attn_metric.reshape(len(layers), -1, 128).mean(dim=2)
mlp_metric = torch.stack(mlp_metric_list)
mlp_metric = standardization(mlp_metric)
if args.structure == "AL-MM":
sorted_attn = torch.sort(attn_metric.view(-1), descending=True)[0]
attn_thres = sorted_attn[-int(args.remove_heads)]
attn_mask = (attn_metric > attn_thres) # 1 means retain
sorted_mlp = torch.sort(mlp_metric.view(-1), descending=True)[0]
mlp_thres = sorted_mlp[-cal_remove_neuron(args, model)]
mlp_mask = (mlp_metric > mlp_thres)
else:
prune_metric = torch.cat([attn_metric.view(-1), mlp_metric.view(-1)])
sorted_prune, indices = torch.sort(prune_metric, descending=True)
compression_weight = torch.ones_like(indices)
compression_weight[indices < attn_metric.numel()] = 512.0 / 3
threshold = sorted_prune[torch.argmin(torch.abs(torch.cumsum(compression_weight, 0) - torch.sum(compression_weight)*(1 - args.pruning_ratio)))]
attn_mask = (attn_metric > threshold)
mlp_mask = (mlp_metric > threshold)
else:
attn_mask = torch.stack(attn_mask)
mlp_mask = torch.stack(mlp_mask)
for idx in range(len(layers)):
if f"model.layers.{i}" in getattr(model, 'hf_device_map', {}):
compress(model.model.layers[idx], attn_mask[idx], None, attn_baseline_inp_list[idx], None, model.hf_device_map[f"model.layers.{idx}"], unstr=args.unstr)
else:
compress(model.model.layers[idx], attn_mask[idx], None, attn_baseline_inp_list[idx], None, device, unstr=args.unstr)
if f"model.layers.{i}" in getattr(model, 'hf_device_map', {}):
compress(model.model.layers[idx], None, mlp_mask[idx], None, mlp_baseline_inp_list[idx], model.hf_device_map[f"model.layers.{idx}"], unstr=args.unstr)
else:
compress(model.model.layers[idx], None, mlp_mask[idx], None, mlp_baseline_inp_list[idx], device, unstr=args.unstr)
model.config.use_cache = use_cache
torch.cuda.empty_cache()
def prune_wanda_sp(args, model, tokenizer, device=torch.device("cuda:0")):
"""
Wanda on structured pruning.
Args:
args (object): Command line arguments parsed via argparse.
model (nn.Module): PyTorch model to prune.
tokenizer (Tokenizer): Tokenizer associated with the model.
device (torch.device, optional): Device to move tensors to. Defaults to CUDA device 0.
"""
use_cache = model.config.use_cache
model.config.use_cache = False
print("loading calibdation data")
dataloader, _ = get_loaders("c4",nsamples=128,seed=args.seed,seqlen=model.seqlen,tokenizer=tokenizer)
print("dataset loading complete")
with torch.no_grad():
inps, outs, attention_mask, position_ids = prepare_calibration_input(model, dataloader, device)
layers = model.model.layers
for i in range(len(layers)):
layer = layers[i]
subset = {}
subset.update({'self_attn.o_proj': find_layers(layer)['self_attn.o_proj']})
subset.update({'mlp.down_proj': find_layers(layer)['mlp.down_proj']})
if f"model.layers.{i}" in getattr(model, 'hf_device_map', {}): ## handle the case for llama-30B and llama-65B, when the device map has multiple GPUs;
dev = model.hf_device_map[f"model.layers.{i}"]
inps, outs, attention_mask, position_ids = inps.to(dev), outs.to(dev), attention_mask.to(dev), position_ids.to(dev)
wrapped_layers = {}
for name in subset: | wrapped_layers[name] = WrappedGPT(subset[name]) | 0 | 2023-12-18 06:28:41+00:00 | 8k |
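The AL-AM branch of prune_flap above chooses one global threshold over the standardized head and neuron scores, weighting each attention head by the parameters it frees relative to an MLP neuron. Below is a minimal standalone sketch of that selection step; it assumes LLaMA-style shapes with head_dim = 128, and the function and argument names are illustrative rather than part of the FLAP code.

import torch

def global_prune_threshold(attn_metric, mlp_metric, pruning_ratio, head_dim=128):
    # attn_metric: (num_layers, num_heads) standardized per-head scores
    # mlp_metric:  (num_layers, intermediate_size) standardized per-neuron scores
    scores = torch.cat([attn_metric.reshape(-1), mlp_metric.reshape(-1)])
    sorted_scores, order = torch.sort(scores, descending=True)
    # A pruned head frees roughly 4*head_dim*hidden parameters, a pruned MLP neuron roughly 3*hidden,
    # hence attention entries carry a 4*head_dim/3 weight when counting parameters.
    weight = torch.ones_like(sorted_scores)
    weight[order < attn_metric.numel()] = 4.0 * head_dim / 3.0
    # Retain the highest-scoring units until the weighted budget matches 1 - pruning_ratio.
    budget = weight.sum() * (1.0 - pruning_ratio)
    cut = torch.argmin(torch.abs(torch.cumsum(weight, 0) - budget))
    threshold = sorted_scores[cut]
    return attn_metric > threshold, mlp_metric > threshold  # boolean retain masks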
alibaba/u2mot | yolox/tracker/u2mot_tracker.py | [
{
"identifier": "BaseTrack",
"path": "yolox/tracker/basetrack.py",
"snippet": "class BaseTrack(object):\n _count = 0\n\n track_id = 0\n is_activated = False\n state = TrackState.New\n\n history = OrderedDict()\n features = []\n curr_feature = None\n score = 0\n start_frame = 0\n frame_id = 0\n time_since_update = 0\n\n # multi-camera\n location = (np.inf, np.inf)\n\n @property\n def end_frame(self):\n return self.frame_id\n\n @staticmethod\n def next_id():\n BaseTrack._count += 1\n return BaseTrack._count\n\n def activate(self, *args):\n raise NotImplementedError\n\n def predict(self):\n raise NotImplementedError\n\n def update(self, *args, **kwargs):\n raise NotImplementedError\n\n def mark_lost(self):\n self.state = TrackState.Lost\n\n def mark_long_lost(self):\n self.state = TrackState.LongLost\n\n def mark_removed(self):\n self.state = TrackState.Removed\n\n @staticmethod\n def clear_count():\n BaseTrack._count = 0"
},
{
"identifier": "TrackState",
"path": "yolox/tracker/basetrack.py",
"snippet": "class TrackState(object):\n New = 0\n Tracked = 1\n Lost = 2\n LongLost = 3\n Removed = 4"
},
{
"identifier": "KalmanFilter",
"path": "yolox/tracker/kalman_filter.py",
"snippet": "class KalmanFilter(object):\n \"\"\"\n A simple Kalman filter for tracking bounding boxes in image space.\n\n The 8-dimensional state space\n\n x, y, w, h, vx, vy, vw, vh\n\n contains the bounding box center position (x, y), width w, height h,\n and their respective velocities.\n\n Object motion follows a constant velocity model. The bounding box location\n (x, y, w, h) is taken as direct observation of the state space (linear\n observation model).\n\n \"\"\"\n\n def __init__(self):\n ndim, dt = 4, 1.\n\n # Create Kalman filter model matrices.\n self._motion_mat = np.eye(2 * ndim, 2 * ndim)\n for i in range(ndim):\n self._motion_mat[i, ndim + i] = dt\n self._update_mat = np.eye(ndim, 2 * ndim)\n\n # Motion and observation uncertainty are chosen relative to the current\n # state estimate. These weights control the amount of uncertainty in\n # the model. This is a bit hacky.\n self._std_weight_position = 1. / 20\n self._std_weight_velocity = 1. / 160\n\n def initiate(self, measurement):\n \"\"\"Create track from unassociated measurement.\n\n Parameters\n ----------\n measurement : ndarray\n Bounding box coordinates (x, y, w, h) with center position (x, y),\n width w, and height h.\n\n Returns\n -------\n (ndarray, ndarray)\n Returns the mean vector (8 dimensional) and covariance matrix (8x8\n dimensional) of the new track. Unobserved velocities are initialized\n to 0 mean.\n\n \"\"\"\n mean_pos = measurement\n mean_vel = np.zeros_like(mean_pos)\n mean = np.r_[mean_pos, mean_vel]\n\n std = [\n 2 * self._std_weight_position * measurement[2],\n 2 * self._std_weight_position * measurement[3],\n 2 * self._std_weight_position * measurement[2],\n 2 * self._std_weight_position * measurement[3],\n 10 * self._std_weight_velocity * measurement[2],\n 10 * self._std_weight_velocity * measurement[3],\n 10 * self._std_weight_velocity * measurement[2],\n 10 * self._std_weight_velocity * measurement[3]]\n covariance = np.diag(np.square(std))\n return mean, covariance\n\n def predict(self, mean, covariance):\n \"\"\"Run Kalman filter prediction step.\n\n Parameters\n ----------\n mean : ndarray\n The 8 dimensional mean vector of the object state at the previous\n time step.\n covariance : ndarray\n The 8x8 dimensional covariance matrix of the object state at the\n previous time step.\n\n Returns\n -------\n (ndarray, ndarray)\n Returns the mean vector and covariance matrix of the predicted\n state. 
Unobserved velocities are initialized to 0 mean.\n\n \"\"\"\n std_pos = [\n self._std_weight_position * mean[2],\n self._std_weight_position * mean[3],\n self._std_weight_position * mean[2],\n self._std_weight_position * mean[3]]\n std_vel = [\n self._std_weight_velocity * mean[2],\n self._std_weight_velocity * mean[3],\n self._std_weight_velocity * mean[2],\n self._std_weight_velocity * mean[3]]\n motion_cov = np.diag(np.square(np.r_[std_pos, std_vel]))\n\n mean = np.dot(mean, self._motion_mat.T)\n covariance = np.linalg.multi_dot((\n self._motion_mat, covariance, self._motion_mat.T)) + motion_cov\n\n return mean, covariance\n\n def project(self, mean, covariance):\n \"\"\"Project state distribution to measurement space.\n\n Parameters\n ----------\n mean : ndarray\n The state's mean vector (8 dimensional array).\n covariance : ndarray\n The state's covariance matrix (8x8 dimensional).\n\n Returns\n -------\n (ndarray, ndarray)\n Returns the projected mean and covariance matrix of the given state\n estimate.\n\n \"\"\"\n std = [\n self._std_weight_position * mean[2],\n self._std_weight_position * mean[3],\n self._std_weight_position * mean[2],\n self._std_weight_position * mean[3]]\n innovation_cov = np.diag(np.square(std))\n\n mean = np.dot(self._update_mat, mean)\n covariance = np.linalg.multi_dot((\n self._update_mat, covariance, self._update_mat.T))\n return mean, covariance + innovation_cov\n\n def multi_predict(self, mean, covariance):\n \"\"\"Run Kalman filter prediction step (Vectorized version).\n Parameters\n ----------\n mean : ndarray\n The Nx8 dimensional mean matrix of the object states at the previous\n time step.\n covariance : ndarray\n The Nx8x8 dimensional covariance matrics of the object states at the\n previous time step.\n Returns\n -------\n (ndarray, ndarray)\n Returns the mean vector and covariance matrix of the predicted\n state. 
Unobserved velocities are initialized to 0 mean.\n \"\"\"\n std_pos = [\n self._std_weight_position * mean[:, 2],\n self._std_weight_position * mean[:, 3],\n self._std_weight_position * mean[:, 2],\n self._std_weight_position * mean[:, 3]]\n std_vel = [\n self._std_weight_velocity * mean[:, 2],\n self._std_weight_velocity * mean[:, 3],\n self._std_weight_velocity * mean[:, 2],\n self._std_weight_velocity * mean[:, 3]]\n sqr = np.square(np.r_[std_pos, std_vel]).T\n\n motion_cov = []\n for i in range(len(mean)):\n motion_cov.append(np.diag(sqr[i]))\n motion_cov = np.asarray(motion_cov)\n\n mean = np.dot(mean, self._motion_mat.T)\n left = np.dot(self._motion_mat, covariance).transpose((1, 0, 2))\n covariance = np.dot(left, self._motion_mat.T) + motion_cov\n\n return mean, covariance\n\n def update(self, mean, covariance, measurement):\n \"\"\"Run Kalman filter correction step.\n\n Parameters\n ----------\n mean : ndarray\n The predicted state's mean vector (8 dimensional).\n covariance : ndarray\n The state's covariance matrix (8x8 dimensional).\n measurement : ndarray\n The 4 dimensional measurement vector (x, y, w, h), where (x, y)\n is the center position, w the width, and h the height of the\n bounding box.\n\n Returns\n -------\n (ndarray, ndarray)\n Returns the measurement-corrected state distribution.\n\n \"\"\"\n projected_mean, projected_cov = self.project(mean, covariance)\n\n chol_factor, lower = scipy.linalg.cho_factor(\n projected_cov, lower=True, check_finite=False)\n kalman_gain = scipy.linalg.cho_solve(\n (chol_factor, lower), np.dot(covariance, self._update_mat.T).T,\n check_finite=False).T\n innovation = measurement - projected_mean\n\n new_mean = mean + np.dot(innovation, kalman_gain.T)\n new_covariance = covariance - np.linalg.multi_dot((\n kalman_gain, projected_cov, kalman_gain.T))\n return new_mean, new_covariance\n\n def gating_distance(self, mean, covariance, measurements,\n only_position=False, metric='maha'):\n \"\"\"Compute gating distance between state distribution and measurements.\n A suitable distance threshold can be obtained from `chi2inv95`. If\n `only_position` is False, the chi-square distribution has 4 degrees of\n freedom, otherwise 2.\n Parameters\n ----------\n mean : ndarray\n Mean vector over the state distribution (8 dimensional).\n covariance : ndarray\n Covariance of the state distribution (8x8 dimensional).\n measurements : ndarray\n An Nx4 dimensional matrix of N measurements, each in\n format (x, y, a, h) where (x, y) is the bounding box center\n position, a the aspect ratio, and h the height.\n only_position : Optional[bool]\n If True, distance computation is done with respect to the bounding\n box center position only.\n Returns\n -------\n ndarray\n Returns an array of length N, where the i-th element contains the\n squared Mahalanobis distance between (mean, covariance) and\n `measurements[i]`.\n \"\"\"\n mean, covariance = self.project(mean, covariance)\n if only_position:\n mean, covariance = mean[:2], covariance[:2, :2]\n measurements = measurements[:, :2]\n\n d = measurements - mean\n if metric == 'gaussian':\n return np.sum(d * d, axis=1)\n elif metric == 'maha':\n cholesky_factor = np.linalg.cholesky(covariance)\n z = scipy.linalg.solve_triangular(\n cholesky_factor, d.T, lower=True, check_finite=False,\n overwrite_b=True)\n squared_maha = np.sum(z * z, axis=0)\n return squared_maha\n else:\n raise ValueError('invalid distance metric')"
},
{
"identifier": "GMC",
"path": "yolox/tracker/gmc.py",
"snippet": "class GMC:\n def __init__(self, method='orb', downscale=2, verbose=None):\n super(GMC, self).__init__()\n\n self.method = method\n self.downscale = max(1, int(downscale))\n\n if self.method == 'orb':\n self.detector = cv2.FastFeatureDetector_create(20)\n self.extractor = cv2.ORB_create()\n self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING)\n seqName = verbose[0]\n fileDir = verbose[1]\n\n if '-FRCNN' in seqName:\n seqName = seqName[:-6]\n elif '-DPM' in seqName:\n seqName = seqName[:-4]\n elif '-SDP' in seqName:\n seqName = seqName[:-4]\n \n self.gmcFile = open(f\"yolox/tracker/GMC_files/{fileDir}/GMC-{seqName}.txt\", 'w+')\n\n elif self.method == 'sift':\n self.detector = cv2.SIFT_create(nOctaveLayers=3, contrastThreshold=0.02, edgeThreshold=20)\n self.extractor = cv2.SIFT_create(nOctaveLayers=3, contrastThreshold=0.02, edgeThreshold=20)\n self.matcher = cv2.BFMatcher(cv2.NORM_L2)\n\n elif self.method == 'ecc':\n number_of_iterations = 5000\n termination_eps = 1e-6\n self.warp_mode = cv2.MOTION_EUCLIDEAN\n self.criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps)\n\n elif self.method == 'file' or self.method == 'files':\n seqName = verbose[0]\n # MOT17_ablation, MOTChallenge, VisDrone/test-dev, BDD100K/val, BDD100K/test\n fileDir = verbose[1]\n filePath = f'yolox/tracker/GMC_files/{fileDir}'\n\n if '-FRCNN' in seqName:\n seqName = seqName[:-6]\n elif '-DPM' in seqName:\n seqName = seqName[:-4]\n elif '-SDP' in seqName:\n seqName = seqName[:-4]\n\n self.gmcFile = open(filePath + \"/GMC-\" + seqName + \".txt\", 'r')\n\n if self.gmcFile is None:\n raise ValueError(\"Error: Unable to open GMC file in directory:\" + filePath)\n elif self.method == 'none' or self.method == 'None':\n self.method = 'none'\n else:\n raise ValueError(\"Error: Unknown CMC method:\" + method)\n\n self.prevFrame = None\n self.prevKeyPoints = None\n self.prevDescriptors = None\n\n self.initializedFirstFrame = False\n self.frameCnt = 0\n\n def apply(self, raw_frame, detections=None):\n if self.method == 'orb' or self.method == 'sift':\n try:\n H = self.applyFeaures(raw_frame, detections)\n except:\n H = np.array([[1., 0., 0.], [0., 1., 0.]])\n self.gmcFile.write('%d\\t%.6f\\t%.6f\\t%.6f\\t%.6f\\t%.6f\\t%.6f\\t\\n' % \\\n (self.frameCnt, H[0, 0], H[0, 1], H[0, 2], H[1, 0], H[1, 1], H[1, 2]))\n self.frameCnt += 1\n return H\n elif self.method == 'ecc':\n return self.applyEcc(raw_frame, detections)\n elif self.method == 'file':\n return self.applyFile(raw_frame, detections)\n elif self.method == 'none':\n return np.eye(2, 3)\n else:\n return np.eye(2, 3)\n\n def applyEcc(self, raw_frame, detections=None):\n\n # Initialize\n height, width, _ = raw_frame.shape\n frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY)\n H = np.eye(2, 3, dtype=np.float32)\n\n # Downscale image (TODO: consider using pyramids)\n if self.downscale > 1.0:\n frame = cv2.GaussianBlur(frame, (3, 3), 1.5)\n frame = cv2.resize(frame, (width // self.downscale, height // self.downscale))\n width = width // self.downscale\n height = height // self.downscale\n\n # Handle first frame\n if not self.initializedFirstFrame:\n # Initialize data\n self.prevFrame = frame.copy()\n\n # Initialization done\n self.initializedFirstFrame = True\n\n return H\n\n # Run the ECC algorithm. 
The results are stored in warp_matrix.\n # (cc, H) = cv2.findTransformECC(self.prevFrame, frame, H, self.warp_mode, self.criteria)\n try:\n (cc, H) = cv2.findTransformECC(self.prevFrame, frame, H, self.warp_mode, self.criteria, None, 1)\n except:\n print('Warning: find transform failed. Set warp as identity')\n\n return H\n\n def applyFeaures(self, raw_frame, detections=None):\n\n # Initialize\n height, width, _ = raw_frame.shape\n frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY)\n H = np.eye(2, 3)\n\n # Downscale image (TODO: consider using pyramids)\n if self.downscale > 1.0:\n # frame = cv2.GaussianBlur(frame, (3, 3), 1.5)\n frame = cv2.resize(frame, (width // self.downscale, height // self.downscale))\n width = width // self.downscale\n height = height // self.downscale\n\n # find the keypoints\n mask = np.zeros_like(frame)\n # mask[int(0.05 * height): int(0.95 * height), int(0.05 * width): int(0.95 * width)] = 255\n mask[int(0.02 * height): int(0.98 * height), int(0.02 * width): int(0.98 * width)] = 255\n if detections is not None:\n for det in detections:\n tlbr = (det[:4] / self.downscale).astype(np.int_)\n mask[tlbr[1]:tlbr[3], tlbr[0]:tlbr[2]] = 0\n\n keypoints = self.detector.detect(frame, mask)\n\n # compute the descriptors\n keypoints, descriptors = self.extractor.compute(frame, keypoints)\n\n # Handle first frame\n if not self.initializedFirstFrame:\n # Initialize data\n self.prevFrame = frame.copy()\n self.prevKeyPoints = copy.copy(keypoints)\n self.prevDescriptors = copy.copy(descriptors)\n\n # Initialization done\n self.initializedFirstFrame = True\n\n return H\n\n # Match descriptors.\n knnMatches = self.matcher.knnMatch(self.prevDescriptors, descriptors, 2)\n\n # Filtered matches based on smallest spatial distance\n matches = []\n spatialDistances = []\n\n maxSpatialDistance = 0.25 * np.array([width, height])\n\n # Handle empty matches case\n if len(knnMatches) == 0:\n # Store to next iteration\n self.prevFrame = frame.copy()\n self.prevKeyPoints = copy.copy(keypoints)\n self.prevDescriptors = copy.copy(descriptors)\n\n return H\n\n for m, n in knnMatches:\n if m.distance < 0.9 * n.distance:\n prevKeyPointLocation = self.prevKeyPoints[m.queryIdx].pt\n currKeyPointLocation = keypoints[m.trainIdx].pt\n\n spatialDistance = (prevKeyPointLocation[0] - currKeyPointLocation[0],\n prevKeyPointLocation[1] - currKeyPointLocation[1])\n\n if (np.abs(spatialDistance[0]) < maxSpatialDistance[0]) and \\\n (np.abs(spatialDistance[1]) < maxSpatialDistance[1]):\n spatialDistances.append(spatialDistance)\n matches.append(m)\n\n meanSpatialDistances = np.mean(spatialDistances, 0)\n stdSpatialDistances = np.std(spatialDistances, 0)\n\n inliesrs = (spatialDistances - meanSpatialDistances) < 2.5 * stdSpatialDistances\n\n goodMatches = []\n prevPoints = []\n currPoints = []\n for i in range(len(matches)):\n if inliesrs[i, 0] and inliesrs[i, 1]:\n goodMatches.append(matches[i])\n prevPoints.append(self.prevKeyPoints[matches[i].queryIdx].pt)\n currPoints.append(keypoints[matches[i].trainIdx].pt)\n\n prevPoints = np.array(prevPoints)\n currPoints = np.array(currPoints)\n\n # Draw the keypoint matches on the output image\n if 0:\n matches_img = np.hstack((self.prevFrame, frame))\n matches_img = cv2.cvtColor(matches_img, cv2.COLOR_GRAY2BGR)\n W = np.size(self.prevFrame, 1)\n for m in goodMatches:\n prev_pt = np.array(self.prevKeyPoints[m.queryIdx].pt, dtype=np.int_)\n curr_pt = np.array(keypoints[m.trainIdx].pt, dtype=np.int_)\n curr_pt[0] += W\n color = np.random.randint(0, 255, (3,))\n color = 
(int(color[0]), int(color[1]), int(color[2]))\n\n matches_img = cv2.line(matches_img, prev_pt, curr_pt, tuple(color), 1, cv2.LINE_AA)\n matches_img = cv2.circle(matches_img, prev_pt, 2, tuple(color), -1)\n matches_img = cv2.circle(matches_img, curr_pt, 2, tuple(color), -1)\n\n plt.figure()\n plt.imshow(matches_img)\n plt.show()\n\n # Find rigid matrix\n if (np.size(prevPoints, 0) > 4) and (np.size(prevPoints, 0) == np.size(prevPoints, 0)):\n H, inliesrs = cv2.estimateAffinePartial2D(prevPoints, currPoints, cv2.RANSAC)\n\n # Handle downscale\n if self.downscale > 1.0:\n H[0, 2] *= self.downscale\n H[1, 2] *= self.downscale\n else:\n print('Warning: not enough matching points')\n\n # Store to next iteration\n self.prevFrame = frame.copy()\n self.prevKeyPoints = copy.copy(keypoints)\n self.prevDescriptors = copy.copy(descriptors)\n\n return H\n\n def applyFile(self, raw_frame=None, detections=None):\n line = self.gmcFile.readline()\n tokens = line.split(\"\\t\")\n H = np.eye(2, 3, dtype=np.float_)\n if len(tokens) > 6:\n H[0, 0] = float(tokens[1])\n H[0, 1] = float(tokens[2])\n H[0, 2] = float(tokens[3])\n H[1, 0] = float(tokens[4])\n H[1, 1] = float(tokens[5])\n H[1, 2] = float(tokens[6])\n\n return H"
}
] | import numpy as np
from collections import deque
from .basetrack import BaseTrack, TrackState
from .kalman_filter import KalmanFilter
from .gmc import GMC
from . import matching | 5,956 | #!/usr/bin/env python3
# -*- encoding:utf-8 -*-
# Copyright (c) Alibaba, Inc. and its affiliates.
class STrack(BaseTrack):
shared_kalman = KalmanFilter()
def __init__(self, tlwh, score, cls=0, feat=None, feat_history=50):
# wait activate
self._tlwh = np.asarray(tlwh, dtype=np.float)
self.kalman_filter = None
self.mean, self.covariance = None, None
self.is_activated = False
self.cls = -1
self.cls_hist = [] # (cls id, freq)
self.update_cls(cls, score)
self.score = score
self.tracklet_len = 0
self.smooth_feat = None
self.curr_feat = None
self.features = deque([], maxlen=feat_history)
if feat is not None:
self.update_features(feat)
self.alpha = 0.9
def update_features(self, feat):
feat /= np.linalg.norm(feat)
self.curr_feat = feat
if self.smooth_feat is None:
self.smooth_feat = feat
else:
self.smooth_feat = self.alpha * self.smooth_feat + (1 - self.alpha) * feat
self.features.append(feat)
self.smooth_feat /= np.linalg.norm(self.smooth_feat)
def update_cls(self, cls, score):
if len(self.cls_hist) > 0:
max_freq = 0
found = False
for c in self.cls_hist:
if cls == c[0]:
c[1] += score
found = True
if c[1] > max_freq:
max_freq = c[1]
self.cls = c[0]
if not found:
self.cls_hist.append([cls, score])
self.cls = cls
else:
self.cls_hist.append([cls, score])
self.cls = cls
def predict(self):
mean_state = self.mean.copy()
| #!/usr/bin/env python3
# -*- encoding:utf-8 -*-
# Copyright (c) Alibaba, Inc. and its affiliates.
class STrack(BaseTrack):
shared_kalman = KalmanFilter()
def __init__(self, tlwh, score, cls=0, feat=None, feat_history=50):
# wait activate
self._tlwh = np.asarray(tlwh, dtype=np.float)
self.kalman_filter = None
self.mean, self.covariance = None, None
self.is_activated = False
self.cls = -1
self.cls_hist = [] # (cls id, freq)
self.update_cls(cls, score)
self.score = score
self.tracklet_len = 0
self.smooth_feat = None
self.curr_feat = None
self.features = deque([], maxlen=feat_history)
if feat is not None:
self.update_features(feat)
self.alpha = 0.9
def update_features(self, feat):
feat /= np.linalg.norm(feat)
self.curr_feat = feat
if self.smooth_feat is None:
self.smooth_feat = feat
else:
self.smooth_feat = self.alpha * self.smooth_feat + (1 - self.alpha) * feat
self.features.append(feat)
self.smooth_feat /= np.linalg.norm(self.smooth_feat)
def update_cls(self, cls, score):
if len(self.cls_hist) > 0:
max_freq = 0
found = False
for c in self.cls_hist:
if cls == c[0]:
c[1] += score
found = True
if c[1] > max_freq:
max_freq = c[1]
self.cls = c[0]
if not found:
self.cls_hist.append([cls, score])
self.cls = cls
else:
self.cls_hist.append([cls, score])
self.cls = cls
def predict(self):
mean_state = self.mean.copy() | if self.state != TrackState.Tracked: | 1 | 2023-12-18 10:04:40+00:00 | 8k |
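STrack.update_features in this record keeps an exponential moving average of the re-ID embedding and re-normalizes it every frame, so appearance distance stays a simple dot product between unit vectors. A minimal numeric sketch of that update, standalone and with the same alpha = 0.9 as the class (the helper name here is illustrative):

import numpy as np

def ema_update(smooth_feat, new_feat, alpha=0.9):
    # Normalize the incoming embedding, blend it into the running average,
    # then re-normalize so the smoothed feature stays unit length.
    new_feat = new_feat / np.linalg.norm(new_feat)
    smooth_feat = new_feat if smooth_feat is None else alpha * smooth_feat + (1 - alpha) * new_feat
    return smooth_feat / np.linalg.norm(smooth_feat)

feat = None
for _ in range(3):
    feat = ema_update(feat, np.random.rand(128))
print(round(float(np.linalg.norm(feat)), 6))  # stays ~1.0 after every update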
liuhuang31/HiFTNet-sr | train.py | [
{
"identifier": "AttrDict",
"path": "env.py",
"snippet": "class AttrDict(dict):\n def __init__(self, *args, **kwargs):\n super(AttrDict, self).__init__(*args, **kwargs)\n self.__dict__ = self"
},
{
"identifier": "build_env",
"path": "env.py",
"snippet": "def build_env(config, config_name, path):\n t_path = os.path.join(path, config_name)\n if config != t_path:\n os.makedirs(path, exist_ok=True)\n shutil.copyfile(config, os.path.join(path, config_name))"
},
{
"identifier": "MelDataset",
"path": "meldataset.py",
"snippet": "class MelDataset(torch.utils.data.Dataset):\n def __init__(self, training_files, segment_size, n_fft, num_mels,\n hop_size, win_size, sampling_rate, fmin, fmax, split=True, shuffle=True, n_cache_reuse=1,\n device=None, fmax_loss=None, fine_tuning=False, base_mels_path=None):\n self.audio_files = training_files\n random.seed(1234)\n if shuffle:\n random.shuffle(self.audio_files)\n self.segment_size = segment_size\n self.sampling_rate = sampling_rate\n self.split = split\n self.n_fft = n_fft\n self.num_mels = num_mels\n self.hop_size = hop_size\n self.win_size = win_size\n self.fmin = fmin\n self.fmax = fmax\n self.fmax_loss = fmax_loss\n self.cached_wav = None\n self.n_cache_reuse = n_cache_reuse\n self._cache_ref_count = 0\n self.device = device\n self.fine_tuning = fine_tuning\n self.base_mels_path = base_mels_path\n\n def __getitem__(self, index):\n filename = self.audio_files[index]\n if self._cache_ref_count == 0:\n audio, sampling_rate = load_wav(filename, self.sampling_rate)\n # audio = audio / MAX_WAV_VALUE\n if not self.fine_tuning:\n audio = normalize(audio) * 0.95\n self.cached_wav = audio\n if sampling_rate != self.sampling_rate:\n raise ValueError(\"{} SR doesn't match target {} SR\".format(\n sampling_rate, self.sampling_rate))\n self._cache_ref_count = self.n_cache_reuse\n else:\n audio = self.cached_wav\n self._cache_ref_count -= 1\n\n audio = torch.FloatTensor(audio)\n audio = audio.unsqueeze(0)\n\n if not self.fine_tuning:\n if self.split:\n if audio.size(1) >= self.segment_size:\n max_audio_start = audio.size(1) - self.segment_size\n audio_start = random.randint(0, max_audio_start)\n audio = audio[:, audio_start:audio_start+self.segment_size]\n else:\n audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), 'constant')\n\n mel = mel_spectrogram(audio, self.n_fft, self.num_mels,\n self.sampling_rate, self.hop_size, self.win_size, self.fmin, self.fmax,\n center=False, training=True)\n else:\n mel = np.load(\n os.path.join(self.base_mels_path, os.path.splitext(os.path.split(filename)[-1])[0] + '.npy'))\n mel = torch.from_numpy(mel)\n\n if len(mel.shape) < 3:\n mel = mel.unsqueeze(0)\n\n if self.split:\n frames_per_seg = math.ceil(self.segment_size / self.hop_size)\n\n if audio.size(1) >= self.segment_size:\n mel_start = random.randint(0, mel.size(2) - frames_per_seg - 1)\n mel = mel[:, :, mel_start:mel_start + frames_per_seg]\n audio = audio[:, mel_start * self.hop_size:(mel_start + frames_per_seg) * self.hop_size]\n else:\n mel = torch.nn.functional.pad(mel, (0, frames_per_seg - mel.size(2)), 'constant')\n audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), 'constant')\n\n mel_loss = mel_spectrogram(audio, self.n_fft, self.num_mels,\n self.sampling_rate, self.hop_size, self.win_size, self.fmin, self.fmax_loss,\n center=False)\n\n return (mel.squeeze(), audio.squeeze(0), filename, mel_loss.squeeze())\n\n def __len__(self):\n return len(self.audio_files)"
},
{
"identifier": "mel_spectrogram",
"path": "meldataset.py",
"snippet": "def mel_spectrogram(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False, training=False):\n # if torch.min(y) < -1.:\n # print('min value is ', torch.min(y))\n # if torch.max(y) > 1.:\n # print('max value is ', torch.max(y))\n if training:\n with torch.no_grad():\n # 16k to 24k/48k\n if fmax <= 8000 and (sampling_rate == 24000 or sampling_rate == 48000):\n y = y.squeeze().cpu().numpy()\n y = librosa.resample(y, sampling_rate, 16000)\n y = librosa.resample(y, 16000, 24000)\n y = torch.FloatTensor(y)\n y = y.unsqueeze(0)\n sampling_rate = 24000\n n_fft = int(n_fft/2)\n hop_size=int(hop_size/2)\n win_size=int(win_size/2)\n # 24k to 48k\n elif fmax <= 12000 and sampling_rate == 48000:\n y = y.squeeze().cpu().numpy()\n y = librosa.resample(y, sampling_rate, 24000)\n y = torch.FloatTensor(y)\n y = y.unsqueeze(0)\n sampling_rate = 24000\n n_fft = int(n_fft/2)\n hop_size=int(hop_size/2)\n win_size=int(win_size/2)\n else:\n pass\n\n global mel_basis, hann_window\n if fmax not in mel_basis:\n mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)\n mel_basis[str(fmax)+'_'+str(y.device)] = torch.from_numpy(mel).float().to(y.device)\n hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device)\n\n y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')\n y = y.squeeze(1)\n\n # complex tensor as default, then use view_as_real for future pytorch compatibility\n spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[str(y.device)],\n center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=True)\n spec = torch.view_as_real(spec)\n spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9))\n\n spec = torch.matmul(mel_basis[str(fmax)+'_'+str(y.device)], spec)\n spec = spectral_normalize_torch(spec)\n\n return spec"
},
{
"identifier": "get_dataset_filelist",
"path": "meldataset.py",
"snippet": "def get_dataset_filelist(a):\n training_files =[]\n validation_files =[]\n total_files = 0\n input_wave_dirs = a.input_wavs_dir.split(\",\")\n\n for wave_dir in input_wave_dirs:\n num_validation_files = 3\n files_under_path = 0\n allfiles = find_all_wav_path(wave_dir)\n for input_file_name in allfiles:\n if not os.path.splitext(input_file_name)[-1] == '.wav':\n continue\n files_under_path +=1\n full_file_path = input_file_name\n if num_validation_files <=0:\n training_files.append(full_file_path)\n else:\n validation_files.append(full_file_path)\n num_validation_files -=1\n if files_under_path == 0:\n raise Exception(\"no wave file found!\")\n total_files +=files_under_path\n print(f'total files:{total_files}')\n \n return training_files, validation_files"
},
{
"identifier": "Generator",
"path": "models.py",
"snippet": "class Generator(torch.nn.Module):\n def __init__(self, h, F0_model):\n super(Generator, self).__init__()\n self.h = h\n self.num_kernels = len(h.resblock_kernel_sizes)\n self.num_upsamples = len(h.upsample_rates)\n self.conv_pre = weight_norm(Conv1d(80, h.upsample_initial_channel, 7, 1, padding=3))\n resblock = ResBlock1 if h.resblock == '1' else ResBlock2\n\n self.m_source = SourceModuleHnNSF(\n sampling_rate=h.sampling_rate,\n upsample_scale=np.prod(h.upsample_rates) * h.gen_istft_hop_size,\n harmonic_num=8, voiced_threshod=10)\n self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(h.upsample_rates) * h.gen_istft_hop_size)\n self.noise_convs = nn.ModuleList()\n self.noise_res = nn.ModuleList()\n \n self.F0_model = F0_model\n \n self.ups = nn.ModuleList()\n for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):\n self.ups.append(weight_norm(\n ConvTranspose1d(h.upsample_initial_channel//(2**i), h.upsample_initial_channel//(2**(i+1)),\n k, u, padding=(k-u)//2)))\n\n c_cur = h.upsample_initial_channel // (2 ** (i + 1))\n \n if i + 1 < len(h.upsample_rates): #\n stride_f0 = np.prod(h.upsample_rates[i + 1:])\n self.noise_convs.append(Conv1d(\n h.gen_istft_n_fft + 2, c_cur, kernel_size=stride_f0 * 2, stride=stride_f0, padding=(stride_f0+1) // 2))\n self.noise_res.append(resblock(h, c_cur, 7, [1,3,5]))\n else:\n self.noise_convs.append(Conv1d(h.gen_istft_n_fft + 2, c_cur, kernel_size=1))\n self.noise_res.append(resblock(h, c_cur, 11, [1,3,5]))\n \n self.resblocks = nn.ModuleList()\n for i in range(len(self.ups)):\n ch = h.upsample_initial_channel//(2**(i+1))\n for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)):\n self.resblocks.append(resblock(h, ch, k, d))\n\n self.post_n_fft = h.gen_istft_n_fft\n self.conv_post = weight_norm(Conv1d(ch, self.post_n_fft + 2, 7, 1, padding=3))\n self.ups.apply(init_weights)\n self.conv_post.apply(init_weights)\n self.reflection_pad = torch.nn.ReflectionPad1d((1, 0))\n self.stft = TorchSTFT(filter_length=h.gen_istft_n_fft, hop_length=h.gen_istft_hop_size, win_length=h.gen_istft_n_fft)\n\n def forward(self, x):\n f0, _, _ = self.F0_model(x.unsqueeze(1))\n if len(f0.shape) == 1:\n f0 = f0.unsqueeze(0)\n \n f0 = self.f0_upsamp(f0[:, None]).transpose(1, 2) # bs,n,t\n\n har_source, _, _ = self.m_source(f0)\n har_source = har_source.transpose(1, 2).squeeze(1)\n har_spec, har_phase = self.stft.transform(har_source)\n har = torch.cat([har_spec, har_phase], dim=1)\n \n x = self.conv_pre(x)\n for i in range(self.num_upsamples):\n x = F.leaky_relu(x, LRELU_SLOPE)\n x_source = self.noise_convs[i](har)\n x_source = self.noise_res[i](x_source)\n \n x = self.ups[i](x)\n if i == self.num_upsamples - 1:\n x = self.reflection_pad(x)\n \n x = x + x_source\n xs = None\n for j in range(self.num_kernels):\n if xs is None:\n xs = self.resblocks[i*self.num_kernels+j](x)\n else:\n xs += self.resblocks[i*self.num_kernels+j](x)\n x = xs / self.num_kernels\n x = F.leaky_relu(x)\n x = self.conv_post(x)\n spec = torch.exp(x[:,:self.post_n_fft // 2 + 1, :])\n phase = torch.sin(x[:, self.post_n_fft // 2 + 1:, :])\n\n return spec, phase\n\n def remove_weight_norm(self):\n print('Removing weight norm...')\n for l in self.ups:\n remove_weight_norm(l)\n for l in self.resblocks:\n l.remove_weight_norm()\n remove_weight_norm(self.conv_pre)\n remove_weight_norm(self.conv_post)"
},
{
"identifier": "MultiPeriodDiscriminator",
"path": "models.py",
"snippet": "class MultiPeriodDiscriminator(torch.nn.Module):\n def __init__(self):\n super(MultiPeriodDiscriminator, self).__init__()\n self.discriminators = nn.ModuleList([\n DiscriminatorP(2),\n DiscriminatorP(3),\n DiscriminatorP(5),\n DiscriminatorP(7),\n DiscriminatorP(11),\n ])\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n fmap_rs.append(fmap_r)\n y_d_gs.append(y_d_g)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs"
},
{
"identifier": "MultiResSpecDiscriminator",
"path": "models.py",
"snippet": "class MultiResSpecDiscriminator(torch.nn.Module):\n\n def __init__(self,\n fft_sizes=[1024, 2048, 512],\n hop_sizes=[120, 240, 50],\n win_lengths=[600, 1200, 240],\n window=\"hann_window\"):\n\n super(MultiResSpecDiscriminator, self).__init__()\n self.discriminators = nn.ModuleList([\n SpecDiscriminator(fft_sizes[0], hop_sizes[0], win_lengths[0], window),\n SpecDiscriminator(fft_sizes[1], hop_sizes[1], win_lengths[1], window),\n SpecDiscriminator(fft_sizes[2], hop_sizes[2], win_lengths[2], window)\n ])\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n fmap_rs.append(fmap_r)\n y_d_gs.append(y_d_g)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs"
},
{
"identifier": "feature_loss",
"path": "models.py",
"snippet": "def feature_loss(fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, dg):\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss*2"
},
{
"identifier": "generator_loss",
"path": "models.py",
"snippet": "def generator_loss(disc_outputs):\n loss = 0\n gen_losses = []\n for dg in disc_outputs:\n l = torch.mean((1-dg)**2)\n gen_losses.append(l)\n loss += l\n\n return loss, gen_losses"
},
{
"identifier": "discriminator_loss",
"path": "models.py",
"snippet": "def discriminator_loss(disc_real_outputs, disc_generated_outputs):\n loss = 0\n r_losses = []\n g_losses = []\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n r_loss = torch.mean((1-dr)**2)\n g_loss = torch.mean(dg**2)\n loss += (r_loss + g_loss)\n r_losses.append(r_loss.item())\n g_losses.append(g_loss.item())\n\n return loss, r_losses, g_losses"
},
{
"identifier": "discriminator_TPRLS_loss",
"path": "models.py",
"snippet": "def discriminator_TPRLS_loss(disc_real_outputs, disc_generated_outputs):\n loss = 0\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n tau = 0.04\n m_DG = torch.median((dr-dg))\n L_rel = torch.mean((((dr - dg) - m_DG)**2)[dr < dg + m_DG])\n loss += tau - F.relu(tau - L_rel)\n return loss"
},
{
"identifier": "generator_TPRLS_loss",
"path": "models.py",
"snippet": "def generator_TPRLS_loss(disc_real_outputs, disc_generated_outputs):\n loss = 0\n for dg, dr in zip(disc_real_outputs, disc_generated_outputs):\n tau = 0.04\n m_DG = torch.median((dr-dg))\n L_rel = torch.mean((((dr - dg) - m_DG)**2)[dr < dg + m_DG])\n loss += tau - F.relu(tau - L_rel)\n return loss"
},
{
"identifier": "plot_spectrogram",
"path": "utils.py",
"snippet": "def plot_spectrogram(spectrogram):\n fig, ax = plt.subplots(figsize=(10, 2))\n im = ax.imshow(spectrogram, aspect=\"auto\", origin=\"lower\",\n interpolation='none')\n plt.colorbar(im, ax=ax)\n\n fig.canvas.draw()\n plt.close()\n\n return fig"
},
{
"identifier": "scan_checkpoint",
"path": "utils.py",
"snippet": "def scan_checkpoint(cp_dir, prefix):\n pattern = os.path.join(cp_dir, prefix + '????????')\n cp_list = glob.glob(pattern)\n if len(cp_list) == 0:\n return None\n return sorted(cp_list)[-1]"
},
{
"identifier": "load_checkpoint",
"path": "utils.py",
"snippet": "def load_checkpoint(filepath, device):\n assert os.path.isfile(filepath)\n print(\"Loading '{}'\".format(filepath))\n checkpoint_dict = torch.load(filepath, map_location=device)\n print(\"Complete.\")\n return checkpoint_dict"
},
{
"identifier": "save_checkpoint",
"path": "utils.py",
"snippet": "def save_checkpoint(filepath, obj):\n print(\"Saving checkpoint to {}\".format(filepath))\n torch.save(obj, filepath)\n print(\"Complete.\")"
},
{
"identifier": "TorchSTFT",
"path": "stft.py",
"snippet": "class TorchSTFT(torch.nn.Module):\n def __init__(self, filter_length=800, hop_length=200, win_length=800, window='hann'):\n super().__init__()\n self.filter_length = filter_length\n self.hop_length = hop_length\n self.win_length = win_length\n self.window = torch.from_numpy(get_window(window, win_length, fftbins=True).astype(np.float32))\n\n def transform(self, input_data):\n forward_transform = torch.stft(\n input_data,\n self.filter_length, self.hop_length, self.win_length, window=self.window.to(input_data.device),\n return_complex=True)\n\n return torch.abs(forward_transform), torch.angle(forward_transform)\n\n def inverse(self, magnitude, phase):\n inverse_transform = torch.istft(\n magnitude * torch.exp(phase * 1j),\n self.filter_length, self.hop_length, self.win_length, window=self.window.to(magnitude.device))\n\n return inverse_transform.unsqueeze(-2) # unsqueeze to stay consistent with conv_transpose1d implementation\n\n def forward(self, input_data):\n self.magnitude, self.phase = self.transform(input_data)\n reconstruction = self.inverse(self.magnitude, self.phase)\n return reconstruction"
},
{
"identifier": "JDCNet",
"path": "Utils/JDC/model.py",
"snippet": "class JDCNet(nn.Module):\n \"\"\"\n Joint Detection and Classification Network model for singing voice melody.\n \"\"\"\n def __init__(self, num_class=722, seq_len=31, leaky_relu_slope=0.01):\n super().__init__()\n self.num_class = num_class\n\n # input = (b, 1, 31, 513), b = batch size\n self.conv_block = nn.Sequential(\n nn.Conv2d(in_channels=1, out_channels=64, kernel_size=3, padding=1, bias=False), # out: (b, 64, 31, 513)\n nn.BatchNorm2d(num_features=64),\n nn.LeakyReLU(leaky_relu_slope, inplace=True),\n nn.Conv2d(64, 64, 3, padding=1, bias=False), # (b, 64, 31, 513)\n )\n\n # res blocks\n self.res_block1 = ResBlock(in_channels=64, out_channels=128) # (b, 128, 31, 128)\n self.res_block2 = ResBlock(in_channels=128, out_channels=192) # (b, 192, 31, 32)\n self.res_block3 = ResBlock(in_channels=192, out_channels=256) # (b, 256, 31, 8)\n\n # pool block\n self.pool_block = nn.Sequential(\n nn.BatchNorm2d(num_features=256),\n nn.LeakyReLU(leaky_relu_slope, inplace=True),\n nn.MaxPool2d(kernel_size=(1, 4)), # (b, 256, 31, 2)\n nn.Dropout(p=0.2),\n )\n\n # maxpool layers (for auxiliary network inputs)\n # in = (b, 128, 31, 513) from conv_block, out = (b, 128, 31, 2)\n self.maxpool1 = nn.MaxPool2d(kernel_size=(1, 40))\n # in = (b, 128, 31, 128) from res_block1, out = (b, 128, 31, 2)\n self.maxpool2 = nn.MaxPool2d(kernel_size=(1, 20))\n # in = (b, 128, 31, 32) from res_block2, out = (b, 128, 31, 2)\n self.maxpool3 = nn.MaxPool2d(kernel_size=(1, 10))\n\n # in = (b, 640, 31, 2), out = (b, 256, 31, 2)\n self.detector_conv = nn.Sequential(\n nn.Conv2d(640, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(leaky_relu_slope, inplace=True),\n nn.Dropout(p=0.2),\n )\n\n # input: (b, 31, 512) - resized from (b, 256, 31, 2)\n self.bilstm_classifier = nn.LSTM(\n input_size=512, hidden_size=256,\n batch_first=True, bidirectional=True) # (b, 31, 512)\n\n # input: (b, 31, 512) - resized from (b, 256, 31, 2)\n self.bilstm_detector = nn.LSTM(\n input_size=512, hidden_size=256,\n batch_first=True, bidirectional=True) # (b, 31, 512)\n\n # input: (b * 31, 512)\n self.classifier = nn.Linear(in_features=512, out_features=self.num_class) # (b * 31, num_class)\n\n # input: (b * 31, 512)\n self.detector = nn.Linear(in_features=512, out_features=2) # (b * 31, 2) - binary classifier\n\n # initialize weights\n self.apply(self.init_weights)\n\n def get_feature_GAN(self, x):\n seq_len = x.shape[-2]\n x = x.float().transpose(-1, -2)\n \n convblock_out = self.conv_block(x)\n \n resblock1_out = self.res_block1(convblock_out)\n resblock2_out = self.res_block2(resblock1_out)\n resblock3_out = self.res_block3(resblock2_out)\n poolblock_out = self.pool_block[0](resblock3_out)\n poolblock_out = self.pool_block[1](poolblock_out)\n \n return poolblock_out.transpose(-1, -2)\n \n def get_feature(self, x):\n seq_len = x.shape[-2]\n x = x.float().transpose(-1, -2)\n \n convblock_out = self.conv_block(x)\n \n resblock1_out = self.res_block1(convblock_out)\n resblock2_out = self.res_block2(resblock1_out)\n resblock3_out = self.res_block3(resblock2_out)\n poolblock_out = self.pool_block[0](resblock3_out)\n poolblock_out = self.pool_block[1](poolblock_out)\n \n return self.pool_block[2](poolblock_out)\n \n def forward(self, x):\n \"\"\"\n Returns:\n classification_prediction, detection_prediction\n sizes: (b, 31, 722), (b, 31, 2)\n \"\"\"\n ###############################\n # forward pass for classifier #\n ###############################\n seq_len = x.shape[-1]\n x = x.float().transpose(-1, -2)\n \n convblock_out = 
self.conv_block(x)\n \n resblock1_out = self.res_block1(convblock_out)\n resblock2_out = self.res_block2(resblock1_out)\n resblock3_out = self.res_block3(resblock2_out)\n \n \n poolblock_out = self.pool_block[0](resblock3_out)\n poolblock_out = self.pool_block[1](poolblock_out)\n GAN_feature = poolblock_out.transpose(-1, -2)\n poolblock_out = self.pool_block[2](poolblock_out)\n \n # (b, 256, 31, 2) => (b, 31, 256, 2) => (b, 31, 512)\n classifier_out = poolblock_out.permute(0, 2, 1, 3).contiguous().view((-1, seq_len, 512))\n classifier_out, _ = self.bilstm_classifier(classifier_out) # ignore the hidden states\n\n classifier_out = classifier_out.contiguous().view((-1, 512)) # (b * 31, 512)\n classifier_out = self.classifier(classifier_out)\n classifier_out = classifier_out.view((-1, seq_len, self.num_class)) # (b, 31, num_class)\n \n # sizes: (b, 31, 722), (b, 31, 2)\n # classifier output consists of predicted pitch classes per frame\n # detector output consists of: (isvoice, notvoice) estimates per frame\n return torch.abs(classifier_out.squeeze()), GAN_feature, poolblock_out\n\n @staticmethod\n def init_weights(m):\n if isinstance(m, nn.Linear):\n nn.init.kaiming_uniform_(m.weight)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Conv2d):\n nn.init.xavier_normal_(m.weight)\n elif isinstance(m, nn.LSTM) or isinstance(m, nn.LSTMCell):\n for p in m.parameters():\n if p.data is None:\n continue\n\n if len(p.shape) >= 2:\n nn.init.orthogonal_(p.data)\n else:\n nn.init.normal_(p.data)"
}
] | import warnings
import itertools
import os
import time
import argparse
import json
import torch
import torch.nn.functional as F
import torch.multiprocessing as mp
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DistributedSampler, DataLoader
from torch.distributed import init_process_group
from torch.nn.parallel import DistributedDataParallel
from env import AttrDict, build_env
from meldataset import MelDataset, mel_spectrogram, get_dataset_filelist
from models import Generator, MultiPeriodDiscriminator, MultiResSpecDiscriminator, feature_loss, generator_loss,\
discriminator_loss, discriminator_TPRLS_loss, generator_TPRLS_loss
from utils import plot_spectrogram, scan_checkpoint, load_checkpoint, save_checkpoint
from stft import TorchSTFT
from Utils.JDC.model import JDCNet | 7,127 | warnings.simplefilter(action='ignore', category=FutureWarning)
torch.backends.cudnn.benchmark = True
def train(rank, a, h):
if h.num_gpus > 1:
init_process_group(backend=h.dist_config['dist_backend'], init_method=h.dist_config['dist_url'],
world_size=h.dist_config['world_size'] * h.num_gpus, rank=rank)
torch.cuda.manual_seed(h.seed)
device = torch.device('cuda:{:d}'.format(rank))
F0_model = JDCNet(num_class=1, seq_len=192)
params = torch.load(h.F0_path)['model']
F0_model.load_state_dict(params)
generator = Generator(h, F0_model).to(device)
mpd = MultiPeriodDiscriminator().to(device)
msd = MultiResSpecDiscriminator().to(device)
stft = TorchSTFT(filter_length=h.gen_istft_n_fft, hop_length=h.gen_istft_hop_size, win_length=h.gen_istft_n_fft).to(device)
if rank == 0:
print(generator)
os.makedirs(a.checkpoint_path, exist_ok=True)
print("checkpoints directory : ", a.checkpoint_path)
if os.path.isdir(a.checkpoint_path):
cp_g = scan_checkpoint(a.checkpoint_path, 'g_')
cp_do = scan_checkpoint(a.checkpoint_path, 'do_')
steps = 0
if cp_g is None or cp_do is None:
state_dict_do = None
last_epoch = -1
else:
| warnings.simplefilter(action='ignore', category=FutureWarning)
torch.backends.cudnn.benchmark = True
def train(rank, a, h):
if h.num_gpus > 1:
init_process_group(backend=h.dist_config['dist_backend'], init_method=h.dist_config['dist_url'],
world_size=h.dist_config['world_size'] * h.num_gpus, rank=rank)
torch.cuda.manual_seed(h.seed)
device = torch.device('cuda:{:d}'.format(rank))
F0_model = JDCNet(num_class=1, seq_len=192)
params = torch.load(h.F0_path)['model']
F0_model.load_state_dict(params)
generator = Generator(h, F0_model).to(device)
mpd = MultiPeriodDiscriminator().to(device)
msd = MultiResSpecDiscriminator().to(device)
stft = TorchSTFT(filter_length=h.gen_istft_n_fft, hop_length=h.gen_istft_hop_size, win_length=h.gen_istft_n_fft).to(device)
if rank == 0:
print(generator)
os.makedirs(a.checkpoint_path, exist_ok=True)
print("checkpoints directory : ", a.checkpoint_path)
if os.path.isdir(a.checkpoint_path):
cp_g = scan_checkpoint(a.checkpoint_path, 'g_')
cp_do = scan_checkpoint(a.checkpoint_path, 'do_')
steps = 0
if cp_g is None or cp_do is None:
state_dict_do = None
last_epoch = -1
else: | state_dict_g = load_checkpoint(cp_g, device) | 15 | 2023-12-16 03:53:55+00:00 | 8k |
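The Generator in this record predicts a magnitude/phase pair rather than a waveform; the audio is reconstructed through TorchSTFT.inverse. A quick self-contained round trip through the TorchSTFT class shown above follows; the filter/hop/win values are illustrative, not the values from the config file.

import torch
from stft import TorchSTFT  # the TorchSTFT class shown in this record

stft = TorchSTFT(filter_length=16, hop_length=4, win_length=16)
y = torch.randn(1, 1024)            # dummy batch of audio
mag, phase = stft.transform(y)      # magnitude and phase spectrograms
y_rec = stft.inverse(mag, phase)    # (1, 1, ~1024) waveform, per the unsqueeze in inverse()
print(y_rec.shape)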
m-abr/FCPCodebase | scripts/utils/Dribble.py | [
{
"identifier": "Agent",
"path": "agent/Agent.py",
"snippet": "class Agent(Base_Agent):\n def __init__(self, host:str, agent_port:int, monitor_port:int, unum:int,\n team_name:str, enable_log, enable_draw, wait_for_server=True, is_fat_proxy=False) -> None:\n \n # define robot type\n robot_type = (0,1,1,1,2,3,3,3,4,4,4)[unum-1]\n\n # Initialize base agent\n # Args: Server IP, Agent Port, Monitor Port, Uniform No., Robot Type, Team Name, Enable Log, Enable Draw, play mode correction, Wait for Server, Hear Callback\n super().__init__(host, agent_port, monitor_port, unum, robot_type, team_name, enable_log, enable_draw, True, wait_for_server, None)\n\n self.enable_draw = enable_draw\n self.state = 0 # 0-Normal, 1-Getting up, 2-Kicking\n self.kick_direction = 0\n self.kick_distance = 0\n self.fat_proxy_cmd = \"\" if is_fat_proxy else None\n self.fat_proxy_walk = np.zeros(3) # filtered walk parameters for fat proxy\n\n self.init_pos = ([-14,0],[-9,-5],[-9,0],[-9,5],[-5,-5],[-5,0],[-5,5],[-1,-6],[-1,-2.5],[-1,2.5],[-1,6])[unum-1] # initial formation\n\n\n def beam(self, avoid_center_circle=False):\n r = self.world.robot\n pos = self.init_pos[:] # copy position list \n self.state = 0\n\n # Avoid center circle by moving the player back \n if avoid_center_circle and np.linalg.norm(self.init_pos) < 2.5:\n pos[0] = -2.3 \n\n if np.linalg.norm(pos - r.loc_head_position[:2]) > 0.1 or self.behavior.is_ready(\"Get_Up\"):\n self.scom.commit_beam(pos, M.vector_angle((-pos[0],-pos[1]))) # beam to initial position, face coordinate (0,0)\n else:\n if self.fat_proxy_cmd is None: # normal behavior\n self.behavior.execute(\"Zero_Bent_Knees_Auto_Head\")\n else: # fat proxy behavior\n self.fat_proxy_cmd += \"(proxy dash 0 0 0)\"\n self.fat_proxy_walk = np.zeros(3) # reset fat proxy walk\n\n\n def move(self, target_2d=(0,0), orientation=None, is_orientation_absolute=True,\n avoid_obstacles=True, priority_unums=[], is_aggressive=False, timeout=3000):\n '''\n Walk to target position\n\n Parameters\n ----------\n target_2d : array_like\n 2D target in absolute coordinates\n orientation : float\n absolute or relative orientation of torso, in degrees\n set to None to go towards the target (is_orientation_absolute is ignored)\n is_orientation_absolute : bool\n True if orientation is relative to the field, False if relative to the robot's torso\n avoid_obstacles : bool\n True to avoid obstacles using path planning (maybe reduce timeout arg if this function is called multiple times per simulation cycle)\n priority_unums : list\n list of teammates to avoid (since their role is more important)\n is_aggressive : bool\n if True, safety margins are reduced for opponents\n timeout : float\n restrict path planning to a maximum duration (in microseconds) \n '''\n r = self.world.robot\n\n if self.fat_proxy_cmd is not None: # fat proxy behavior\n self.fat_proxy_move(target_2d, orientation, is_orientation_absolute) # ignore obstacles\n return\n\n if avoid_obstacles:\n target_2d, _, distance_to_final_target = self.path_manager.get_path_to_target(\n target_2d, priority_unums=priority_unums, is_aggressive=is_aggressive, timeout=timeout)\n else:\n distance_to_final_target = np.linalg.norm(target_2d - r.loc_head_position[:2])\n\n self.behavior.execute(\"Walk\", target_2d, True, orientation, is_orientation_absolute, distance_to_final_target) # Args: target, is_target_abs, ori, is_ori_abs, distance\n\n\n\n\n\n def kick(self, kick_direction=None, kick_distance=None, abort=False, enable_pass_command=False):\n '''\n Walk to ball and kick\n\n Parameters\n ----------\n kick_direction : float\n kick 
direction, in degrees, relative to the field\n kick_distance : float\n kick distance in meters\n abort : bool\n True to abort.\n The method returns True upon successful abortion, which is immediate while the robot is aligning itself. \n However, if the abortion is requested during the kick, it is delayed until the kick is completed.\n avoid_pass_command : bool\n When False, the pass command will be used when at least one opponent is near the ball\n \n Returns\n -------\n finished : bool\n Returns True if the behavior finished or was successfully aborted.\n '''\n\n if self.min_opponent_ball_dist < 1.45 and enable_pass_command:\n self.scom.commit_pass_command()\n\n self.kick_direction = self.kick_direction if kick_direction is None else kick_direction\n self.kick_distance = self.kick_distance if kick_distance is None else kick_distance\n\n if self.fat_proxy_cmd is None: # normal behavior\n return self.behavior.execute(\"Basic_Kick\", self.kick_direction, abort) # Basic_Kick has no kick distance control\n else: # fat proxy behavior\n return self.fat_proxy_kick()\n\n\n\n\n def think_and_send(self):\n w = self.world\n r = self.world.robot \n my_head_pos_2d = r.loc_head_position[:2]\n my_ori = r.imu_torso_orientation\n ball_2d = w.ball_abs_pos[:2]\n ball_vec = ball_2d - my_head_pos_2d\n ball_dir = M.vector_angle(ball_vec)\n ball_dist = np.linalg.norm(ball_vec)\n ball_sq_dist = ball_dist * ball_dist # for faster comparisons\n ball_speed = np.linalg.norm(w.get_ball_abs_vel(6)[:2])\n behavior = self.behavior\n goal_dir = M.target_abs_angle(ball_2d,(15.05,0))\n path_draw_options = self.path_manager.draw_options\n PM = w.play_mode\n PM_GROUP = w.play_mode_group\n\n #--------------------------------------- 1. Preprocessing\n\n slow_ball_pos = w.get_predicted_ball_pos(0.5) # predicted future 2D ball position when ball speed <= 0.5 m/s\n\n # list of squared distances between teammates (including self) and slow ball (sq distance is set to 1000 in some conditions)\n teammates_ball_sq_dist = [np.sum((p.state_abs_pos[:2] - slow_ball_pos) ** 2) # squared distance between teammate and ball\n if p.state_last_update != 0 and (w.time_local_ms - p.state_last_update <= 360 or p.is_self) and not p.state_fallen\n else 1000 # force large distance if teammate does not exist, or its state info is not recent (360 ms), or it has fallen\n for p in w.teammates ]\n\n # list of squared distances between opponents and slow ball (sq distance is set to 1000 in some conditions)\n opponents_ball_sq_dist = [np.sum((p.state_abs_pos[:2] - slow_ball_pos) ** 2) # squared distance between teammate and ball\n if p.state_last_update != 0 and w.time_local_ms - p.state_last_update <= 360 and not p.state_fallen\n else 1000 # force large distance if opponent does not exist, or its state info is not recent (360 ms), or it has fallen\n for p in w.opponents ]\n\n min_teammate_ball_sq_dist = min(teammates_ball_sq_dist)\n self.min_teammate_ball_dist = math.sqrt(min_teammate_ball_sq_dist) # distance between ball and closest teammate\n self.min_opponent_ball_dist = math.sqrt(min(opponents_ball_sq_dist)) # distance between ball and closest opponent\n\n active_player_unum = teammates_ball_sq_dist.index(min_teammate_ball_sq_dist) + 1\n\n\n #--------------------------------------- 2. 
Decide action\n\n\n\n if PM == w.M_GAME_OVER:\n pass\n elif PM_GROUP == w.MG_ACTIVE_BEAM:\n self.beam()\n elif PM_GROUP == w.MG_PASSIVE_BEAM:\n self.beam(True) # avoid center circle\n elif self.state == 1 or (behavior.is_ready(\"Get_Up\") and self.fat_proxy_cmd is None):\n self.state = 0 if behavior.execute(\"Get_Up\") else 1 # return to normal state if get up behavior has finished\n elif PM == w.M_OUR_KICKOFF:\n if r.unum == 9:\n self.kick(120,3) # no need to change the state when PM is not Play On\n else:\n self.move(self.init_pos, orientation=ball_dir) # walk in place\n elif PM == w.M_THEIR_KICKOFF:\n self.move(self.init_pos, orientation=ball_dir) # walk in place\n elif active_player_unum != r.unum: # I am not the active player\n if r.unum == 1: # I am the goalkeeper\n self.move(self.init_pos, orientation=ball_dir) # walk in place \n else:\n # compute basic formation position based on ball position\n new_x = max(0.5,(ball_2d[0]+15)/15) * (self.init_pos[0]+15) - 15\n if self.min_teammate_ball_dist < self.min_opponent_ball_dist:\n new_x = min(new_x + 3.5, 13) # advance if team has possession\n self.move((new_x,self.init_pos[1]), orientation=ball_dir, priority_unums=[active_player_unum])\n\n else: # I am the active player\n path_draw_options(enable_obstacles=True, enable_path=True, use_team_drawing_channel=True) # enable path drawings for active player (ignored if self.enable_draw is False)\n enable_pass_command = (PM == w.M_PLAY_ON and ball_2d[0]<6)\n\n if r.unum == 1 and PM_GROUP == w.MG_THEIR_KICK: # goalkeeper during their kick\n self.move(self.init_pos, orientation=ball_dir) # walk in place \n if PM == w.M_OUR_CORNER_KICK:\n self.kick( -np.sign(ball_2d[1])*95, 5.5) # kick the ball into the space in front of the opponent's goal\n # no need to change the state when PM is not Play On\n elif self.min_opponent_ball_dist + 0.5 < self.min_teammate_ball_dist: # defend if opponent is considerably closer to the ball\n if self.state == 2: # commit to kick while aborting\n self.state = 0 if self.kick(abort=True) else 2\n else: # move towards ball, but position myself between ball and our goal\n self.move(slow_ball_pos + M.normalize_vec((-16,0) - slow_ball_pos) * 0.2, is_aggressive=True)\n else:\n self.state = 0 if self.kick(goal_dir,9,False,enable_pass_command) else 2\n\n path_draw_options(enable_obstacles=False, enable_path=False) # disable path drawings\n\n #--------------------------------------- 3. Broadcast\n self.radio.broadcast()\n\n #--------------------------------------- 4. 
Send to server\n if self.fat_proxy_cmd is None: # normal behavior\n self.scom.commit_and_send( r.get_command() )\n else: # fat proxy behavior\n self.scom.commit_and_send( self.fat_proxy_cmd.encode() ) \n self.fat_proxy_cmd = \"\"\n\n #---------------------- annotations for debugging\n if self.enable_draw: \n d = w.draw\n if active_player_unum == r.unum:\n d.point(slow_ball_pos, 3, d.Color.pink, \"status\", False) # predicted future 2D ball position when ball speed <= 0.5 m/s\n d.point(w.ball_2d_pred_pos[-1], 5, d.Color.pink, \"status\", False) # last ball prediction\n d.annotation((*my_head_pos_2d, 0.6), \"I've got it!\" , d.Color.yellow, \"status\")\n else:\n d.clear(\"status\")\n\n\n\n\n #--------------------------------------- Fat proxy auxiliary methods\n\n\n def fat_proxy_kick(self):\n w = self.world\n r = self.world.robot \n ball_2d = w.ball_abs_pos[:2]\n my_head_pos_2d = r.loc_head_position[:2]\n\n if np.linalg.norm(ball_2d - my_head_pos_2d) < 0.25:\n # fat proxy kick arguments: power [0,10]; relative horizontal angle [-180,180]; vertical angle [0,70]\n self.fat_proxy_cmd += f\"(proxy kick 10 {M.normalize_deg( self.kick_direction - r.imu_torso_orientation ):.2f} 20)\" \n self.fat_proxy_walk = np.zeros(3) # reset fat proxy walk\n return True\n else:\n self.fat_proxy_move(ball_2d-(-0.1,0), None, True) # ignore obstacles\n return False\n\n\n def fat_proxy_move(self, target_2d, orientation, is_orientation_absolute):\n r = self.world.robot\n\n target_dist = np.linalg.norm(target_2d - r.loc_head_position[:2])\n target_dir = M.target_rel_angle(r.loc_head_position[:2], r.imu_torso_orientation, target_2d)\n\n if target_dist > 0.1 and abs(target_dir) < 8:\n self.fat_proxy_cmd += (f\"(proxy dash {100} {0} {0})\")\n return\n\n if target_dist < 0.1:\n if is_orientation_absolute:\n orientation = M.normalize_deg( orientation - r.imu_torso_orientation )\n target_dir = np.clip(orientation, -60, 60)\n self.fat_proxy_cmd += (f\"(proxy dash {0} {0} {target_dir:.1f})\")\n else:\n self.fat_proxy_cmd += (f\"(proxy dash {20} {0} {target_dir:.1f})\")"
},
{
"identifier": "Base_Agent",
"path": "agent/Base_Agent.py",
"snippet": "class Base_Agent():\n all_agents = []\n\n def __init__(self, host:str, agent_port:int, monitor_port:int, unum:int, robot_type:int, team_name:str, enable_log:bool=True,\n enable_draw:bool=True, apply_play_mode_correction:bool=True, wait_for_server:bool=True, hear_callback=None) -> None:\n\n self.radio = None # hear_message may be called during Server_Comm instantiation\n self.logger = Logger(enable_log, f\"{team_name}_{unum}\")\n self.world = World(robot_type, team_name, unum, apply_play_mode_correction, enable_draw, self.logger, host)\n self.world_parser = World_Parser(self.world, self.hear_message if hear_callback is None else hear_callback)\n self.scom = Server_Comm(host,agent_port,monitor_port,unum,robot_type,team_name,self.world_parser,self.world,Base_Agent.all_agents,wait_for_server)\n self.inv_kinematics = Inverse_Kinematics(self.world.robot)\n self.behavior = Behavior(self)\n self.path_manager = Path_Manager(self.world)\n self.radio = Radio(self.world, self.scom.commit_announcement)\n self.behavior.create_behaviors()\n Base_Agent.all_agents.append(self)\n\n @abstractmethod\n def think_and_send(self):\n pass\n\n def hear_message(self, msg:bytearray, direction, timestamp:float) -> None:\n if direction != \"self\" and self.radio is not None:\n self.radio.receive(msg)\n\n def terminate(self):\n # close shared monitor socket if this is the last agent on this thread\n self.scom.close(close_monitor_socket=(len(Base_Agent.all_agents)==1))\n Base_Agent.all_agents.remove(self)\n\n @staticmethod\n def terminate_all():\n for o in Base_Agent.all_agents:\n o.scom.close(True) # close shared monitor socket, if it exists\n Base_Agent.all_agents = []"
},
{
"identifier": "Script",
"path": "scripts/commons/Script.py",
"snippet": "class Script():\n ROOT_DIR = path.dirname(path.dirname(realpath( join(getcwd(), dirname(__file__))) )) # project root directory\n\n def __init__(self, cpp_builder_unum=0) -> None:\n\n '''\n Arguments specification\n -----------------------\n - To add new arguments, edit the information below\n - After changing information below, the config.json file must be manually deleted\n - In other modules, these arguments can be accessed by their 1-letter ID\n '''\n # list of arguments: 1-letter ID, Description, Hardcoded default\n self.options = {'i': ('Server Hostname/IP', 'localhost'),\n 'p': ('Agent Port', '3100'),\n 'm': ('Monitor Port', '3200'),\n 't': ('Team Name', 'FCPortugal'),\n 'u': ('Uniform Number', '1'),\n 'r': ('Robot Type', '1'),\n 'P': ('Penalty Shootout', '0'),\n 'F': ('magmaFatProxy', '0'),\n 'D': ('Debug Mode', '1')}\n\n # list of arguments: 1-letter ID, data type, choices \n self.op_types = {'i': (str, None),\n 'p': (int, None),\n 'm': (int, None),\n 't': (str, None),\n 'u': (int, range(1,12)),\n 'r': (int, [0,1,2,3,4]),\n 'P': (int, [0,1]),\n 'F': (int, [0,1]),\n 'D': (int, [0,1])}\n \n '''\n End of arguments specification\n '''\n\n self.read_or_create_config()\n\n #advance help text position\n formatter = lambda prog: argparse.HelpFormatter(prog,max_help_position=52)\n parser = argparse.ArgumentParser(formatter_class=formatter)\n\n o = self.options\n t = self.op_types\n\n for id in self.options: # shorter metavar for aesthetic reasons\n parser.add_argument(f\"-{id}\", help=f\"{o[id][0]:30}[{o[id][1]:20}]\", type=t[id][0], nargs='?', default=o[id][1], metavar='X', choices=t[id][1])\n \n self.args = parser.parse_args()\n\n if getattr(sys, 'frozen', False): # disable debug mode when running from binary\n self.args.D = 0\n\n self.players = [] # list of created players\n\n Script.build_cpp_modules(exit_on_build = (cpp_builder_unum != 0 and cpp_builder_unum != self.args.u))\n\n if self.args.D:\n try:\n print(f\"\\nNOTE: for help run \\\"python {__main__.__file__} -h\\\"\")\n except:\n pass\n\n columns = [[],[],[]]\n for key, value in vars(self.args).items():\n columns[0].append(o[key][0])\n columns[1].append(o[key][1])\n columns[2].append(value)\n\n UI.print_table(columns, [\"Argument\",\"Default at /config.json\",\"Active\"], alignment=[\"<\",\"^\",\"^\"])\n\n\n def read_or_create_config(self) -> None:\n\n if not path.isfile('config.json'): # save hardcoded default values if file does not exist\n with open(\"config.json\", \"w\") as f:\n json.dump(self.options, f, indent=4)\n else: # load user-defined values (that can be overwritten by command-line arguments)\n if path.getsize(\"config.json\") == 0: # wait for possible write operation when launching multiple agents\n from time import sleep\n sleep(1)\n if path.getsize(\"config.json\") == 0: # abort after 1 second\n print(\"Aborting: 'config.json' is empty. Manually verify and delete if still empty.\")\n exit()\n \n with open(\"config.json\", \"r\") as f:\n self.options = json.loads(f.read())\n\n\n @staticmethod\n def build_cpp_modules(special_environment_prefix=[], exit_on_build=False):\n '''\n Build C++ modules in folder /cpp using Pybind11\n \n Parameters\n ----------\n special_environment_prefix : `list`\n command prefix to run a given command in the desired environment\n useful to compile C++ modules for different python interpreter versions (other than default version)\n Conda Env. 
example: ['conda', 'run', '-n', 'myEnv']\n If [] the default python interpreter is used as compilation target\n exit_on_build : bool\n exit if there is something to build (so that only 1 player per team builds c++ modules)\n '''\n cpp_path = Script.ROOT_DIR + \"/cpp/\"\n exclusions = [\"__pycache__\"]\n\n cpp_modules = [d for d in listdir(cpp_path) if isdir(join(cpp_path, d)) and d not in exclusions]\n\n if not cpp_modules: return #no modules to build\n\n python_cmd = f\"python{sys.version_info.major}.{sys.version_info.minor}\" # \"python3\" can select the wrong version, this prevents that\n\n def init():\n print(\"--------------------------\\nC++ modules:\",cpp_modules)\n\n try:\n process = subprocess.Popen(special_environment_prefix+[python_cmd, \"-m\", \"pybind11\", \"--includes\"], stdout=subprocess.PIPE)\n (includes, err) = process.communicate()\n process.wait()\n except:\n print(f\"Error while executing child program: '{python_cmd} -m pybind11 --includes'\")\n exit()\n\n includes = includes.decode().rstrip() # strip trailing newlines (and other whitespace chars)\n print(\"Using Pybind11 includes: '\",includes,\"'\",sep=\"\")\n return includes\n\n nproc = str(cpu_count())\n zero_modules = True\n\n for module in cpp_modules:\n module_path = join(cpp_path, module)\n\n # skip module if there is no Makefile (typical distribution case)\n if not isfile(join(module_path, \"Makefile\")):\n continue\n\n # skip module in certain conditions\n if isfile(join(module_path, module+\".so\")) and isfile(join(module_path, module+\".c_info\")):\n with open(join(module_path, module+\".c_info\"), 'rb') as f:\n info = pickle.load(f)\n if info == python_cmd:\n code_mod_time = max(getmtime(join(module_path, f)) for f in listdir(module_path) if f.endswith(\".cpp\") or f.endswith(\".h\"))\n bin_mod_time = getmtime(join(module_path, module+\".so\"))\n if bin_mod_time + 30 > code_mod_time: # favor not building with a margin of 30s (scenario: we unzip the fcpy project, including the binaries, the modification times are all similar)\n continue\n\n # init: print stuff & get Pybind11 includes\n if zero_modules:\n if exit_on_build:\n print(\"There are C++ modules to build. This player is not allowed to build. Aborting.\")\n exit()\n zero_modules = False\n includes = init()\n\n # build module\n print(f'{f\"Building: {module}... \":40}',end='',flush=True)\n process = subprocess.Popen(['make', '-j'+nproc, 'PYBIND_INCLUDES='+includes], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=module_path)\n (output, err) = process.communicate()\n exit_code = process.wait()\n if exit_code == 0:\n print(\"success!\")\n with open(join(module_path, module+\".c_info\"),\"wb\") as f: # save python version\n pickle.dump(python_cmd, f, protocol=4) # protocol 4 is backward compatible with Python 3.4\n else:\n print(\"Aborting! Building errors:\")\n print(output.decode(), err.decode())\n exit() \n\n if not zero_modules:\n print(\"All modules were built successfully!\\n--------------------------\")\n\n\n def batch_create(self, agent_cls, args_per_player): \n ''' Creates batch of agents '''\n\n for a in args_per_player:\n self.players.append( agent_cls(*a) )\n\n def batch_execute_agent(self, index : slice = slice(None)): \n ''' \n Executes agent normally (including commit & send)\n\n Parameters\n ----------\n index : slice\n subset of agents\n (e.g. index=slice(1,2) will select the second agent)\n (e.g. 
index=slice(1,3) will select the second and third agents)\n by default, all agents are selected\n ''' \n for p in self.players[index]:\n p.think_and_send()\n\n def batch_execute_behavior(self, behavior, index : slice = slice(None)):\n '''\n Executes behavior\n\n Parameters\n ----------\n behavior : str\n name of behavior to execute\n index : slice\n subset of agents\n (e.g. index=slice(1,2) will select the second agent)\n (e.g. index=slice(1,3) will select the second and third agents)\n by default, all agents are selected\n '''\n for p in self.players[index]:\n p.behavior.execute(behavior)\n\n def batch_commit_and_send(self, index : slice = slice(None)):\n '''\n Commits & sends data to server\n\n Parameters\n ----------\n index : slice\n subset of agents\n (e.g. index=slice(1,2) will select the second agent)\n (e.g. index=slice(1,3) will select the second and third agents)\n by default, all agents are selected\n '''\n for p in self.players[index]:\n p.scom.commit_and_send( p.world.robot.get_command() ) \n\n def batch_receive(self, index : slice = slice(None), update=True):\n ''' \n Waits for server messages\n\n Parameters\n ----------\n index : slice\n subset of agents\n (e.g. index=slice(1,2) will select the second agent)\n (e.g. index=slice(1,3) will select the second and third agents)\n by default, all agents are selected\n update : bool\n update world state based on information received from server\n if False, the agent becomes unaware of itself and its surroundings\n which is useful for reducing cpu resources for dummy agents in demonstrations\n '''\n for p in self.players[index]:\n p.scom.receive(update)\n\n def batch_commit_beam(self, pos2d_and_rotation, index : slice = slice(None)):\n '''\n Beam all player to 2D position with a given rotation\n\n Parameters\n ----------\n pos2d_and_rotation : `list`\n iterable of 2D positions and rotations e.g. [(0,0,45),(-5,0,90)]\n index : slice\n subset of agents\n (e.g. index=slice(1,2) will select the second agent)\n (e.g. index=slice(1,3) will select the second and third agents)\n by default, all agents are selected\n ''' \n for p, pos_rot in zip(self.players[index], pos2d_and_rotation): \n p.scom.commit_beam(pos_rot[0:2],pos_rot[2])\n\n def batch_unofficial_beam(self, pos3d_and_rotation, index : slice = slice(None)):\n '''\n Beam all player to 3D position with a given rotation\n\n Parameters\n ----------\n pos3d_and_rotation : `list`\n iterable of 3D positions and rotations e.g. [(0,0,0.5,45),(-5,0,0.5,90)]\n index : slice\n subset of agents\n (e.g. index=slice(1,2) will select the second agent)\n (e.g. index=slice(1,3) will select the second and third agents)\n by default, all agents are selected\n ''' \n for p, pos_rot in zip(self.players[index], pos3d_and_rotation): \n p.scom.unofficial_beam(pos_rot[0:3],pos_rot[3])\n\n def batch_terminate(self, index : slice = slice(None)):\n '''\n Close all sockets connected to the agent port\n For scripts where the agent lives until the application ends, this is not needed\n\n Parameters\n ----------\n index : slice\n subset of agents\n (e.g. index=slice(1,2) will select the second agent)\n (e.g. index=slice(1,3) will select the second and third agents)\n by default, all agents are selected\n '''\n for p in self.players[index]:\n p.terminate()\n del self.players[index] # delete selection"
}
] | from agent.Agent import Agent
from agent.Base_Agent import Base_Agent
from scripts.commons.Script import Script
import numpy as np | 7,156 |
'''
Objective:
----------
Dribble and score
'''
class Dribble():
def __init__(self, script:Script) -> None:
self.script = script
def execute(self):
a = self.script.args
# Args: Server IP, Agent Port, Monitor Port, Uniform No., [Robot Type] (for Base_Agent), Team name, Enable Log, Enable Draw
|
'''
Objective:
----------
Dribble and score
'''
class Dribble():
def __init__(self, script:Script) -> None:
self.script = script
def execute(self):
a = self.script.args
# Args: Server IP, Agent Port, Monitor Port, Uniform No., [Robot Type] (for Base_Agent), Team name, Enable Log, Enable Draw | self.script.batch_create(Base_Agent, ((a.i,a.p,a.m,a.u,a.r,a.t,True,True),)) # one dribbler | 1 | 2023-12-16 23:40:23+00:00 | 8k |
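A minimal run-loop sketch for the Dribble demo row above, using only the Script helpers documented in this row's context snippet (batch_create, batch_commit_and_send, batch_receive). The standalone form and the endless loop are editorial assumptions; only the recorded next_line's batch_create call and the helper signatures come from the row itself.

from agent.Base_Agent import Base_Agent
from scripts.commons.Script import Script

script = Script()   # parses the -i/-p/-m/-u/-r/-t arguments listed in the Script snippet
a = script.args

# Recorded next_line: create one player from the parsed arguments
script.batch_create(Base_Agent, ((a.i, a.p, a.m, a.u, a.r, a.t, True, True),))  # one dribbler

# Assumed demo loop: keep the agent alive by committing commands and reading server state
while True:
    script.batch_commit_and_send()  # commit & send each player's current robot command
    script.batch_receive()          # wait for server messages and update the world model
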
koenhendriks/ha-button-plus | custom_components/button_plus/config_flow.py | [
{
"identifier": "ApiClient",
"path": "custom_components/button_plus/button_plus_api/api_client.py",
"snippet": "class ApiClient:\n \"\"\" Client to talk to Button+ website \"\"\"\n\n def __init__(self, session, cookie=None) -> None:\n _LOGGER.debug(f\"DEBUG CONFIG {cookie}\")\n self._base = \"https://api.button.plus\"\n self._session = session\n\n if not cookie:\n cookie = \"\"\n\n self._cookie = cookie\n self._headers = {\n 'authority': 'api.button.plus',\n 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',\n 'accept-language': 'en-NL,en-US;q=0.9,en;q=0.8,nl-NL;q=0.7,nl;q=0.6,en-GB;q=0.5',\n 'cache-control': 'no-cache',\n 'cookie': self._cookie,\n }\n\n _LOGGER.debug(f\"Initialize Button+ API client\")\n\n async def test_connection(self):\n url = f\"{self._base}/button/buttons\"\n _LOGGER.debug(f\"test_connection on {url}\")\n async with self._session.get(url, headers=self._headers) as response:\n _LOGGER.debug(f\"Fetch website validation = {response.status} {response}\")\n return 200 <= response.status < 300\n\n async def fetch_config(self, config=int):\n url = f\"{self._base}/button/config/{config}\"\n _LOGGER.debug(f\"fetch_config {url}\")\n async with self._session.get(url, headers=self._headers) as response:\n return await response.text()\n\n async def fetch_configs(self):\n url = f\"{self._base}/button/buttons\"\n _LOGGER.debug(f\"fetch_configs {url}\")\n async with self._session.get(url, headers=self._headers) as response:\n return await response.text()\n\n async def get_cookie_from_login(self, email=str, password=str):\n url = f\"{self._base}/account/login\"\n data = {\"email\": email, \"password\": password, \"remember\": True}\n json_data = json.dumps(data)\n _LOGGER.debug(f\"json dump: {json_data}\")\n headers = {\n 'accept': '*/*',\n 'accept-language': 'en-NL,en;q=0.9',\n 'content-type': 'application/json',\n 'origin': 'https://button.plus',\n 'referer': 'https://button.plus/',\n 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'\n }\n\n async with self._session.post(url, data=json_data, headers=headers) as response:\n response_body = await response.text()\n\n if not response.cookies:\n raise Exception(f\"Login error with username and password, response: {response_body}\")\n\n cookie_string = str(response.cookies)\n match = re.search(r'auth_cookie=[^;]+', cookie_string)\n\n auth_cookie = match.group()\n\n return auth_cookie"
},
{
"identifier": "LocalApiClient",
"path": "custom_components/button_plus/button_plus_api/local_api_client.py",
"snippet": "class LocalApiClient:\n \"\"\" Client to talk to Button+ local devices \"\"\"\n\n def __init__(self, ip_address, session) -> None:\n self._base = f\"http://{ip_address}\"\n self._session = session\n\n _LOGGER.debug(f\"Initialize Button+ local API client\")\n\n async def fetch_config(self):\n url = f\"{self._base}/config\"\n _LOGGER.debug(f\"fetch_config {url}\")\n async with self._session.get(url) as response:\n return await response.text()\n\n async def push_config(self, config):\n url = f\"{self._base}/configsave\"\n _LOGGER.debug(f\"push_config {url}\")\n async with self._session.post(url, data=config.to_json()) as response:\n return await response.text()"
},
{
"identifier": "DeviceConfiguration",
"path": "custom_components/button_plus/button_plus_api/model.py",
"snippet": "class DeviceConfiguration:\n def __init__(self, info: Info, core: Core, mqtt_buttons: List[MqttButton], mqtt_displays: List[MqttDisplay],\n mqtt_brokers: List[MqttBroker], mqtt_sensors: List[MqttSensor]):\n self.info = info\n self.core = core\n self.mqtt_buttons = mqtt_buttons\n self.mqtt_displays = mqtt_displays\n self.mqtt_brokers = mqtt_brokers\n self.mqtt_sensors = mqtt_sensors\n\n @staticmethod\n def from_json(json_data: str) -> 'DeviceConfiguration':\n data = json.loads(json_data)\n return DeviceConfiguration(\n info=Info.from_dict(data['info']),\n core=Core.from_dict(data['core']),\n mqtt_buttons=[MqttButton.from_dict(button) for button in data['mqttbuttons']],\n mqtt_displays=[MqttDisplay.from_dict(display) for display in data['mqttdisplays']],\n mqtt_brokers=[MqttBroker.from_dict(broker) for broker in data['mqttbrokers']],\n mqtt_sensors=[MqttSensor.from_dict(sensor) for sensor in data['mqttsensors']],\n )\n\n def to_json(self) -> str:\n def serialize(obj):\n if hasattr(obj, '__dict__'):\n d = obj.__dict__.copy()\n\n # Convert the root keys\n if isinstance(obj, DeviceConfiguration):\n d['mqttbuttons'] = [serialize(button) for button in d.pop('mqtt_buttons')]\n d['mqttdisplays'] = [serialize(display) for display in d.pop('mqtt_displays')]\n d['mqttbrokers'] = [serialize(broker) for broker in d.pop('mqtt_brokers')]\n d['mqttsensors'] = [serialize(sensor) for sensor in d.pop('mqtt_sensors')]\n\n if isinstance(obj, Info):\n d['id'] = d.pop('device_id')\n d['ipaddress'] = d.pop('ip_address')\n d['largedisplay'] = d.pop('large_display')\n\n elif isinstance(obj, Connector):\n d['id'] = d.pop('connector_id')\n d['type'] = d.pop('connector_type')\n\n elif isinstance(obj, Sensor):\n d['sensorid'] = d.pop('sensor_id')\n\n elif isinstance(obj, Core):\n d['autobackup'] = d.pop('auto_backup')\n d['brightnesslargedisplay'] = d.pop('brightness_large_display')\n d['brightnessminidisplay'] = d.pop('brightness_mini_display')\n d['ledcolorfront'] = d.pop('led_color_front')\n d['ledcolorwall'] = d.pop('led_color_wall')\n\n # Custom mappings for MqttButton class\n elif isinstance(obj, MqttButton):\n d['id'] = d.pop('button_id')\n d['toplabel'] = d.pop('top_label')\n d['ledcolorfront'] = d.pop('led_color_front')\n d['ledcolorwall'] = d.pop('led_color_wall')\n d['longdelay'] = d.pop('long_delay')\n d['longrepeat'] = d.pop('long_repeat')\n\n elif isinstance(obj, Topic):\n d['brokerid'] = d.pop('broker_id')\n d['eventtype'] = d.pop('event_type')\n\n elif isinstance(obj, MqttDisplay):\n d['fontsize'] = d.pop('font_size')\n d['topics'] = [serialize(topic) for topic in d['topics']]\n\n elif isinstance(obj, MqttBroker):\n d['brokerid'] = d.pop('broker_id')\n d['wsport'] = d.pop('ws_port')\n\n elif isinstance(obj, MqttSensor):\n d['sensorid'] = d.pop('sensor_id')\n d['topic'] = serialize(d['topic'])\n\n # Filter out None values\n return {k: v for k, v in d.items() if v is not None}\n else:\n return str(obj)\n\n return json.dumps(self, default=serialize, indent=4)"
},
{
"identifier": "MqttBroker",
"path": "custom_components/button_plus/button_plus_api/model.py",
"snippet": "class MqttBroker:\n def __init__(self, broker_id: str, url: str, port: int, ws_port: int, username: str, password: str):\n self.broker_id = broker_id\n self.url = url\n self.port = port\n self.ws_port = ws_port\n self.username = username\n self.password = password\n\n @staticmethod\n def from_dict(data: Dict[str, Any]) -> 'MqttBroker':\n return MqttBroker(\n broker_id=data['brokerid'],\n url=data['url'],\n port=data['port'],\n ws_port=data['wsport'],\n username=data['username'],\n password=data['password']\n )"
},
{
"identifier": "EventType",
"path": "custom_components/button_plus/button_plus_api/event_type.py",
"snippet": "class EventType(int, Enum):\n CLICK = 0\n LONG_PRESS = 1\n BLUE_LED = 8\n RED_LED = 9\n GREEN_LED = 10\n LABEL = 11\n TOPLABEL = 12\n RGB_LED = 13\n LED = 14\n VALUE = 15\n UNIT = 17\n SENSOR_VALUE = 18"
},
{
"identifier": "DOMAIN",
"path": "custom_components/button_plus/const.py",
"snippet": "DOMAIN = \"button_plus\""
}
] | import ipaddress
import json
import logging
import traceback
import voluptuous as vol
from json import JSONDecodeError
from homeassistant import config_entries, exceptions
from homeassistant.const import CONF_IP_ADDRESS, CONF_EMAIL, CONF_PASSWORD, CONF_HOST
from homeassistant.helpers import aiohttp_client
from .button_plus_api.api_client import ApiClient
from .button_plus_api.local_api_client import LocalApiClient
from .button_plus_api.model import DeviceConfiguration, MqttBroker
from .button_plus_api.event_type import EventType
from homeassistant.helpers.network import get_url
from .const import DOMAIN # pylint:disable=unused-import | 4,125 | )
async def async_step_fetch_website(self, user_input=None):
"""Handle fetching the Button+ devices from the website."""
errors = {}
_LOGGER.debug(f"Fetch website step {user_input}")
if user_input is not None:
try:
api_client = await self.setup_api_client(user_input)
valid = await api_client.test_connection()
if valid:
json_response = await api_client.fetch_configs()
devices = json.loads(json_response)
last_entry = None
total_devices = len(devices)
_LOGGER.info(f"Found {total_devices} devices from Button+ website")
for device in devices:
device_website_id = device.get("Id")
device_ip = device.get('IpAddress')
if not device_ip:
_LOGGER.warning(f"Skipping device {device_website_id}, it has no IP so must be virtual")
continue
_LOGGER.debug(f"loaded device from website with id: {device_website_id} and ip {device_ip}")
device_config = json.loads(device.get("Json"))
device_name = device_config.get('core').get('name')
device_id = device_config.get('info').get('id')
last_entry = self.async_create_entry(
title=f"{device_name}",
description=f"Base module on {device_ip} with local id {device_id} and website id {device_website_id}",
data=device_config
)
return last_entry
except JSONDecodeError as ex: # pylint: disable=broad-except
_LOGGER.error(
f"{DOMAIN} Could not parse json from Button+ website : %s - traceback: %s",
ex,
traceback.format_exc()
)
errors["base"] = "Error connecting or reading from https://api.button.plus/"
except Exception as ex: # pylint: disable=broad-except
_LOGGER.error(
f"{DOMAIN} Exception in login : %s - traceback: %s",
ex,
traceback.format_exc()
)
errors["base"] = "cannot_connect"
if "base" not in errors:
errors["base"] = "cannot_connect"
return self.async_show_form(
step_id="fetch_website",
data_schema=vol.Schema({CONF_EMAIL: str, CONF_PASSWORD: str, "cookie": str}),
errors=errors
)
async def setup_api_client(self, user_input):
_LOGGER.debug(f"Setting up API client with {user_input}")
if "cookie" not in user_input:
client = ApiClient(aiohttp_client.async_get_clientsession(self.hass))
cookie = await client.get_cookie_from_login(user_input.get('email'), user_input.get('password'))
else:
cookie = user_input.get("cookie")
return ApiClient(aiohttp_client.async_get_clientsession(self.hass), cookie)
def validate_ip(self, ip) -> bool:
try:
ipaddress.IPv4Address(ip)
return True
except ValueError:
return False
def add_broker_to_config(self, device_config: DeviceConfiguration) -> DeviceConfiguration:
mqtt_entry = self.mqtt_entry
broker_port = mqtt_entry.data.get("port")
broker_username = mqtt_entry.data.get("username", "")
broker_password = mqtt_entry.data.get("password", "")
broker = MqttBroker(
broker_id=f"ha-button-plus",
url=f"mqtt://{self.broker_endpoint}/",
port=broker_port,
ws_port=9001,
username=broker_username,
password=broker_password
)
device_config.mqtt_brokers.append(broker)
return device_config
def add_topics_to_buttons(self, device_config) -> DeviceConfiguration:
device_id = device_config.info.device_id
active_connectors = [
connector.connector_id
for connector in device_config.info.connectors
if connector.connector_type in [1, 2]
]
for button in filter(lambda b: b.button_id // 2 in active_connectors, device_config.mqtt_buttons):
# Create topics for button main label
button.topics.append({
"brokerid": "ha-button-plus",
"topic": f"buttonplus/{device_id}/button/{button.button_id}/label",
"payload": "",
| """Config flow for Hello World integration."""
from __future__ import annotations
_LOGGER = logging.getLogger(__name__)
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Button+."""
local_brokers = [
"core-mosquitto",
"127.0.0.1",
"localhost"
]
def __init__(self):
self.mqtt_entry = None
self.broker_endpoint = None
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
async def async_step_user(self, user_input=None):
"""Handle the initial Button+ setup, showing the 2 options and checking the MQTT integration."""
errors = {}
mqtt_entries = self.hass.config_entries.async_entries(domain="mqtt")
if len(mqtt_entries) < 1:
mqtt_url = f'{get_url(self.hass)}/config/integrations/integration/mqtt'
return self.async_abort(
reason="mqtt_not_enabled",
description_placeholders={
"mqtt_integration_link": mqtt_url
})
mqtt_entry = mqtt_entries[0]
broker = self.get_mqtt_endpoint(mqtt_entry.data.get("broker"))
broker_port = mqtt_entry.data.get("port")
broker_username = mqtt_entry.data.get("username", "(No authentication)")
self.mqtt_entry = mqtt_entry
if user_input is not None:
self.broker_endpoint = user_input.get("broker", broker)
return await self.async_step_choose_entry()
return self.async_show_form(
step_id="user",
data_schema=vol.Schema({
vol.Required("broker", default=broker): str
}),
errors=errors,
description_placeholders={
"mqtt_broker": broker,
"mqtt_broker_port": broker_port,
"mqtt_user": broker_username
}
)
async def async_step_choose_entry(self, user_input=None):
errors = {}
# if user_input is not None:
return self.async_show_menu(
step_id="choose_entry",
menu_options=["fetch_website", "manual"],
description_placeholders={}
)
async def async_step_manual(self, user_input=None):
""" Handle setting up button plus from manual IP."""
errors = {}
ip = None
if user_input is not None:
ip = user_input.get(CONF_IP_ADDRESS, None)
valid = self.validate_ip(ip)
if valid:
try:
_LOGGER.debug(f"Fetching button+ device at {ip}")
api_client = LocalApiClient(ip, aiohttp_client.async_get_clientsession(self.hass))
json_config = await api_client.fetch_config()
device_config: DeviceConfiguration = DeviceConfiguration.from_json(json_config)
self.add_broker_to_config(device_config)
self.add_topics_to_buttons(device_config)
await api_client.push_config(device_config)
return self.async_create_entry(
title=f"{device_config.core.name}",
description=f"Base module on {ip} with id {device_config.info.device_id}",
data={"config": json_config}
)
except JSONDecodeError as ex: # pylint: disable=broad-except
_LOGGER.error(
f"{DOMAIN} Could not parse json from IP {ip} : %s - traceback: %s",
ex,
traceback.format_exc()
)
errors["base"] = f"Error connecting or reading from {ip}"
except Exception as ex: # pylint: disable=broad-except
_LOGGER.error(
f"{DOMAIN} Exception in login : %s - traceback: %s",
ex,
traceback.format_exc()
)
errors["base"] = "cannot_connect"
else:
errors["base"] = 'invalid_ip'
return self.async_show_form(
step_id="manual",
data_schema=vol.Schema({CONF_IP_ADDRESS: str}),
errors=errors,
description_placeholders={
"ip": ip
}
)
async def async_step_fetch_website(self, user_input=None):
"""Handle fetching the Button+ devices from the website."""
errors = {}
_LOGGER.debug(f"Fetch website step {user_input}")
if user_input is not None:
try:
api_client = await self.setup_api_client(user_input)
valid = await api_client.test_connection()
if valid:
json_response = await api_client.fetch_configs()
devices = json.loads(json_response)
last_entry = None
total_devices = len(devices)
_LOGGER.info(f"Found {total_devices} devices from Button+ website")
for device in devices:
device_website_id = device.get("Id")
device_ip = device.get('IpAddress')
if not device_ip:
_LOGGER.warning(f"Skipping device {device_website_id}, it has no IP so must be virtual")
continue
_LOGGER.debug(f"loaded device from website with id: {device_website_id} and ip {device_ip}")
device_config = json.loads(device.get("Json"))
device_name = device_config.get('core').get('name')
device_id = device_config.get('info').get('id')
last_entry = self.async_create_entry(
title=f"{device_name}",
description=f"Base module on {device_ip} with local id {device_id} and website id {device_website_id}",
data=device_config
)
return last_entry
except JSONDecodeError as ex: # pylint: disable=broad-except
_LOGGER.error(
f"{DOMAIN} Could not parse json from Button+ website : %s - traceback: %s",
ex,
traceback.format_exc()
)
errors["base"] = "Error connecting or reading from https://api.button.plus/"
except Exception as ex: # pylint: disable=broad-except
_LOGGER.error(
f"{DOMAIN} Exception in login : %s - traceback: %s",
ex,
traceback.format_exc()
)
errors["base"] = "cannot_connect"
if "base" not in errors:
errors["base"] = "cannot_connect"
return self.async_show_form(
step_id="fetch_website",
data_schema=vol.Schema({CONF_EMAIL: str, CONF_PASSWORD: str, "cookie": str}),
errors=errors
)
async def setup_api_client(self, user_input):
_LOGGER.debug(f"Setting up API client with {user_input}")
if "cookie" not in user_input:
client = ApiClient(aiohttp_client.async_get_clientsession(self.hass))
cookie = await client.get_cookie_from_login(user_input.get('email'), user_input.get('password'))
else:
cookie = user_input.get("cookie")
return ApiClient(aiohttp_client.async_get_clientsession(self.hass), cookie)
def validate_ip(self, ip) -> bool:
try:
ipaddress.IPv4Address(ip)
return True
except ValueError:
return False
def add_broker_to_config(self, device_config: DeviceConfiguration) -> DeviceConfiguration:
mqtt_entry = self.mqtt_entry
broker_port = mqtt_entry.data.get("port")
broker_username = mqtt_entry.data.get("username", "")
broker_password = mqtt_entry.data.get("password", "")
broker = MqttBroker(
broker_id=f"ha-button-plus",
url=f"mqtt://{self.broker_endpoint}/",
port=broker_port,
ws_port=9001,
username=broker_username,
password=broker_password
)
device_config.mqtt_brokers.append(broker)
return device_config
def add_topics_to_buttons(self, device_config) -> DeviceConfiguration:
device_id = device_config.info.device_id
active_connectors = [
connector.connector_id
for connector in device_config.info.connectors
if connector.connector_type in [1, 2]
]
for button in filter(lambda b: b.button_id // 2 in active_connectors, device_config.mqtt_buttons):
# Create topics for button main label
button.topics.append({
"brokerid": "ha-button-plus",
"topic": f"buttonplus/{device_id}/button/{button.button_id}/label",
"payload": "", | "eventtype": EventType.LABEL | 4 | 2023-12-18 15:14:21+00:00 | 8k |
Sam-Izdat/tinycio | src/tinycio/colorspace.py | [
{
"identifier": "Float2",
"path": "src/tinycio/numerics/vector.py",
"snippet": "class Float2(np.ndarray):\n \"\"\"\n Float2 type using numpy.ndarray.\n \"\"\"\n def __new__(cls, *args):\n if len(args) == 1:\n if isinstance(args[0], list) or isinstance(args[0], tuple):\n assert len(args[0]) == 2, \"list/tuple must have 2 components\"\n arr = np.asarray([args[0][0], args[0][1]], dtype=np.float32).view(cls)\n elif isinstance(args[0], np.ndarray):\n assert len(args[0].squeeze().shape) == 1 and args[0].shape[0] == 2, \\\n \"numpy array must be sized [C=2] or [C=2, H=1, W=1]\"\n arr = np.asarray(args[0].squeeze(), dtype=np.float32).view(cls)\n elif torch.is_tensor(args[0]):\n assert len(args[0].squeeze().size()) == 1 and args[0].size(0) == 2, \\\n \"torch tensor must be sized [C=2] or [C=2, H=1, W=1]\"\n value = args[0].squeeze().float().cpu()\n arr = np.asarray([value[0].item(), value[1].item()], dtype=np.float32).view(cls)\n else:\n value = float(args[0])\n arr = np.asarray([value, value], dtype=np.float32).view(cls)\n elif len(args) == 2:\n arr = np.asarray(args, dtype=np.float32).view(cls)\n else: \n raise TypeError(\"Float2 only accepts 1 or 2 arguments.\")\n return arr\n\n def list(self) -> list:\n \"\"\"Returns values as Python list\"\"\"\n return [self[0], self[1]]\n\n def tuple(self) -> tuple:\n \"\"\"Returns values as Python tuple\"\"\"\n return (self[0], self[1])\n\n @property\n def x(self) -> float:\n return self[0]\n @x.setter\n def x(self, value):\n self[0] = value\n @property\n def y(self) -> float:\n return self[1]\n @y.setter\n def y(self, value):\n self[1] = value\n @property\n def r(self) -> float:\n return self[0]\n @r.setter\n def r(self, value):\n self[0] = value\n @property\n def g(self) -> float:\n return self[1]\n @g.setter\n def g(self, value):\n self[1] = value\n\n @staticmethod\n def zero():\n \"\"\"Returns numeric type filled with zero values\"\"\"\n return Float2(0., 0.)\n @staticmethod\n def one():\n \"\"\"Returns numeric type filled with one values\"\"\"\n return Float2(1., 1.)\n @staticmethod\n def x_axis():\n \"\"\"Returns numeric type with x-axis set to 1 and all others to 0\"\"\"\n return Float2(1., 0.)\n @staticmethod\n def y_axis():\n \"\"\"Returns numeric type with y-axis set to 1 and all others to 0\"\"\"\n return Float2(0., 1.)\n\n @property\n def xx(self): return Float2(self.x, self.x)\n @property\n def xy(self): return self\n @property\n def yx(self): return Float2(self.y, self.x)\n @property\n def yy(self): return Float2(self.y, self.y)\n\n @property\n def rr(self): return Float2(self.r, self.r)\n @property\n def rg(self): return self\n @property\n def gr(self): return Float2(self.g, self.r)\n @property\n def gg(self): return Float2(self.g, self.g)\n\n @property\n def xxx(self): return Float3(self.x, self.x, self.x)\n @property\n def xxy(self): return Float3(self.x, self.x, self.y)\n @property\n def xyx(self): return Float3(self.x, self.y, self.x)\n @property\n def xyy(self): return Float3(self.x, self.y, self.y)\n @property\n def yxx(self): return Float3(self.y, self.x, self.x)\n @property\n def yxy(self): return Float3(self.y, self.x, self.y)\n @property\n def yyx(self): return Float3(self.y, self.y, self.x)\n @property\n def yyy(self): return Float3(self.y, self.y, self.y)\n\n @property\n def rrr(self): return Float3(self.r, self.r, self.r)\n @property\n def rrg(self): return Float3(self.r, self.r, self.g)\n @property\n def rgr(self): return Float3(self.r, self.g, self.r)\n @property\n def rgg(self): return Float3(self.r, self.g, self.g)\n @property\n def grr(self): return Float3(self.g, self.r, self.r)\n 
@property\n def grg(self): return Float3(self.g, self.r, self.g)\n @property\n def ggr(self): return Float3(self.g, self.g, self.r)\n @property\n def ggg(self): return Float3(self.g, self.g, self.g)\n\n @property\n def xxxx(self): return Float4(self.x, self.x, self.x, self.x)\n @property\n def xxxy(self): return Float4(self.x, self.x, self.x, self.y)\n @property\n def xxyx(self): return Float4(self.x, self.x, self.y, self.x)\n @property\n def xxyy(self): return Float4(self.x, self.x, self.y, self.y)\n @property\n def xyxx(self): return Float4(self.x, self.y, self.x, self.x)\n @property\n def xyxy(self): return Float4(self.x, self.y, self.x, self.y)\n @property\n def xyyx(self): return Float4(self.x, self.y, self.y, self.x)\n @property\n def xyyy(self): return Float4(self.x, self.y, self.y, self.y)\n @property\n def yxxx(self): return Float4(self.y, self.x, self.x, self.x)\n @property\n def yxxy(self): return Float4(self.y, self.x, self.x, self.y)\n @property\n def yxyx(self): return Float4(self.y, self.x, self.y, self.x)\n @property\n def yxyy(self): return Float4(self.y, self.x, self.y, self.y)\n @property\n def yyxx(self): return Float4(self.y, self.y, self.x, self.x)\n @property\n def yyxy(self): return Float4(self.y, self.y, self.x, self.y)\n @property\n def yyyx(self): return Float4(self.y, self.y, self.y, self.x)\n @property\n def yyyy(self): return Float4(self.y, self.y, self.y, self.y)\n\n @property\n def rrrr(self): return Float4(self.r, self.r, self.r, self.r)\n @property\n def rrrg(self): return Float4(self.r, self.r, self.r, self.g)\n @property\n def rrgr(self): return Float4(self.r, self.r, self.g, self.r)\n @property\n def rrgg(self): return Float4(self.r, self.r, self.g, self.g)\n @property\n def rgrr(self): return Float4(self.r, self.g, self.r, self.r)\n @property\n def rgrg(self): return Float4(self.r, self.g, self.r, self.g)\n @property\n def rggr(self): return Float4(self.r, self.g, self.g, self.r)\n @property\n def rggg(self): return Float4(self.r, self.g, self.g, self.g)\n @property\n def grrr(self): return Float4(self.g, self.r, self.r, self.r)\n @property\n def grrg(self): return Float4(self.g, self.r, self.r, self.g)\n @property\n def grgr(self): return Float4(self.g, self.r, self.g, self.r)\n @property\n def grgg(self): return Float4(self.g, self.r, self.g, self.g)\n @property\n def ggrr(self): return Float4(self.g, self.g, self.r, self.r)\n @property\n def ggrg(self): return Float4(self.g, self.g, self.r, self.g)\n @property\n def gggr(self): return Float4(self.g, self.g, self.g, self.r)\n @property\n def gggg(self): return Float4(self.g, self.g, self.g, self.g)"
},
{
"identifier": "matmul_tl",
"path": "src/tinycio/numerics/linalg.py",
"snippet": "def matmul_tl(im:torch.Tensor, mat:list):\n \"\"\"\n Multiply image tensor by a 3x3 matrix as Python list.\n\n :param im: Input image tensor of shape (C, H, W).\n :type im: torch.Tensor\n :param mat: 3x3 matrix for multiplication.\n :type mat: List[List[float]]\n :return: Result of the matrix multiplication, with the same shape as the input image.\n :rtype: torch.Tensor\n \"\"\"\n # NOTE: Internal - leaving this clutter undocumented intentionally\n C, H, W = im.size()\n out = im.clone().permute(1,2,0).reshape(-1, 1, C)\n mat = torch.tensor(mat).unsqueeze(0).repeat(out.size(0), 1, 1).to(im.device)\n out = torch.bmm(out, mat.transpose(1, 2)).permute(2,0,1).view(C, H, W)\n return out"
}
] | import typing
import torch
import numpy as np
from typing import Union
from enum import IntEnum
from .numerics import Float2, matmul_tl as mm | 6,752 |
mat_xyz_to_rec2020 = [
[1.71665118797127, -0.355670783776393, -0.25336628137366],
[-0.666684351832489, 1.61648123663494, 0.0157685458139111],
[0.0176398574453108, -0.0427706132578085, 0.942103121235474]]
# NOTE: No chromatic adaptation
mat_acescg_to_xyz = [
[ 0.66245418, 0.13400421, 0.15618769],
[ 0.27222872, 0.67408177, 0.05368952],
[-0.00557465, 0.00406073, 1.0103391 ]]
# NOTE: No chromatic adaptation
mat_xyz_to_acescg = [
[ 1.64102338, -0.32480329, -0.2364247 ],
[-0.66366286, 1.61533159, 0.01675635],
[ 0.01172189, -0.00828444, 0.98839486]]
# NOTE: For CIE XYZ color
mat_d60_to_d65 = [
[ 0.98722400,-0.00611327, 0.01595330],
[-0.00759836, 1.00186000, 0.00533002],
[ 0.00307257,-0.00509595, 1.08168000]]
# NOTE: For CIE XYZ color
mat_d65_to_d60 = [
[ 1.01303000, 0.00610531,-0.01497100],
[ 0.00769823, 0.99816500,-0.00503203],
[-0.00284131, 0.00468516, 0.92450700]]
# NOTE: For CIE XYZ color
mat_d65_to_dci = [
[0.976578896646979768, -0.0154362646984919742, -0.016686021704209866],
[-0.0256896658505145926, 1.02853916787996963, -0.00378517365630504153],
[-0.00570574587417104179, 0.0110778657389971485, 0.871176159390377409]]
# NOTE: For CIE XYZ color
mat_dci_to_d65 = [
[1.02449672775257752, 0.0151635410224165156, 0.0196885223342066827],
[0.0256121933371584198, 0.97258630562441342, 0.00471635229242730096],
[0.0063842306500876874, -0.012268082736730219, 1.14794244517367791]]
mat_xyz_to_lms = [
[ 0.8951, 0.2664,-0.1614],
[-0.7502, 1.7135, 0.0367],
[ 0.0389,-0.0685, 1.0296]]
mat_lms_to_xyz = [
[ 0.986993, -0.147054, 0.159963],
[ 0.432305, 0.51836, 0.0492912],
[ -0.00852866, 0.0400428, 0.968487]]
# OKLAB's XYZ to LMS
mat_oklab_m1 = [
[ 0.8189330101, 0.3618667424, -0.1288597137],
[ 0.0329845436, 0.9293118715, 0.0361456387],
[ 0.0482003018, 0.2643662691, 0.6338517070]]
# OKLAB's non-linear L'M'S' to OKLAB
mat_oklab_m2 = [
[ 0.2104542553, 0.7936177850, -0.0040720468],
[ 1.9779984951, -2.4285922050, 0.4505937099],
[ 0.0259040371, 0.7827717662, -0.8086757660]]
# Inverse of OKLAB M1
mat_oklab_m1_inv = [
[ 1.22701385, -0.55779998, 0.28125615],
[-0.04058018, 1.11225687, -0.07167668],
[-0.07638128, -0.42148198, 1.58616322]]
# Inverse of OKLAB M2
mat_oklab_m2_inv = [
[ 1. , 0.39633779, 0.21580376],
[ 1.00000001, -0.10556134, -0.06385417],
[ 1.00000005, -0.08948418, -1.29148554]]
@classmethod
def convert(cls, im:Union[torch.Tensor, ColorImage], source:Variant, destination:Variant) -> torch.Tensor:
"""
Change the color space of an image. Cylindrical transformations HSV/HSL are
treated as their own color spaces and assumed to be relative to sRGB linear.
Unless otherwise noted or required by specification (e.g. ACES), we assume D65 white point.
.. warning::
Tone mapping is not included, so converting the color space of HDR values to
an LDR-designated color space will not automatically reduce dynamic range. For example,
taking an HDR image from :code:`ACESCG` (AP1) to :code:`SRGB` will yield the sRGB
gamma curve, but values outside the required range must still be tone mapped or clamped beforehand.
.. warning::
Cylindrical transformations (HSL, HSV) should be given input in [0, 1] linear sRGB range
(or equivalent). This is not strictly enforced but input outside this range may yield
unpredictable results or *NaN* values.
:param im: [C=3, H, W] image tensor
:type im: torch.Tensor | ColorImage
:param source: color space to convert from
:param destination: color space to convert to
:return: image tensor in designated color space
"""
ip, op = source, destination
cs = cls.Variant
tf = TransferFunction
if ip == op: return im
assert im.dim() == 3 and im.size(0) == 3, f"expected [C=3, H, W] image tensor, got {im.size()}"
assert source != 0, f"Unknown source color space"
assert ip & cs.SUPPORTED, f"Source color space not supported: {source.name}"
assert op & cs.SUPPORTED, f"Destination color space not supported: {destination.name}"
assert ip & ~cs.DISABLED, f"Source color space disabled: {ColorSpace.Variant(ip).name}"
assert op & ~cs.DISABLED, f"Destination color space disabled: {ColorSpace.Variant(op).name}"
err_not_implemented = f"Color space conversion not implemented: {ColorSpace.Variant(ip).name} to {ColorSpace.Variant(op).name}"
# Direct path where it matters, loop-de-loop elsewhere
if ip == cs.SRGB_LIN:
if op == cs.SRGB: im = tf.srgb_oetf(im)
elif op == cs.REC709: im = tf.rec709_oetf(im)
| from __future__ import annotations
class ColorSpace:
"""
Color space conversion. Applies OETFs and EOTFs as needed but omits tonemapping. Cylindrical transformations are
treated as distinct color spaces. Example:
.. highlight:: python
.. code-block:: python
cs_in = ColorSpace.Variant.SRGB_LIN
cs_out = ColorSpace.Variant.OKLAB
oklab_image = ColorSpace.convert(srgb_image, source=cs_in, destination=cs_out)
"""
class Variant(IntEnum):
"""
Color space enum. For a list of available options, see :ref:`ref_color_spaces`.
"""
UNKNOWN = 1<<0
NONCOLOR = 1<<1
CIE_XYZ = 1<<2
CIE_XYY = 1<<3
SRGB = 1<<4
SRGB_LIN = 1<<5
REC709 = 1<<6
REC2020 = 1<<7
REC2020_LIN = 1<<8
DCI_P3 = 1<<9
DCI_P3_LIN = 1<<10
DISPLAY_P3 = 1<<11
ACESCG = 1<<12
ACESCC = 1<<13
ACESCCT = 1<<14
ACES2065_1 = 1<<15
LMS = 1<<16
OKLAB = 1<<17
CIELAB = 1<<18
CIELUV = 1<<19
HSV = 1<<20
HSL = 1<<21
OKHSV = 1<<22
OKHSL = 1<<23
SCENE_LINEAR = SRGB_LIN | REC2020_LIN | DCI_P3_LIN | ACESCG | ACES2065_1 | CIE_XYZ
PERCEPTUAL = OKLAB | CIELAB | CIELUV | OKHSL | OKHSV
CYLINDRICAL = HSL | HSV | OKHSL | OKHSV
GAMUT_SRGB = SRGB | SRGB_LIN | REC709 | HSL | HSV
GAMUT_AP0 = ACES2065_1
GAMUT_AP1 = ACESCG | ACESCC | ACESCCT
GAMUT_REC2020 = REC2020 | REC2020_LIN
GAMUT_DCI_P3 = DCI_P3 | DCI_P3_LIN
GAMUT_DISPLAY_P3= DISPLAY_P3
GAMUT_OKLAB = OKLAB | OKHSL | OKHSV
GAMUT_CIE_XYZ = CIE_XYZ | CIE_XYY
GAMUT_CIELAB = CIELAB
GAMUT_CIELUV = CIELUV
GAMUT_OTHER = LMS | UNKNOWN | NONCOLOR
WP_D65 = SRGB | SRGB_LIN | REC709 | DISPLAY_P3 | REC2020 | REC2020_LIN | CIE_XYZ | CIE_XYY
WP_CCT_6300 = DCI_P3 | DCI_P3_LIN
WP_CCT_6000 = ACESCG | ACESCC | ACESCCT | ACES2065_1
MODEL_RGB = SRGB | SRGB_LIN | REC709 | REC2020 | REC2020_LIN | DCI_P3 | DCI_P3_LIN | DISPLAY_P3 | \
ACESCG | ACESCC | ACESCCT | ACES2065_1
MODEL_CIE = CIE_XYZ | CIE_XYY | CIELAB | CIELUV
MODEL_CAM = 0
MODEL_YUV = 0
MODEL_OTHER = LMS | HSL | HSV | OKLAB # is OKLAB CAM-based?
NEGATIVE = OKLAB | CIELAB | CIELUV | GAMUT_AP0
NON_NEGATIVE = ~NEGATIVE
DISABLED = CIELUV
UNSUPPORTED = OKHSV | OKHSL # disabled doesn't go here - CS must have alternate path
SUPPORTED = ~UNSUPPORTED
# FIXME: LUV doesn't quite match expected values, needs further testing
mat_xyz_to_srgb = [
[3.24096994190452134, -1.53738317757009346, -0.498610760293003284],
[-0.969243636280879826, 1.87596750150772067, 0.0415550574071756125],
[0.0556300796969936084, -0.203976958888976564, 1.05697151424287856]]
mat_srgb_to_xyz = [
[0.412390799265959481, 0.357584339383877964, 0.180480788401834288],
[0.212639005871510358, 0.715168678767755927, 0.072192315360733715],
[0.0193308187155918507, 0.119194779794625988, 0.950532152249660581]]
mat_srgb_to_acescg = [
[ 0.6130974024, 0.3395231462, 0.04737945141],
[ 0.07019372247, 0.916353879, 0.01345239847],
[ 0.02061559288, 0.1095697729, 0.8698146341]]
# NOTE: Includes "D60"/D65 white point conversion
mat_acescg_to_srgb = [
[ 1.705050993, -0.6217921206,-0.083258872],
[-0.1302564175, 1.140804737, -0.01054831907],
[-0.02400335681,-0.1289689761, 1.152972333]]
# NOTE: Includes "D60"/D65 white point conversion
mat_srgb_to_aces2065_1 = [
[ 0.439632982, 0.382988698, 0.17737832],
[ 0.0897764431, 0.813439429, 0.0967841284],
[ 0.0175411704, 0.111546553, 0.870912277]]
mat_aces2065_1_to_srgb = [
[ 2.52168619, -1.13413099, -0.387555198],
[-0.276479914, 1.37271909, -0.0962391736],
[-0.015378065, -0.152975336, 1.1683534]]
mat_srgb_to_displayp3 = [
[ 0.822461969, 0.177538031, 1.15772692e-10],
[ 0.0331941989, 0.966805801, 1.95085037e-11],
[ 0.0170826307, 0.0723974405, 0.910519929]]
mat_displayp3_to_srgb = [
[ 1.22494018, -0.224940176, -4.77534979e-11],
[-0.0420569547, 1.04205695, 3.37864801e-11],
[-0.0196375546,-0.0786360454, 1.0982736]]
# NOTE: No chromatic adaptation
mat_srgb_to_dcip3 = [
[0.868579739716132409, 0.128919138460847047, 0.00250112182302054368],
[0.0345404102543194426, 0.961811386361919975, 0.0036482033837605824],
[0.0167714290414502718, 0.0710399977868858352, 0.912188573171663893]]
# NOTE: No chromatic adaptation
mat_dcip3_to_srgb = [
[ 1.15751640619975871, -0.154962378073857756, -0.00255402812590095854],
[-0.0415000715306859699, 1.04556792307969925, -0.00406785154901328463],
[-0.0180500389562539583,-0.0785782726530290654, 1.09662831160928302]]
# NOTE: No chromatic adaptation
mat_dcip3_to_xyz = [
[ 0.445169815564552417, 0.277134409206777664, 0.172282669815564564],
[ 0.209491677912730539, 0.721595254161043636, 0.0689130679262258258],
[-3.63410131696985616e-17, 0.0470605600539811521, 0.907355394361973415]]
# NOTE: No chromatic adaptation
mat_xyz_to_dcip3 = [
[2.7253940304917328, -1.01800300622718496, -0.440163195190036463],
[-0.795168025808764195, 1.689732054843624, 0.0226471906084774533],
[0.0412418913957000325, -0.0876390192158623825, 1.10092937864632191]]
mat_srgb_to_rec2020 = [
[ 0.627403896, 0.329283039, 0.0433130657],
[ 0.0690972894, 0.919540395, 0.0113623156],
[ 0.0163914389, 0.0880133077, 0.895595253]]
mat_rec2020_to_srgb = [
[ 1.660491, -0.587641139,-0.0728498633],
[-0.124550475, 1.1328999, -0.00834942258],
[-0.0181507633,-0.100578898, 1.11872966]]
mat_rec2020_to_xyz = [
[0.636958048301291, 0.144616903586208, 0.168880975164172],
[0.262700212011267, 0.677998071518871, 0.059301716469862],
[4.99410657446607e-17, 0.0280726930490874, 1.06098505771079]]
mat_xyz_to_rec2020 = [
[1.71665118797127, -0.355670783776393, -0.25336628137366],
[-0.666684351832489, 1.61648123663494, 0.0157685458139111],
[0.0176398574453108, -0.0427706132578085, 0.942103121235474]]
# NOTE: No chromatic adaptation
mat_acescg_to_xyz = [
[ 0.66245418, 0.13400421, 0.15618769],
[ 0.27222872, 0.67408177, 0.05368952],
[-0.00557465, 0.00406073, 1.0103391 ]]
# NOTE: No chromatic adaptation
mat_xyz_to_acescg = [
[ 1.64102338, -0.32480329, -0.2364247 ],
[-0.66366286, 1.61533159, 0.01675635],
[ 0.01172189, -0.00828444, 0.98839486]]
# NOTE: For CIE XYZ color
mat_d60_to_d65 = [
[ 0.98722400,-0.00611327, 0.01595330],
[-0.00759836, 1.00186000, 0.00533002],
[ 0.00307257,-0.00509595, 1.08168000]]
# NOTE: For CIE XYZ color
mat_d65_to_d60 = [
[ 1.01303000, 0.00610531,-0.01497100],
[ 0.00769823, 0.99816500,-0.00503203],
[-0.00284131, 0.00468516, 0.92450700]]
# NOTE: For CIE XYZ color
mat_d65_to_dci = [
[0.976578896646979768, -0.0154362646984919742, -0.016686021704209866],
[-0.0256896658505145926, 1.02853916787996963, -0.00378517365630504153],
[-0.00570574587417104179, 0.0110778657389971485, 0.871176159390377409]]
# NOTE: For CIE XYZ color
mat_dci_to_d65 = [
[1.02449672775257752, 0.0151635410224165156, 0.0196885223342066827],
[0.0256121933371584198, 0.97258630562441342, 0.00471635229242730096],
[0.0063842306500876874, -0.012268082736730219, 1.14794244517367791]]
mat_xyz_to_lms = [
[ 0.8951, 0.2664,-0.1614],
[-0.7502, 1.7135, 0.0367],
[ 0.0389,-0.0685, 1.0296]]
mat_lms_to_xyz = [
[ 0.986993, -0.147054, 0.159963],
[ 0.432305, 0.51836, 0.0492912],
[ -0.00852866, 0.0400428, 0.968487]]
# OKLAB's XYZ to LMS
mat_oklab_m1 = [
[ 0.8189330101, 0.3618667424, -0.1288597137],
[ 0.0329845436, 0.9293118715, 0.0361456387],
[ 0.0482003018, 0.2643662691, 0.6338517070]]
# OKLAB's non-linear L'M'S' to OKLAB
mat_oklab_m2 = [
[ 0.2104542553, 0.7936177850, -0.0040720468],
[ 1.9779984951, -2.4285922050, 0.4505937099],
[ 0.0259040371, 0.7827717662, -0.8086757660]]
# Inverse of OKLAB M1
mat_oklab_m1_inv = [
[ 1.22701385, -0.55779998, 0.28125615],
[-0.04058018, 1.11225687, -0.07167668],
[-0.07638128, -0.42148198, 1.58616322]]
# Inverse of OKLAB M2
mat_oklab_m2_inv = [
[ 1. , 0.39633779, 0.21580376],
[ 1.00000001, -0.10556134, -0.06385417],
[ 1.00000005, -0.08948418, -1.29148554]]
@classmethod
def convert(cls, im:Union[torch.Tensor, ColorImage], source:Variant, destination:Variant) -> torch.Tensor:
"""
Change the color space of an image. Cylindrical transformations HSV/HSL are
treated as their own color spaces and assumed to be relative to sRGB linear.
Unless otherwise noted or required by specification (e.g. ACES), we assume D65 white point.
.. warning::
Tone mapping is not included, so converting the color space of HDR values to
an LDR-designated color space will not automatically reduce dynamic range. For example,
taking an HDR image from :code:`ACESCG` (AP1) to :code:`SRGB` will yield the sRGB
gamma curve, but values outside the required range must still be tone mapped or clamped beforehand.
.. warning::
Cylindrical transformations (HSL, HSV) should be given input in [0, 1] linear sRGB range
(or equivalent). This is not strictly enforced but input outside this range may yield
unpredictable results or *NaN* values.
:param im: [C=3, H, W] image tensor
:type im: torch.Tensor | ColorImage
:param source: color space to convert from
:param destination: color space to convert to
:return: image tensor in designated color space
"""
ip, op = source, destination
cs = cls.Variant
tf = TransferFunction
if ip == op: return im
assert im.dim() == 3 and im.size(0) == 3, f"expected [C=3, H, W] image tensor, got {im.size()}"
assert source != 0, f"Unknown source color space"
assert ip & cs.SUPPORTED, f"Source color space not supported: {source.name}"
assert op & cs.SUPPORTED, f"Destination color space not supported: {destination.name}"
assert ip & ~cs.DISABLED, f"Source color space disabled: {ColorSpace.Variant(ip).name}"
assert op & ~cs.DISABLED, f"Destination color space disabled: {ColorSpace.Variant(op).name}"
err_not_implemented = f"Color space conversion not implemented: {ColorSpace.Variant(ip).name} to {ColorSpace.Variant(op).name}"
# Direct path where it matters, loop-de-loop elsewhere
if ip == cs.SRGB_LIN:
if op == cs.SRGB: im = tf.srgb_oetf(im)
elif op == cs.REC709: im = tf.rec709_oetf(im) | elif op == cs.REC2020: im = tf.rec2020_oetf(mm(im, cls.mat_srgb_to_rec2020)) | 0 | 2023-12-15 15:39:08+00:00 | 8k |
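A short usage sketch of the ColorSpace.convert API from the tinycio row above, following the example given in its own docstring. The random test image, the torch import, and the top-level `from tinycio import ColorSpace` import path are assumptions (the class is defined in src/tinycio/colorspace.py); treat this as an illustration rather than the repository's own example code.

import torch
from tinycio import ColorSpace  # assumed package-level export of src/tinycio/colorspace.py

srgb_image = torch.rand(3, 64, 64)   # [C=3, H, W] linear-sRGB values in [0, 1]
cs_in = ColorSpace.Variant.SRGB_LIN
cs_out = ColorSpace.Variant.OKLAB
oklab_image = ColorSpace.convert(srgb_image, source=cs_in, destination=cs_out)

# The recorded next_line extends the SRGB_LIN branch in the same pattern: multiply by a
# 3x3 primaries matrix with matmul_tl, then apply the destination OETF, i.e.
# im = tf.rec2020_oetf(mm(im, cls.mat_srgb_to_rec2020)) for linear sRGB -> Rec.2020.
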
legalontech-oss/simple-search-query-parser-sample | src/Driver1.py | [
{
"identifier": "SimpleSearchQueryLexer",
"path": "src/parser/SimpleSearchQueryLexer.py",
"snippet": "class SimpleSearchQueryLexer(Lexer):\n\n atn = ATNDeserializer().deserialize(serializedATN())\n\n decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]\n\n T__0 = 1\n T__1 = 2\n ALPHABETS = 3\n OR_OPERATOR = 4\n AND_OPERATOR = 5\n NOT_OPERATOR = 6\n WHITE_SPACES = 7\n\n channelNames = [ u\"DEFAULT_TOKEN_CHANNEL\", u\"HIDDEN\" ]\n\n modeNames = [ \"DEFAULT_MODE\" ]\n\n literalNames = [ \"<INVALID>\",\n \"'('\", \"')'\", \"'OR'\", \"'AND'\", \"'NOT'\" ]\n\n symbolicNames = [ \"<INVALID>\",\n \"ALPHABETS\", \"OR_OPERATOR\", \"AND_OPERATOR\", \"NOT_OPERATOR\", \n \"WHITE_SPACES\" ]\n\n ruleNames = [ \"T__0\", \"T__1\", \"ALPHABETS\", \"OR_OPERATOR\", \"AND_OPERATOR\", \n \"NOT_OPERATOR\", \"WHITE_SPACES\" ]\n\n grammarFileName = \"SimpleSearchQuery.g4\"\n\n def __init__(self, input=None, output:TextIO = sys.stdout):\n super().__init__(input, output)\n self.checkVersion(\"4.13.1\")\n self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())\n self._actions = None\n self._predicates = None"
},
{
"identifier": "SimpleSearchQueryParser",
"path": "src/parser/SimpleSearchQueryParser.py",
"snippet": "class SimpleSearchQueryParser ( Parser ):\n\n grammarFileName = \"SimpleSearchQuery.g4\"\n\n atn = ATNDeserializer().deserialize(serializedATN())\n\n decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]\n\n sharedContextCache = PredictionContextCache()\n\n literalNames = [ \"<INVALID>\", \"'('\", \"')'\", \"<INVALID>\", \"'OR'\", \"'AND'\", \n \"'NOT'\" ]\n\n symbolicNames = [ \"<INVALID>\", \"<INVALID>\", \"<INVALID>\", \"ALPHABETS\", \n \"OR_OPERATOR\", \"AND_OPERATOR\", \"NOT_OPERATOR\", \"WHITE_SPACES\" ]\n\n RULE_expr = 0\n RULE_term = 1\n RULE_factor = 2\n RULE_keywords = 3\n RULE_alphabets = 4\n\n ruleNames = [ \"expr\", \"term\", \"factor\", \"keywords\", \"alphabets\" ]\n\n EOF = Token.EOF\n T__0=1\n T__1=2\n ALPHABETS=3\n OR_OPERATOR=4\n AND_OPERATOR=5\n NOT_OPERATOR=6\n WHITE_SPACES=7\n\n def __init__(self, input:TokenStream, output:TextIO = sys.stdout):\n super().__init__(input, output)\n self.checkVersion(\"4.13.1\")\n self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)\n self._predicates = None\n\n\n\n\n class ExprContext(ParserRuleContext):\n __slots__ = 'parser'\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def term(self):\n return self.getTypedRuleContext(SimpleSearchQueryParser.TermContext,0)\n\n\n def expr(self):\n return self.getTypedRuleContext(SimpleSearchQueryParser.ExprContext,0)\n\n\n def OR_OPERATOR(self):\n return self.getToken(SimpleSearchQueryParser.OR_OPERATOR, 0)\n\n def getRuleIndex(self):\n return SimpleSearchQueryParser.RULE_expr\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitExpr\" ):\n return visitor.visitExpr(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n def expr(self, _p:int=0):\n _parentctx = self._ctx\n _parentState = self.state\n localctx = SimpleSearchQueryParser.ExprContext(self, self._ctx, _parentState)\n _prevctx = localctx\n _startState = 0\n self.enterRecursionRule(localctx, 0, self.RULE_expr, _p)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 11\n self.term(0)\n self._ctx.stop = self._input.LT(-1)\n self.state = 18\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,0,self._ctx)\n while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:\n if _alt==1:\n if self._parseListeners is not None:\n self.triggerExitRuleEvent()\n _prevctx = localctx\n localctx = SimpleSearchQueryParser.ExprContext(self, _parentctx, _parentState)\n self.pushNewRecursionContext(localctx, _startState, self.RULE_expr)\n self.state = 13\n if not self.precpred(self._ctx, 1):\n from antlr4.error.Errors import FailedPredicateException\n raise FailedPredicateException(self, \"self.precpred(self._ctx, 1)\")\n self.state = 14\n self.match(SimpleSearchQueryParser.OR_OPERATOR)\n self.state = 15\n self.term(0) \n self.state = 20\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,0,self._ctx)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.unrollRecursionContexts(_parentctx)\n return localctx\n\n\n class TermContext(ParserRuleContext):\n __slots__ = 'parser'\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def factor(self):\n return 
self.getTypedRuleContext(SimpleSearchQueryParser.FactorContext,0)\n\n\n def term(self):\n return self.getTypedRuleContext(SimpleSearchQueryParser.TermContext,0)\n\n\n def AND_OPERATOR(self):\n return self.getToken(SimpleSearchQueryParser.AND_OPERATOR, 0)\n\n def getRuleIndex(self):\n return SimpleSearchQueryParser.RULE_term\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitTerm\" ):\n return visitor.visitTerm(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n def term(self, _p:int=0):\n _parentctx = self._ctx\n _parentState = self.state\n localctx = SimpleSearchQueryParser.TermContext(self, self._ctx, _parentState)\n _prevctx = localctx\n _startState = 2\n self.enterRecursionRule(localctx, 2, self.RULE_term, _p)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 22\n self.factor()\n self._ctx.stop = self._input.LT(-1)\n self.state = 29\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,1,self._ctx)\n while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:\n if _alt==1:\n if self._parseListeners is not None:\n self.triggerExitRuleEvent()\n _prevctx = localctx\n localctx = SimpleSearchQueryParser.TermContext(self, _parentctx, _parentState)\n self.pushNewRecursionContext(localctx, _startState, self.RULE_term)\n self.state = 24\n if not self.precpred(self._ctx, 1):\n from antlr4.error.Errors import FailedPredicateException\n raise FailedPredicateException(self, \"self.precpred(self._ctx, 1)\")\n self.state = 25\n self.match(SimpleSearchQueryParser.AND_OPERATOR)\n self.state = 26\n self.factor() \n self.state = 31\n self._errHandler.sync(self)\n _alt = self._interp.adaptivePredict(self._input,1,self._ctx)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.unrollRecursionContexts(_parentctx)\n return localctx\n\n\n class FactorContext(ParserRuleContext):\n __slots__ = 'parser'\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def keywords(self):\n return self.getTypedRuleContext(SimpleSearchQueryParser.KeywordsContext,0)\n\n\n def NOT_OPERATOR(self):\n return self.getToken(SimpleSearchQueryParser.NOT_OPERATOR, 0)\n\n def getRuleIndex(self):\n return SimpleSearchQueryParser.RULE_factor\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitFactor\" ):\n return visitor.visitFactor(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def factor(self):\n\n localctx = SimpleSearchQueryParser.FactorContext(self, self._ctx, self.state)\n self.enterRule(localctx, 4, self.RULE_factor)\n try:\n self.state = 35\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [1, 3]:\n self.enterOuterAlt(localctx, 1)\n self.state = 32\n self.keywords()\n pass\n elif token in [6]:\n self.enterOuterAlt(localctx, 2)\n self.state = 33\n self.match(SimpleSearchQueryParser.NOT_OPERATOR)\n self.state = 34\n self.keywords()\n pass\n else:\n raise NoViableAltException(self)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class KeywordsContext(ParserRuleContext):\n __slots__ = 'parser'\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def expr(self):\n return 
self.getTypedRuleContext(SimpleSearchQueryParser.ExprContext,0)\n\n\n def alphabets(self):\n return self.getTypedRuleContext(SimpleSearchQueryParser.AlphabetsContext,0)\n\n\n def getRuleIndex(self):\n return SimpleSearchQueryParser.RULE_keywords\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitKeywords\" ):\n return visitor.visitKeywords(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def keywords(self):\n\n localctx = SimpleSearchQueryParser.KeywordsContext(self, self._ctx, self.state)\n self.enterRule(localctx, 6, self.RULE_keywords)\n try:\n self.state = 42\n self._errHandler.sync(self)\n token = self._input.LA(1)\n if token in [1]:\n self.enterOuterAlt(localctx, 1)\n self.state = 37\n self.match(SimpleSearchQueryParser.T__0)\n self.state = 38\n self.expr(0)\n self.state = 39\n self.match(SimpleSearchQueryParser.T__1)\n pass\n elif token in [3]:\n self.enterOuterAlt(localctx, 2)\n self.state = 41\n self.alphabets()\n pass\n else:\n raise NoViableAltException(self)\n\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n class AlphabetsContext(ParserRuleContext):\n __slots__ = 'parser'\n\n def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):\n super().__init__(parent, invokingState)\n self.parser = parser\n\n def ALPHABETS(self):\n return self.getToken(SimpleSearchQueryParser.ALPHABETS, 0)\n\n def getRuleIndex(self):\n return SimpleSearchQueryParser.RULE_alphabets\n\n def accept(self, visitor:ParseTreeVisitor):\n if hasattr( visitor, \"visitAlphabets\" ):\n return visitor.visitAlphabets(self)\n else:\n return visitor.visitChildren(self)\n\n\n\n\n def alphabets(self):\n\n localctx = SimpleSearchQueryParser.AlphabetsContext(self, self._ctx, self.state)\n self.enterRule(localctx, 8, self.RULE_alphabets)\n try:\n self.enterOuterAlt(localctx, 1)\n self.state = 44\n self.match(SimpleSearchQueryParser.ALPHABETS)\n except RecognitionException as re:\n localctx.exception = re\n self._errHandler.reportError(self, re)\n self._errHandler.recover(self, re)\n finally:\n self.exitRule()\n return localctx\n\n\n\n def sempred(self, localctx:RuleContext, ruleIndex:int, predIndex:int):\n if self._predicates == None:\n self._predicates = dict()\n self._predicates[0] = self.expr_sempred\n self._predicates[1] = self.term_sempred\n pred = self._predicates.get(ruleIndex, None)\n if pred is None:\n raise Exception(\"No predicate with index:\" + str(ruleIndex))\n else:\n return pred(localctx, predIndex)\n\n def expr_sempred(self, localctx:ExprContext, predIndex:int):\n if predIndex == 0:\n return self.precpred(self._ctx, 1)\n \n\n def term_sempred(self, localctx:TermContext, predIndex:int):\n if predIndex == 1:\n return self.precpred(self._ctx, 1)"
},
{
"identifier": "VisitorInterp",
"path": "src/VisitorInterp.py",
"snippet": "class VisitorInterp(SimpleSearchQueryVisitor):\n def visitExpr(self, ctx: SimpleSearchQueryParser.ExprContext):\n if ctx.getChildCount() == 3:\n opc = ctx.getChild(1).getText()\n v1 = self.visit(ctx.getChild(0))\n v2 = self.visit(ctx.getChild(2))\n return {\"operator\": opc, \"children\": [v1, v2]}\n else:\n return self.visit(ctx.getChild(0))\n\n def visitTerm(self, ctx: SimpleSearchQueryParser.TermContext):\n if ctx.getChildCount() == 3:\n opc = ctx.getChild(1).getText()\n v1 = self.visit(ctx.getChild(0))\n v2 = self.visit(ctx.getChild(2))\n return {\"operator\": opc, \"children\": [v1, v2]}\n else:\n return self.visit(ctx.getChild(0))\n\n def visitFactor(self, ctx: SimpleSearchQueryParser.FactorContext):\n if ctx.getChildCount() == 2:\n opc = ctx.getChild(0).getText()\n v1 = self.visit(ctx.getChild(1))\n return {\"operator\": opc, \"children\": [v1]}\n else:\n return self.visit(ctx.getChild(0))\n\n def visitKeywords(self, ctx: SimpleSearchQueryParser.KeywordsContext):\n if ctx.getChildCount() == 3:\n return self.visit(ctx.getChild(1))\n else:\n return self.visit(ctx.getChild(0))\n\n def visitAlphabets(self, ctx: SimpleSearchQueryParser.AlphabetsContext):\n return {\"value\": ctx.getText()}"
}
] | import sys
from antlr4 import CommonTokenStream, FileStream
from src.parser.SimpleSearchQueryLexer import SimpleSearchQueryLexer
from src.parser.SimpleSearchQueryParser import SimpleSearchQueryParser
from src.VisitorInterp import VisitorInterp | 4,057 |
def main(argv):
input_stream = FileStream(argv[1])
lexer = SimpleSearchQueryLexer(input_stream)
stream = CommonTokenStream(lexer)
parser = SimpleSearchQueryParser(stream)
tree = parser.expr()
if parser.getNumberOfSyntaxErrors() > 0:
print("syntax errors")
else:
|
def main(argv):
input_stream = FileStream(argv[1])
lexer = SimpleSearchQueryLexer(input_stream)
stream = CommonTokenStream(lexer)
parser = SimpleSearchQueryParser(stream)
tree = parser.expr()
if parser.getNumberOfSyntaxErrors() > 0:
print("syntax errors")
else: | vinterp = VisitorInterp() | 2 | 2023-12-19 07:44:19+00:00 | 8k |
thuiar/TCL-MAP | methods/TCL_MAP/manager.py | [
{
"identifier": "restore_model",
"path": "utils/functions.py",
"snippet": "def restore_model(model, model_dir, device):\n output_model_file = os.path.join(model_dir, 'pytorch_model.bin')\n m = torch.load(output_model_file, map_location=device)\n model.load_state_dict(m)\n return model"
},
{
"identifier": "save_model",
"path": "utils/functions.py",
"snippet": "def save_model(model, model_dir):\n\n save_model = model.module if hasattr(model, 'module') else model \n model_file = os.path.join(model_dir, 'pytorch_model.bin')\n\n torch.save(save_model.state_dict(), model_file)"
},
{
"identifier": "EarlyStopping",
"path": "utils/functions.py",
"snippet": "class EarlyStopping:\n \"\"\"Early stops the training if validation loss doesn't improve after a given patience.\"\"\"\n def __init__(self, args, delta=1e-6):\n \"\"\"\n Args:\n patience (int): How long to wait after last time validation loss improved.\n delta (float): Minimum change in the monitored quantity to qualify as an improvement. \n \"\"\"\n self.patience = args.wait_patience\n self.logger = logging.getLogger(args.logger_name)\n self.monitor = args.eval_monitor\n self.counter = 0\n self.best_score = 1e8 if self.monitor == 'loss' else 1e-6\n self.early_stop = False\n self.delta = delta\n self.best_model = None\n\n def __call__(self, score, model):\n \n better_flag = score <= (self.best_score - self.delta) if self.monitor == 'loss' else score >= (self.best_score + self.delta) \n\n if better_flag:\n self.counter = 0\n self.best_model = copy.deepcopy(model)\n self.best_score = score \n\n else:\n self.counter += 1\n self.logger.info(f'EarlyStopping counter: {self.counter} out of {self.patience}') \n\n if self.counter >= self.patience:\n self.early_stop = True"
},
{
"identifier": "get_dataloader",
"path": "data/utils.py",
"snippet": "def get_dataloader(args, data):\n\n train_dataloader = DataLoader(data['train'], shuffle=True, batch_size = args.train_batch_size, num_workers = args.num_workers, pin_memory = True)\n dev_dataloader = DataLoader(data['dev'], batch_size = args.eval_batch_size, num_workers = args.num_workers, pin_memory = True)\n test_dataloader = DataLoader(data['test'], batch_size = args.eval_batch_size, num_workers = args.num_workers, pin_memory = True)\n\n return {\n 'train': train_dataloader,\n 'dev': dev_dataloader,\n 'test': test_dataloader\n } "
},
{
"identifier": "AverageMeter",
"path": "utils/metrics.py",
"snippet": "class AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = float(self.sum) / self.count"
},
{
"identifier": "Metrics",
"path": "utils/metrics.py",
"snippet": "class Metrics(object):\n \"\"\"\n column of confusion matrix: predicted index\n row of confusion matrix: target index\n \"\"\"\n def __init__(self, args):\n\n self.logger = logging.getLogger(args.logger_name)\n self.eval_metrics = ['acc', 'f1', 'prec', 'rec', 'weighted_f1', 'weighted_prec', 'weighted_rec']\n\n def __call__(self, y_true, y_pred, show_results = False):\n\n acc_score = self._acc_score(y_true, y_pred)\n macro_f1, weighted_f1 = self._f1_score(y_true, y_pred)\n macro_prec, weighted_prec = self._precision_score(y_true, y_pred)\n macro_rec, weighted_rec = self._recall_score(y_true, y_pred)\n \n eval_results = {\n 'acc': acc_score,\n 'f1': macro_f1,\n 'weighted_f1': weighted_f1,\n 'prec': macro_prec,\n 'weighted_prec': weighted_prec,\n 'rec': macro_rec,\n 'weighted_rec': weighted_rec\n }\n \n if show_results:\n \n self._show_confusion_matrix(y_true, y_pred)\n\n self.logger.info(\"***** In-domain Evaluation results *****\")\n for key in sorted(eval_results.keys()):\n self.logger.info(\" %s = %s\", key, str(round(eval_results[key], 4)))\n\n return eval_results\n\n def _acc_score(self, y_true, y_pred):\n return accuracy_score(y_true, y_pred)\n \n def _f1_score(self, y_true, y_pred):\n return f1_score(y_true, y_pred, average='macro'), f1_score(y_true, y_pred, average='weighted')\n \n def _precision_score(self, y_true, y_pred):\n return precision_score(y_true, y_pred, average='macro'), precision_score(y_true, y_pred, average='weighted')\n\n def _recall_score(self, y_true, y_pred):\n return recall_score(y_true, y_pred, average='macro'), recall_score(y_true, y_pred, average='weighted')\n\n def _show_confusion_matrix(self, y_true, y_pred):\n cm = confusion_matrix(y_true, y_pred)\n self.logger.info(\"***** Test: Confusion Matrix *****\")\n self.logger.info(\"%s\", str(cm))"
},
{
"identifier": "TCL_MAP",
"path": "methods/TCL_MAP/model.py",
"snippet": "class TCL_MAP(nn.Module):\n def __init__(self, args):\n \n super(TCL_MAP, self).__init__()\n \n self.model = MAP_Model.from_pretrained(args.text_backbone, cache_dir = args.cache_path, args = args)\n self.cons_model = Cons_Model.from_pretrained(args.text_backbone, cache_dir = args.cache_path, args = args)\n \n self.ctx_vectors = self._init_ctx(args)\n self.ctx = nn.Parameter(self.ctx_vectors)\n\n self.label_len = args.label_len\n args.feat_size = args.text_feat_dim\n args.video_feat_size = args.video_feat_dim\n args.audio_feat_size = args.audio_feat_dim\n\n def _init_ctx(self, args):\n ctx = torch.empty(args.prompt_len, args.text_feat_dim, dtype=torch.float)\n nn.init.trunc_normal_(ctx)\n return ctx\n\n \n def forward(self, text_feats, video_feats, audio_feats, cons_text_feats, condition_idx):\n video_feats = video_feats.float()\n audio_feats = audio_feats.float()\n\n # process normal sample\n outputs, pooled_output, condition, generated_ctx = self.model(\n text = text_feats,\n visual = video_feats,\n acoustic = audio_feats,\n condition_idx=condition_idx, \n ctx=self.ctx\n )\n\n # process augmented sample\n cons_input_ids, cons_input_mask, cons_segment_ids = cons_text_feats[:, 0], cons_text_feats[:, 1], cons_text_feats[:, 2]\n cons_outputs = self.cons_model(\n input_ids = cons_input_ids, \n condition_idx=condition_idx,\n ctx=generated_ctx,\n token_type_ids = cons_segment_ids, \n attention_mask = cons_input_mask\n )\n last_hidden_state = cons_outputs.last_hidden_state\n cons_condition_tuple = tuple(last_hidden_state[torch.arange(last_hidden_state.shape[0]), condition_idx.view(-1) + i, :].unsqueeze(1) for i in range(self.label_len))\n cons_condition = torch.cat(cons_condition_tuple, dim=1)\n\n # return classification feature and Label/[MASK] token representation\n return outputs[0], pooled_output, condition.mean(dim=1), cons_condition.mean(dim=1)"
},
{
"identifier": "SupConLoss",
"path": "methods/TCL_MAP/loss.py",
"snippet": "class SupConLoss(nn.Module):\n \"\"\"Supervised Contrastive Learning: https://arxiv.org/pdf/2004.11362.pdf.\n It also supports the unsupervised contrastive loss in SimCLR\"\"\"\n def __init__(self, temperature=0.07, contrast_mode='all'):\n super(SupConLoss, self).__init__()\n self.temperature = temperature\n self.contrast_mode = contrast_mode\n\n def forward(self, features, labels=None, mask=None):\n \"\"\"Compute loss for model. If both `labels` and `mask` are None,\n it degenerates to SimCLR unsupervised loss:\n https://arxiv.org/pdf/2002.05709.pdf\n\n Args:\n features: hidden vector of shape [bsz, n_views, ...].\n labels: ground truth of shape [bsz].\n mask: contrastive mask of shape [bsz, bsz], mask_{i,j}=1 if sample j\n has the same class as sample i. Can be asymmetric.\n Returns:\n A loss scalar.\n \"\"\"\n device = (torch.device('cuda')\n if features.is_cuda\n else torch.device('cpu'))\n\n if len(features.shape) < 3:\n raise ValueError('`features` needs to be [bsz, n_views, ...],'\n 'at least 3 dimensions are required')\n if len(features.shape) > 3:\n features = features.view(features.shape[0], features.shape[1], -1)\n\n features = F.normalize(features, dim=2)\n batch_size = features.shape[0]\n if labels is not None and mask is not None:\n raise ValueError('Cannot define both `labels` and `mask`')\n elif labels is None and mask is None:\n mask = torch.eye(batch_size, dtype=torch.float32).to(device)\n elif labels is not None:\n labels = labels.contiguous().view(-1, 1)\n if labels.shape[0] != batch_size:\n raise ValueError('Num of labels does not match num of features')\n mask = torch.eq(labels, labels.T).float().to(device)\n else:\n mask = mask.float().to(device)\n\n contrast_count = features.shape[1]\n contrast_feature = torch.cat(torch.unbind(features, dim=1), dim=0)\n if self.contrast_mode == 'one':\n anchor_feature = features[:, 0]\n anchor_count = 1\n elif self.contrast_mode == 'all':\n anchor_feature = contrast_feature\n anchor_count = contrast_count\n else:\n raise ValueError('Unknown mode: {}'.format(self.contrast_mode))\n\n # compute logits\n anchor_dot_contrast = torch.div(\n torch.matmul(anchor_feature, contrast_feature.T),\n self.temperature)\n # for numerical stability\n logits_max, _ = torch.max(anchor_dot_contrast, dim=1, keepdim=True)\n logits = anchor_dot_contrast - logits_max.detach()\n \n\n # tile mask\n mask = mask.repeat(anchor_count, contrast_count)\n # mask-out self-contrast cases\n logits_mask = torch.scatter(\n torch.ones_like(mask),\n 1,\n torch.arange(batch_size * anchor_count).view(-1, 1).to(device),\n 0\n )\n mask = mask * logits_mask\n\n # compute log_prob\n exp_logits = torch.exp(logits) * logits_mask\n log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True))\n\n # compute mean of log-likelihood over positive\n mean_log_prob_pos = (mask * log_prob).sum(1) / mask.sum(1)\n\n # loss\n loss = - mean_log_prob_pos\n loss = loss.view(anchor_count, batch_size).mean()\n\n return loss"
}
] | import torch
import torch.nn.functional as F
import logging
import numpy as np
from torch import nn
from utils.functions import restore_model, save_model, EarlyStopping
from tqdm import trange, tqdm
from data.utils import get_dataloader
from utils.metrics import AverageMeter, Metrics
from transformers import AdamW, get_linear_schedule_with_warmup
from .model import TCL_MAP
from .loss import SupConLoss | 3,858 |
__all__ = ['TCL_MAP_manager']
class TCL_MAP_manager:
def __init__(self, args, data):
self.logger = logging.getLogger(args.logger_name)
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
args.device = self.device
self.model = TCL_MAP(args)
self.model.to(self.device)
self.optimizer, self.scheduler = self._set_optimizer(args, self.model)
mm_dataloader = get_dataloader(args, data.mm_data)
self.train_dataloader, self.eval_dataloader, self.test_dataloader = \
mm_dataloader['train'], mm_dataloader['dev'], mm_dataloader['test']
self.args = args
self.criterion = nn.CrossEntropyLoss()
self.cons_criterion = SupConLoss(temperature=args.temperature)
self.metrics = Metrics(args)
if args.train:
self.best_eval_score = 0
else:
self.model = restore_model(self.model, args.model_output_path, self.device)
def _set_optimizer(self, args, model):
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr = args.lr, correct_bias=False)
num_train_optimization_steps = int(args.num_train_examples / args.train_batch_size) * args.num_train_epochs
num_warmup_steps= int(args.num_train_examples * args.num_train_epochs * args.warmup_proportion / args.train_batch_size)
scheduler = get_linear_schedule_with_warmup(optimizer,
num_warmup_steps=num_warmup_steps,
num_training_steps=num_train_optimization_steps)
return optimizer, scheduler
def _train(self, args):
early_stopping = EarlyStopping(args)
for epoch in trange(int(args.num_train_epochs), desc="Epoch"):
self.model.train()
loss_record = AverageMeter()
cons_loss_record = AverageMeter()
cls_loss_record = AverageMeter()
for step, batch in enumerate(tqdm(self.train_dataloader, desc="Iteration")):
text_feats = batch['text_feats'].to(self.device)
cons_text_feats = batch['cons_text_feats'].to(self.device)
condition_idx = batch['condition_idx'].to(self.device)
video_feats = batch['video_feats'].to(self.device)
audio_feats = batch['audio_feats'].to(self.device)
label_ids = batch['label_ids'].to(self.device)
with torch.set_grad_enabled(True):
logits, _, condition, cons_condition = self.model(text_feats, video_feats, audio_feats, cons_text_feats, condition_idx)
cons_feature = torch.cat((condition.unsqueeze(1), cons_condition.unsqueeze(1)), dim=1)
cons_loss = self.cons_criterion(cons_feature)
cls_loss = self.criterion(logits, label_ids)
loss = cls_loss + cons_loss
self.optimizer.zero_grad()
loss.backward()
loss_record.update(loss.item(), label_ids.size(0))
cons_loss_record.update(cons_loss.item(), label_ids.size(0))
cls_loss_record.update(cls_loss.item(), label_ids.size(0))
if args.grad_clip != -1.0:
nn.utils.clip_grad_value_([param for param in self.model.parameters() if param.requires_grad], args.grad_clip)
self.optimizer.step()
self.scheduler.step()
outputs = self._get_outputs(args, self.eval_dataloader)
eval_score = outputs[args.eval_monitor]
eval_results = {
'train_loss': round(loss_record.avg, 4),
'cons_loss': round(cons_loss_record.avg, 4),
'cls_loss': round(cls_loss_record.avg, 4),
'eval_score': round(eval_score, 4),
'best_eval_score': round(early_stopping.best_score, 4),
}
self.logger.info("***** Epoch: %s: Eval results *****", str(epoch + 1))
for key in eval_results.keys():
self.logger.info(" %s = %s", key, str(eval_results[key]))
early_stopping(eval_score, self.model)
if early_stopping.early_stop:
self.logger.info(f'EarlyStopping at epoch {epoch + 1}')
break
self.best_eval_score = early_stopping.best_score
self.model = early_stopping.best_model
|
__all__ = ['TCL_MAP_manager']
class TCL_MAP_manager:
def __init__(self, args, data):
self.logger = logging.getLogger(args.logger_name)
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
args.device = self.device
self.model = TCL_MAP(args)
self.model.to(self.device)
self.optimizer, self.scheduler = self._set_optimizer(args, self.model)
mm_dataloader = get_dataloader(args, data.mm_data)
self.train_dataloader, self.eval_dataloader, self.test_dataloader = \
mm_dataloader['train'], mm_dataloader['dev'], mm_dataloader['test']
self.args = args
self.criterion = nn.CrossEntropyLoss()
self.cons_criterion = SupConLoss(temperature=args.temperature)
self.metrics = Metrics(args)
if args.train:
self.best_eval_score = 0
else:
self.model = restore_model(self.model, args.model_output_path, self.device)
def _set_optimizer(self, args, model):
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr = args.lr, correct_bias=False)
num_train_optimization_steps = int(args.num_train_examples / args.train_batch_size) * args.num_train_epochs
num_warmup_steps= int(args.num_train_examples * args.num_train_epochs * args.warmup_proportion / args.train_batch_size)
scheduler = get_linear_schedule_with_warmup(optimizer,
num_warmup_steps=num_warmup_steps,
num_training_steps=num_train_optimization_steps)
return optimizer, scheduler
def _train(self, args):
early_stopping = EarlyStopping(args)
for epoch in trange(int(args.num_train_epochs), desc="Epoch"):
self.model.train()
loss_record = AverageMeter()
cons_loss_record = AverageMeter()
cls_loss_record = AverageMeter()
for step, batch in enumerate(tqdm(self.train_dataloader, desc="Iteration")):
text_feats = batch['text_feats'].to(self.device)
cons_text_feats = batch['cons_text_feats'].to(self.device)
condition_idx = batch['condition_idx'].to(self.device)
video_feats = batch['video_feats'].to(self.device)
audio_feats = batch['audio_feats'].to(self.device)
label_ids = batch['label_ids'].to(self.device)
with torch.set_grad_enabled(True):
logits, _, condition, cons_condition = self.model(text_feats, video_feats, audio_feats, cons_text_feats, condition_idx)
cons_feature = torch.cat((condition.unsqueeze(1), cons_condition.unsqueeze(1)), dim=1)
cons_loss = self.cons_criterion(cons_feature)
cls_loss = self.criterion(logits, label_ids)
loss = cls_loss + cons_loss
self.optimizer.zero_grad()
loss.backward()
loss_record.update(loss.item(), label_ids.size(0))
cons_loss_record.update(cons_loss.item(), label_ids.size(0))
cls_loss_record.update(cls_loss.item(), label_ids.size(0))
if args.grad_clip != -1.0:
nn.utils.clip_grad_value_([param for param in self.model.parameters() if param.requires_grad], args.grad_clip)
self.optimizer.step()
self.scheduler.step()
outputs = self._get_outputs(args, self.eval_dataloader)
eval_score = outputs[args.eval_monitor]
eval_results = {
'train_loss': round(loss_record.avg, 4),
'cons_loss': round(cons_loss_record.avg, 4),
'cls_loss': round(cls_loss_record.avg, 4),
'eval_score': round(eval_score, 4),
'best_eval_score': round(early_stopping.best_score, 4),
}
self.logger.info("***** Epoch: %s: Eval results *****", str(epoch + 1))
for key in eval_results.keys():
self.logger.info(" %s = %s", key, str(eval_results[key]))
early_stopping(eval_score, self.model)
if early_stopping.early_stop:
self.logger.info(f'EarlyStopping at epoch {epoch + 1}')
break
self.best_eval_score = early_stopping.best_score
self.model = early_stopping.best_model
| if args.save_model: | 1 | 2023-12-20 03:12:38+00:00 | 8k |
replicate/cog-marigold | predict.py | [
{
"identifier": "seed_all",
"path": "src/util/seed_all.py",
"snippet": "def seed_all(seed: int = 0):\n \"\"\"\n Set random seeds of all components.\n \"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)"
},
{
"identifier": "find_batch_size",
"path": "src/util/batchsize.py",
"snippet": "def find_batch_size(n_repeat, input_res):\n total_vram = torch.cuda.mem_get_info()[1] / 1024.0**3\n \n for settings in sorted(bs_search_table, key=lambda k: (k['res'], -k['total_vram'])):\n if input_res <= settings['res'] and total_vram >= settings['total_vram']:\n bs = settings['bs']\n if bs > n_repeat:\n bs = n_repeat\n elif bs > math.ceil(n_repeat / 2) and bs < n_repeat:\n bs = math.ceil(n_repeat / 2)\n return bs\n return 1"
},
{
"identifier": "ensemble_depths",
"path": "src/util/ensemble.py",
"snippet": "def ensemble_depths(input_images, regularizer_strength=0.02, max_iter=2, tol=1e-3, reduction='median', max_res=None, disp=False, device='cuda'):\n \"\"\" \n To ensemble multiple affine-invariant depth images (up to scale and shift),\n by aligning estimating the scale and shift\n \"\"\"\n original_input = input_images.clone()\n n_img = input_images.shape[0]\n ori_shape = input_images.shape\n \n if max_res is not None:\n scale_factor = torch.min(max_res / torch.tensor(ori_shape[-2:]))\n if scale_factor < 1:\n downscaler = torch.nn.Upsample(scale_factor=scale_factor, mode='nearest')\n input_images = downscaler(torch.from_numpy(input_images)).numpy()\n\n # init guess\n _min = np.min(input_images.reshape((n_img, -1)).cpu().numpy(), axis=1)\n _max = np.max(input_images.reshape((n_img, -1)).cpu().numpy(), axis=1)\n s_init = 1.0 / (_max - _min).reshape((-1, 1, 1))\n t_init = (-1 * s_init.flatten() * _min.flatten()).reshape((-1, 1, 1))\n x = np.concatenate([s_init, t_init]).reshape(-1)\n\n input_images = input_images.to(device)\n \n # objective function\n def closure(x):\n l = len(x)\n s = x[:int(l/2)]\n t = x[int(l/2):]\n s = torch.from_numpy(s).to(device)\n t = torch.from_numpy(t).to(device)\n \n transformed_arrays = input_images * s.view((-1, 1, 1)) + t.view((-1, 1, 1))\n dists = inter_distances(transformed_arrays)\n sqrt_dist = torch.sqrt(torch.mean(dists**2))\n \n if 'mean' == reduction:\n pred = torch.mean(transformed_arrays, dim=0)\n elif 'median' == reduction:\n pred = torch.median(transformed_arrays, dim=0).values\n else:\n raise ValueError\n \n near_err = torch.sqrt((0 - torch.min(pred))**2)\n far_err = torch.sqrt((1 - torch.max(pred))**2)\n \n err = sqrt_dist + (near_err + far_err) * regularizer_strength\n err = err.detach().cpu().numpy()\n return err\n \n res = minimize(closure, x, method='BFGS', tol=tol, options={'maxiter': max_iter, 'disp': disp})\n x = res.x\n l = len(x)\n s = x[:int(l/2)]\n t = x[int(l/2):]\n \n # Prediction\n s = torch.from_numpy(s).to(device)\n t = torch.from_numpy(t).to(device)\n transformed_arrays = original_input * s.view(-1, 1, 1) + t.view(-1, 1, 1)\n if 'mean' == reduction:\n aligned_images = torch.mean(transformed_arrays, dim=0)\n std = torch.std(transformed_arrays, dim=0)\n uncertainty = std\n elif 'median' == reduction:\n aligned_images = torch.median(transformed_arrays, dim=0).values\n # MAD (median absolute deviation) as uncertainty indicator\n abs_dev = torch.abs(transformed_arrays - aligned_images)\n mad = torch.median(abs_dev, dim=0).values\n uncertainty = mad\n else:\n raise ValueError\n \n # Scale and shift to [0, 1]\n _min = torch.min(aligned_images)\n _max = torch.max(aligned_images)\n aligned_images = (aligned_images - _min) / (_max - _min)\n uncertainty /= (_max - _min)\n \n return aligned_images, uncertainty"
},
{
"identifier": "MarigoldPipeline",
"path": "src/model/marigold_pipeline.py",
"snippet": "class MarigoldPipeline(nn.Module):\n \"\"\"\n Marigold monocular depth estimator.\n \"\"\"\n\n def __init__(\n self,\n unet_pretrained_path: Dict, # {path: xxx, subfolder: xxx}\n rgb_encoder_pretrained_path: Dict,\n depht_ae_pretrained_path: Dict,\n noise_scheduler_pretrained_path: Dict,\n tokenizer_pretrained_path: Dict,\n text_encoder_pretrained_path: Dict,\n empty_text_embed=None,\n trainable_unet=False,\n rgb_latent_scale_factor=0.18215,\n depth_latent_scale_factor=0.18215,\n noise_scheduler_type=\"DDIMScheduler\",\n enable_gradient_checkpointing=False,\n enable_xformers=True,\n ) -> None:\n super().__init__()\n\n self.rgb_latent_scale_factor = rgb_latent_scale_factor\n self.depth_latent_scale_factor = depth_latent_scale_factor\n self.device = \"cpu\"\n\n # ******* Initialize modules *******\n # Trainable modules\n self.trainable_module_dic: Dict[str, nn.Module] = {}\n self.trainable_unet = trainable_unet\n\n # Denoising UNet\n self.unet: UNet2DConditionModel = UNet2DConditionModel.from_pretrained(\n unet_pretrained_path[\"path\"], subfolder=unet_pretrained_path[\"subfolder\"]\n )\n logging.info(f\"pretrained UNet loaded from: {unet_pretrained_path}\")\n if 8 != self.unet.config[\"in_channels\"]:\n self._replace_unet_conv_in()\n logging.warning(\"Unet conv_in layer is replaced\")\n if enable_xformers:\n self.unet.enable_xformers_memory_efficient_attention()\n else:\n self.unet.disable_xformers_memory_efficient_attention()\n\n # Image encoder\n self.rgb_encoder = RGBEncoder(\n pretrained_path=rgb_encoder_pretrained_path[\"path\"],\n subfolder=rgb_encoder_pretrained_path[\"subfolder\"],\n )\n logging.info(\n f\"pretrained RGBEncoder loaded from: {rgb_encoder_pretrained_path}\"\n )\n self.rgb_encoder.requires_grad_(False)\n\n # Depth encoder-decoder\n self.depth_ae = StackedDepthAE(\n pretrained_path=depht_ae_pretrained_path[\"path\"],\n subfolder=depht_ae_pretrained_path[\"subfolder\"],\n )\n logging.info(\n f\"pretrained Depth Autoencoder loaded from: {rgb_encoder_pretrained_path}\"\n )\n\n # Trainability\n # unet\n if self.trainable_unet:\n self.unet.requires_grad_(True)\n self.trainable_module_dic[\"unet\"] = self.unet\n logging.debug(f\"UNet is set to trainable\")\n else:\n self.unet.requires_grad_(False)\n logging.debug(f\"UNet is set to frozen\")\n\n # Gradient checkpointing\n if enable_gradient_checkpointing:\n self.unet.enable_gradient_checkpointing()\n self.depth_ae.vae.enable_gradient_checkpointing()\n\n # Noise scheduler\n if \"DDPMScheduler\" == noise_scheduler_type:\n self.noise_scheduler: SchedulerMixin = DDPMScheduler.from_pretrained(\n noise_scheduler_pretrained_path[\"path\"],\n subfolder=noise_scheduler_pretrained_path[\"subfolder\"],\n )\n elif \"DDIMScheduler\" == noise_scheduler_type:\n self.noise_scheduler: SchedulerMixin = DDIMScheduler.from_pretrained(\n noise_scheduler_pretrained_path[\"path\"],\n subfolder=noise_scheduler_pretrained_path[\"subfolder\"],\n )\n elif \"PNDMScheduler\" == noise_scheduler_type:\n self.noise_scheduler: SchedulerMixin = PNDMScheduler.from_pretrained(\n noise_scheduler_pretrained_path[\"path\"],\n subfolder=noise_scheduler_pretrained_path[\"subfolder\"],\n )\n else:\n raise NotImplementedError\n\n # Text embed for empty prompt (always in CPU)\n if empty_text_embed is None:\n tokenizer: CLIPTokenizer = CLIPTokenizer.from_pretrained(\n tokenizer_pretrained_path[\"path\"],\n subfolder=tokenizer_pretrained_path[\"subfolder\"],\n )\n text_encoder: CLIPTextModel = CLIPTextModel.from_pretrained(\n 
text_encoder_pretrained_path[\"path\"],\n subfolder=text_encoder_pretrained_path[\"subfolder\"],\n )\n with torch.no_grad():\n self.empty_text_embed = self._encode_text(\n \"\", tokenizer, text_encoder\n ).detach() # [1, 2, 1024]\n else:\n self.empty_text_embed = empty_text_embed\n\n def from_pretrained(pretrained_path, **kwargs):\n return __class__(\n unet_pretrained_path={\"path\": pretrained_path, \"subfolder\": \"unet\"},\n rgb_encoder_pretrained_path={\"path\": pretrained_path, \"subfolder\": \"vae\"},\n depht_ae_pretrained_path={\"path\": pretrained_path, \"subfolder\": \"vae\"},\n noise_scheduler_pretrained_path={\n \"path\": pretrained_path,\n \"subfolder\": \"scheduler\",\n },\n tokenizer_pretrained_path={\n \"path\": pretrained_path,\n \"subfolder\": \"tokenizer\",\n },\n text_encoder_pretrained_path={\n \"path\": pretrained_path,\n \"subfolder\": \"text_encoder\",\n },\n **kwargs,\n )\n\n def _replace_unet_conv_in(self):\n # Replace the first layer to accept 8 in_channels. Only applied when loading pretrained SD U-Net\n _weight = self.unet.conv_in.weight.clone() # [320, 4, 3, 3]\n _bias = self.unet.conv_in.bias.clone() # [320]\n _weight = _weight.repeat((1, 2, 1, 1)) # Keep selected channel(s)\n # half the activation magnitude\n _weight *= 0.5\n _bias *= 0.5\n # new conv_in channel\n _n_convin_out_channel = self.unet.conv_in.out_channels\n _new_conv_in = Conv2d(\n 8, _n_convin_out_channel, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)\n )\n _new_conv_in.weight = Parameter(_weight)\n _new_conv_in.bias = Parameter(_bias)\n self.unet.conv_in = _new_conv_in\n # replace config\n self.unet.config[\"in_channels\"] = 8\n return\n\n def to(self, device):\n self.rgb_encoder.to(device)\n self.depth_ae.to(device)\n self.unet.to(device)\n self.empty_text_embed = self.empty_text_embed.to(device)\n self.device = device\n return self\n\n def forward(\n self,\n rgb_in,\n num_inference_steps: int = 50,\n num_output_inter_results: int = 0,\n show_pbar=False,\n init_depth_latent=None,\n return_depth_latent=False,\n ):\n device = rgb_in.device\n\n # Set timesteps\n self.noise_scheduler.set_timesteps(num_inference_steps, device=device)\n timesteps = self.noise_scheduler.timesteps # [T]\n\n # Encode image\n rgb_latent = self.encode_rgb(rgb_in)\n\n # Initial depth map (noise)\n if init_depth_latent is not None:\n assert (\n init_depth_latent.shape == rgb_latent.shape\n ), \"initial depth latent should be the size of [B, 4, H/8, W/8]\"\n depth_latent = init_depth_latent\n else:\n depth_latent = torch.randn(rgb_latent.shape, device=device) # [B, 4, h, w]\n\n # Expand text embeding for batch\n batch_empty_text_embed = self.empty_text_embed.repeat(\n (rgb_latent.shape[0], 1, 1)\n ) # [B, 2, 1024]\n\n # Export intermediate denoising steps\n if num_output_inter_results > 0:\n depth_latent_ls = []\n inter_steps = []\n _idx = (\n -1\n * (\n np.arange(0, num_output_inter_results)\n * num_inference_steps\n / num_output_inter_results\n )\n .round()\n .astype(int)\n - 1\n )\n steps_to_output = timesteps[_idx]\n\n # Denoising loop\n if show_pbar:\n iterable = tqdm(enumerate(timesteps), total=len(timesteps), leave=False, desc=\"denoising\")\n else:\n iterable = enumerate(timesteps)\n for i, t in iterable:\n unet_input = torch.cat(\n [rgb_latent, depth_latent], dim=1\n ) # this order is important\n\n # predict the noise residual\n noise_pred = self.unet(\n unet_input, t, encoder_hidden_states=batch_empty_text_embed\n ).sample # [B, 4, h, w]\n\n # compute the previous noisy sample x_t -> x_t-1\n depth_latent = 
self.noise_scheduler.step(\n noise_pred, t, depth_latent\n ).prev_sample\n\n if num_output_inter_results > 0 and t in steps_to_output:\n depth_latent_ls.append(depth_latent.detach().clone())\n inter_steps.append(t - 1)\n\n # Decode depth latent\n if num_output_inter_results > 0:\n assert 0 in inter_steps\n depth = [self.decode_depth(lat) for lat in depth_latent_ls]\n if return_depth_latent:\n return depth, inter_steps, depth_latent_ls\n else:\n return depth, inter_steps\n else:\n depth = self.decode_depth(depth_latent)\n if return_depth_latent:\n return depth, depth_latent\n else:\n return depth\n\n def encode_rgb(self, rgb_in):\n rgb_latent = self.rgb_encoder(rgb_in) # [B, 4, h, w]\n rgb_latent = rgb_latent * self.rgb_latent_scale_factor\n return rgb_latent\n\n def encode_depth(self, depth_in):\n depth_latent = self.depth_ae.encode(depth_in)\n depth_latent = depth_latent * self.depth_latent_scale_factor\n return depth_latent\n\n def decode_depth(self, depth_latent):\n depth_latent = depth_latent / self.depth_latent_scale_factor\n depth = self.depth_ae.decode(depth_latent) # [B, 1, H, W]\n return depth\n\n @staticmethod\n def _encode_text(prompt, tokenizer, text_encoder):\n text_inputs = tokenizer(\n prompt,\n padding=\"do_not_pad\",\n max_length=tokenizer.model_max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n text_input_ids = text_inputs.input_ids.to(text_encoder.device)\n text_embed = text_encoder(text_input_ids)[0]\n return text_embed"
},
{
"identifier": "chw2hwc",
"path": "src/util/image_util.py",
"snippet": "def chw2hwc(chw):\n assert 3 == len(chw.shape)\n if isinstance(chw, torch.Tensor):\n hwc = torch.permute(chw, (1, 2, 0))\n elif isinstance(chw, np.ndarray):\n hwc = np.moveaxis(chw, 0, -1)\n return hwc"
},
{
"identifier": "colorize_depth_maps",
"path": "src/util/image_util.py",
"snippet": "def colorize_depth_maps(depth_map, min_depth, max_depth, cmap='Spectral', valid_mask=None):\n \"\"\"\n Colorize depth maps.\n \"\"\"\n assert len(depth_map.shape) >= 2, \"Invalid dimension\"\n \n if isinstance(depth_map, torch.Tensor):\n depth = depth_map.detach().clone().squeeze().numpy()\n elif isinstance(depth_map, np.ndarray):\n depth = depth_map.copy().squeeze()\n # reshape to [ (B,) H, W ]\n if depth.ndim < 3:\n depth = depth[np.newaxis, :, :]\n \n # colorize\n cm = matplotlib.colormaps[cmap]\n depth = ((depth - min_depth) / (max_depth - min_depth)).clip(0, 1)\n img_colored_np = cm(depth, bytes=False)[:,:,:,0:3] # value from 0 to 1\n img_colored_np = np.rollaxis(img_colored_np, 3, 1)\n \n if valid_mask is not None:\n if isinstance(depth_map, torch.Tensor):\n valid_mask = valid_mask.detach().numpy()\n valid_mask = valid_mask.squeeze() # [H, W] or [B, H, W]\n if valid_mask.ndim < 3:\n valid_mask = valid_mask[np.newaxis, np.newaxis, :, :]\n else:\n valid_mask = valid_mask[:, np.newaxis, :, :]\n valid_mask = np.repeat(valid_mask, 3, axis=1)\n img_colored_np[~valid_mask] = 0\n \n if isinstance(depth_map, torch.Tensor):\n img_colored = torch.from_numpy(img_colored_np).float()\n elif isinstance(depth_map, np.ndarray):\n img_colored = img_colored_np\n \n return img_colored"
},
{
"identifier": "resize_max_res",
"path": "src/util/image_util.py",
"snippet": "def resize_max_res(img: Image.Image, max_edge_resolution):\n original_width, original_height = img.size\n downscale_factor = min(max_edge_resolution / original_width, max_edge_resolution / original_height)\n \n new_width = int(original_width * downscale_factor)\n new_height = int(original_height * downscale_factor)\n \n resized_img = img.resize((new_width, new_height))\n return resized_img"
}
] | import os
import time
import shutil
import subprocess
import torch
import numpy as np
from glob import glob
from typing import List
from PIL import Image
from tqdm.auto import tqdm
from torch.utils.data import DataLoader, TensorDataset
from cog import BasePredictor, Input, Path
from src.util.seed_all import seed_all
from src.util.batchsize import find_batch_size
from src.util.ensemble import ensemble_depths
from src.model.marigold_pipeline import MarigoldPipeline
from src.util.image_util import chw2hwc, colorize_depth_maps, resize_max_res | 5,822 |
def predict(
self,
image: Path = Input(description="Input image, use an RGB image for optimal results."),
resize_input: bool = Input(description="Resize the original input resolution to max resolution.", default=True),
num_infer: int = Input(
ge=1, le=20, default=10,
description="Number of inferences to be ensembled, a higher number gives better results but runs slower."
),
denoise_steps: int = Input(
ge=1, le=50, default=10,
description="Inference denoising steps, more steps results in higher accuracy but slower inference speed."
),
regularizer_strength: float = Input(
ge=0.0, le=1, default=0.02,
description="Ensembling parameter, weight of optimization regularizer.",
),
reduction_method: str = Input(
choices=["mean", "median"], default="median",
description="Ensembling parameter, method to merge aligned depth maps."
),
max_iter: int = Input(ge=1, le=20, default=5, description="Ensembling parameter, max optimization iterations."),
seed: int = Input(description="Seed for reproducibility, set to random if left as None.", default=None),
) -> List[Path]:
"""Run a single prediction on the model"""
if seed is None:
seed = int(time.time())
seed_all(seed)
resize_back = True if resize_input else False
n_repeat = num_infer
merging_max_res = None
# Read input image
input_image = Image.open(str(image))
input_size = input_image.size
# Resize image
if resize_input:
input_image = resize_max_res(input_image, max_edge_resolution=768)
# Convert the image to RGB, to 1.remove the alpha channel 2.convert B&W to 3-channel
input_image = input_image.convert("RGB")
image = np.asarray(input_image)
# Normalize rgb values
rgb = np.transpose(image, (2, 0, 1)) # [H, W, rgb] -> [rgb, H, W]
rgb_norm = rgb / 255.0
rgb_norm = torch.from_numpy(rgb_norm).float()
rgb_norm = rgb_norm.to(self.device)
assert rgb_norm.min() >= 0.0 and rgb_norm.max() <= 1.0
# Batch repeated input image
duplicated_rgb = torch.stack([rgb_norm] * n_repeat)
single_rgb_dataset = TensorDataset(duplicated_rgb)
_bs = find_batch_size(n_repeat=n_repeat, input_res=max(rgb_norm.shape[1:]))
single_rgb_loader = DataLoader(single_rgb_dataset, batch_size=_bs, shuffle=False)
# inference
with torch.no_grad():
# Predict depth maps (batched)
depth_pred_ls = []
for batch in tqdm(single_rgb_loader, desc="multiple inference", leave=False):
(batched_img,) = batch
depth_pred_raw = self.model.forward(
batched_img,
num_inference_steps=denoise_steps,
init_depth_latent=None,
show_pbar=True
)
# clip prediction
depth_pred_raw = torch.clip(depth_pred_raw, -1.0, 1.0)
# shift to [0, 1]
depth_pred_raw = depth_pred_raw * 2.0 - 1.0
depth_pred_ls.append(depth_pred_raw.detach().clone())
depth_preds = torch.concat(depth_pred_ls, axis=0).squeeze()
torch.cuda.empty_cache()
# Test-time ensembling
if n_repeat > 1:
depth_pred, pred_uncert = ensemble_depths(
depth_preds,
regularizer_strength=regularizer_strength,
max_iter=max_iter,
tol=1e-3,
reduction=reduction_method,
max_res=merging_max_res,
device=self.device,
)
else:
depth_pred = depth_preds
# Convert to numpy for saving
depth_pred = depth_pred.cpu().numpy()
# Resize back to original resolution
if resize_back:
pred_img = Image.fromarray(depth_pred)
pred_img = pred_img.resize(input_size)
depth_pred = np.asarray(pred_img)
# Save as 16-bit uint png
bw_path = "/tmp/depth_bw.png"
# scale prediction to [0, 1]
min_d = np.min(depth_pred)
max_d = np.max(depth_pred)
depth_to_save = (depth_pred - min_d) / (max_d - min_d)
depth_to_save = (depth_to_save * 65535.0).astype(np.uint16)
Image.fromarray(depth_to_save).save(bw_path, mode="I;16")
# Colorize
percentile = 0.03
min_depth_pct = np.percentile(depth_pred, percentile)
max_depth_pct = np.percentile(depth_pred, 100 - percentile)
color_path = "/tmp/depth_colored.png"
# [3, H, W] - values in (0, 1)
|
class Predictor(BasePredictor):
def setup(self) -> None:
"""Load the model into memory to make running multiple predictions efficient"""
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
ckpt_url = "https://weights.replicate.delivery/default/marigold/checkpoint.tar"
if not os.path.exists("/src/checkpoint"):
print("Downloading checkpoint")
try:
output = subprocess.check_output(["pget", "-x", ckpt_url, "/src/tmp"])
os.rename("/src/tmp/", "/src/checkpoint")
except subprocess.CalledProcessError as e:
raise e
# load model
self.model = MarigoldPipeline.from_pretrained("/src/checkpoint", enable_xformers=True)
self.model.to(self.device)
self.model.unet.eval()
def predict(
self,
image: Path = Input(description="Input image, use an RGB image for optimal results."),
resize_input: bool = Input(description="Resize the original input resolution to max resolution.", default=True),
num_infer: int = Input(
ge=1, le=20, default=10,
description="Number of inferences to be ensembled, a higher number gives better results but runs slower."
),
denoise_steps: int = Input(
ge=1, le=50, default=10,
description="Inference denoising steps, more steps results in higher accuracy but slower inference speed."
),
regularizer_strength: float = Input(
ge=0.0, le=1, default=0.02,
description="Ensembling parameter, weight of optimization regularizer.",
),
reduction_method: str = Input(
choices=["mean", "median"], default="median",
description="Ensembling parameter, method to merge aligned depth maps."
),
max_iter: int = Input(ge=1, le=20, default=5, description="Ensembling parameter, max optimization iterations."),
seed: int = Input(description="Seed for reproducibility, set to random if left as None.", default=None),
) -> List[Path]:
"""Run a single prediction on the model"""
if seed is None:
seed = int(time.time())
seed_all(seed)
resize_back = True if resize_input else False
n_repeat = num_infer
merging_max_res = None
# Read input image
input_image = Image.open(str(image))
input_size = input_image.size
# Resize image
if resize_input:
input_image = resize_max_res(input_image, max_edge_resolution=768)
# Convert the image to RGB, to 1.remove the alpha channel 2.convert B&W to 3-channel
input_image = input_image.convert("RGB")
image = np.asarray(input_image)
# Normalize rgb values
rgb = np.transpose(image, (2, 0, 1)) # [H, W, rgb] -> [rgb, H, W]
rgb_norm = rgb / 255.0
rgb_norm = torch.from_numpy(rgb_norm).float()
rgb_norm = rgb_norm.to(self.device)
assert rgb_norm.min() >= 0.0 and rgb_norm.max() <= 1.0
# Batch repeated input image
duplicated_rgb = torch.stack([rgb_norm] * n_repeat)
single_rgb_dataset = TensorDataset(duplicated_rgb)
_bs = find_batch_size(n_repeat=n_repeat, input_res=max(rgb_norm.shape[1:]))
single_rgb_loader = DataLoader(single_rgb_dataset, batch_size=_bs, shuffle=False)
# inference
with torch.no_grad():
# Predict depth maps (batched)
depth_pred_ls = []
for batch in tqdm(single_rgb_loader, desc="multiple inference", leave=False):
(batched_img,) = batch
depth_pred_raw = self.model.forward(
batched_img,
num_inference_steps=denoise_steps,
init_depth_latent=None,
show_pbar=True
)
# clip prediction
depth_pred_raw = torch.clip(depth_pred_raw, -1.0, 1.0)
# shift to [0, 1]
depth_pred_raw = depth_pred_raw * 2.0 - 1.0
depth_pred_ls.append(depth_pred_raw.detach().clone())
depth_preds = torch.concat(depth_pred_ls, axis=0).squeeze()
torch.cuda.empty_cache()
# Test-time ensembling
if n_repeat > 1:
depth_pred, pred_uncert = ensemble_depths(
depth_preds,
regularizer_strength=regularizer_strength,
max_iter=max_iter,
tol=1e-3,
reduction=reduction_method,
max_res=merging_max_res,
device=self.device,
)
else:
depth_pred = depth_preds
# Convert to numpy for saving
depth_pred = depth_pred.cpu().numpy()
# Resize back to original resolution
if resize_back:
pred_img = Image.fromarray(depth_pred)
pred_img = pred_img.resize(input_size)
depth_pred = np.asarray(pred_img)
# Save as 16-bit uint png
bw_path = "/tmp/depth_bw.png"
# scale prediction to [0, 1]
min_d = np.min(depth_pred)
max_d = np.max(depth_pred)
depth_to_save = (depth_pred - min_d) / (max_d - min_d)
depth_to_save = (depth_to_save * 65535.0).astype(np.uint16)
Image.fromarray(depth_to_save).save(bw_path, mode="I;16")
# Colorize
percentile = 0.03
min_depth_pct = np.percentile(depth_pred, percentile)
max_depth_pct = np.percentile(depth_pred, 100 - percentile)
color_path = "/tmp/depth_colored.png"
# [3, H, W] - values in (0, 1) | depth_colored = colorize_depth_maps( | 5 | 2023-12-15 07:19:14+00:00 | 8k |
CoolPointerException/Amigo | main.py | [
{
"identifier": "ProjectsTab",
"path": "gui/tab_projects.py",
"snippet": "class ProjectsTab:\n def __init__(self, root, frame):\n self.frame = frame\n self.root = root\n\n # Select Directory\n ttk.Label(frame, text=\"Project Directory:\", style='W.Label').pack(fill=tk.X, padx=10, pady=(12, 2))\n self.select_directory_button = ttk.Button(frame, text=\"Select Directory\",\n command=self.select_directory)\n self.select_directory_button.pack(padx=10, pady=10)\n\n # Project Name\n ttk.Label(frame, text=\"Project Name:\", style='W.Label').pack(fill=tk.X, padx=10, pady=2)\n self.project_name_entry = ttk.Entry(frame)\n self.project_name_entry.pack(fill=tk.X, padx=10, pady=10)\n\n # Ignore List\n ttk.Label(frame, text=\"Files to ignore:\", style='W.Label').pack(fill=tk.X, padx=10, pady=2)\n # Frame for Listbox and scrollbars\n self.ignore_listbox_frame = tk.Frame(frame)\n self.ignore_listbox_frame.pack(fill=tk.X, padx=10, pady=10)\n\n # Vertical scrollbar\n self.ignore_vscrollbar = tk.Scrollbar(self.ignore_listbox_frame, orient='vertical')\n self.ignore_vscrollbar.pack(side='right', fill='y')\n\n # Ignore Listbox\n self.ignore_listbox = tk.Listbox(self.ignore_listbox_frame, height=5,\n yscrollcommand=self.ignore_vscrollbar.set)\n self.ignore_listbox.pack(side='left', fill='both', expand=True)\n\n # Configure scrollbars\n self.ignore_vscrollbar.config(command=self.ignore_listbox.yview)\n\n # Frame for buttons\n self.buttons_frame = tk.Frame(frame)\n self.buttons_frame.pack(fill=tk.X, padx=10, pady=10)\n\n # Select Files Button\n self.select_files_button = ttk.Button(self.buttons_frame, text=\"Select\", command=self.select_files)\n self.select_files_button.pack(side='left', padx=5, pady=5)\n ttk.Label(self.buttons_frame, text=\"Select which files should be ignored while indexing\")\\\n .pack(side='left', padx=10, pady=2)\n\n # Spacer frame to create gap between buttons\n self.spacer_frame = tk.Frame(self.buttons_frame, width=40)\n self.spacer_frame.pack(side='left', padx=5, pady=5)\n\n # Delete Selected File Button\n self.delete_file_button = ttk.Button(self.buttons_frame, text=\"Delete\", command=self.delete_selected_file)\n self.delete_file_button.pack(side='left', padx=5, pady=5)\n ttk.Label(self.buttons_frame, text=\"Remove selected file from the list\").pack(side='left', padx=10, pady=2)\n\n # Ignore Directory List\n ttk.Label(frame, text=\"Directories to ignore:\", style='W.Label').pack(fill=tk.X, padx=10, pady=2)\n # Frame for Listbox and scrollbars\n self.ignore_directory_listbox_frame = tk.Frame(frame)\n self.ignore_directory_listbox_frame.pack(fill=tk.X, padx=10, pady=10)\n\n # Vertical scrollbar\n self.ignore_directory_vscrollbar = tk.Scrollbar(self.ignore_directory_listbox_frame, orient='vertical')\n self.ignore_directory_vscrollbar.pack(side='right', fill='y')\n\n # Ignore Listbox\n self.ignore_directory_listbox = tk.Listbox(self.ignore_directory_listbox_frame, height=5,\n yscrollcommand=self.ignore_directory_vscrollbar.set)\n self.ignore_directory_listbox.pack(side='left', fill='both', expand=True)\n\n # Configure scrollbars\n self.ignore_directory_vscrollbar.config(command=self.ignore_directory_listbox.yview)\n\n # Frame for buttons\n self.buttons_directory_frame = tk.Frame(frame)\n self.buttons_directory_frame.pack(fill=tk.X, padx=10, pady=10)\n\n # Select Files Button\n self.select_directories_button = ttk.Button(self.buttons_directory_frame, text=\"Select\",\n command=self.select_ignored_directory)\n self.select_directories_button.pack(side='left', padx=5, pady=5)\n ttk.Label(self.buttons_directory_frame, text=\"Select which directories should 
be ignored while indexing\") \\\n .pack(side='left', padx=10, pady=2)\n\n # Spacer frame to create gap between buttons\n self.spacer_directories_frame = tk.Frame(self.buttons_directory_frame, width=40)\n self.spacer_directories_frame.pack(side='left', padx=5, pady=5)\n\n # Delete Selected File Button\n self.delete_directory_button = ttk.Button(self.buttons_directory_frame, text=\"Delete\",\n command=self.delete_selected_directory)\n self.delete_directory_button.pack(side='left', padx=5, pady=5)\n ttk.Label(self.buttons_directory_frame, text=\"Remove selected directory from the list\").pack(side='left', padx=10, pady=2)\n\n # Add new Project Button\n self.execute_action_button = ttk.Button(frame, text=\"Create Index\",\n command=self.add_new_project, style='W.TButton')\n self.execute_action_button.pack(padx=10, pady=10)\n\n self.generating_label = ttk.Label(frame, text=\"\")\n self.generating_label.pack(padx=10, pady=10)\n\n # Project List Area\n ttk.Label(frame, text=\"Reindex Project:\", style='W.Label').pack(fill=tk.X, padx=10, pady=2)\n ttk.Label(frame, text=\"In case there is a .git folder in your root directory, you have an option to reindex \"\n \"only the files that were added or modified since the last indexing.\", wraplength=880)\\\n .pack(fill=tk.X, padx=10, pady=2)\n\n self.reindex_frame = tk.Frame(frame)\n self.reindex_frame.pack(fill=tk.X)\n\n self.reindex_project = ttk.Combobox(self.reindex_frame)\n self.reindex_project.pack(side='left', padx=10, pady=10, fill=tk.X, expand=True)\n\n # Add new Project Button\n self.reindex_project_button = ttk.Button(self.reindex_frame, text=\"Reindex\", command=self.reindex_project_action)\n self.reindex_project_button.pack(side='left', padx=10, pady=10)\n\n # Logs\n ttk.Label(frame, text=\"Logs:\", style='W.Label').pack(fill=tk.X, padx=10, pady=2)\n # Frame for Logs\n self.logs_frame = tk.Frame(frame)\n self.logs_frame.pack(fill=tk.X, padx=10, pady=10)\n\n # Vertical scrollbar\n self.logs_vscrollbar = tk.Scrollbar(self.logs_frame, orient='vertical')\n self.logs_vscrollbar.pack(side='right', fill='y')\n\n # Logs Listbox\n self.logs_listbox = tk.Listbox(self.logs_frame, height=8,\n yscrollcommand=self.logs_vscrollbar.set)\n self.logs_listbox.pack(side='left', fill='both', expand=True)\n\n # Configure scrollbars\n self.logs_vscrollbar.config(command=self.logs_listbox.yview)\n\n def select_directory(self):\n directory = filedialog.askdirectory()\n if directory:\n self.selected_directory = directory\n self.select_directory_button.config(text=f\"Directory: {directory}\")\n\n def select_files(self):\n if hasattr(self, 'selected_directory'):\n files = filedialog.askopenfilenames(initialdir=self.selected_directory, title=\"Select Files\",\n filetypes=((\"All files\", \"*.*\"),))\n for file in files:\n if file not in self.ignore_listbox.get(0, tk.END): # Avoid duplicates\n self.ignore_listbox.insert(tk.END, file)\n else:\n messagebox.showerror(\"Error\", \"Please select a directory first.\")\n\n def delete_selected_file(self):\n selected_items = self.ignore_listbox.curselection()\n # Must delete from the end to avoid shifting of the indices\n for i in reversed(selected_items):\n self.ignore_listbox.delete(i)\n\n def select_ignored_directory(self):\n if hasattr(self, 'selected_directory'):\n directory = filedialog.askdirectory(initialdir=self.selected_directory, title=\"Select Directory\")\n if directory:\n if directory not in self.ignore_directory_listbox.get(0, tk.END):\n self.ignore_directory_listbox.insert(tk.END, directory)\n\n def 
delete_selected_directory(self):\n selected_items = self.ignore_directory_listbox.curselection()\n # Must delete from the end to avoid shifting of the indices\n for i in reversed(selected_items):\n self.ignore_directory_listbox.delete(i)\n\n def reindex_project_action(self):\n is_valid = validate(self.root, [\n Properties.REINDEX_PROJECT,\n Properties.THREADS,\n ])\n\n if not is_valid:\n return\n\n reindex_project = self.reindex_project.get()\n\n # parse project name and project directory\n reindex_project = reindex_project.split(\" | \")\n project_name = reindex_project[0]\n project_dir = reindex_project[1]\n\n f = open(project_name + \"/ignored_files.txt\", \"r\")\n ignored_files = f.read().split(\"\\n\")\n\n f = open(project_name + \"/ignored_directories.txt\", \"r\")\n ignored_directories = f.read().split(\"\\n\")\n\n self.create_index_project(\n project_name,\n project_dir,\n ignored_files,\n ignored_directories,\n True\n )\n\n def add_new_project(self):\n is_valid = validate(self.root, [\n Properties.PROJECT_NAME,\n Properties.SELECTED_DIRECTORY,\n Properties.THREADS,\n ])\n\n if not is_valid:\n return\n\n project_name = self.project_name_entry.get()\n directory = self.selected_directory\n files = self.ignore_listbox.get(0, tk.END)\n directories = self.ignore_directory_listbox.get(0, tk.END)\n\n self.create_index_project(\n project_name,\n directory,\n files,\n directories,\n )\n\n def create_index_project(\n self,\n project_name,\n directory,\n files,\n directories,\n is_reindex=False\n ):\n if not is_api_type_set(self.root):\n return\n\n api_type = self.root.settings_tab.api_type.get()\n threads = int(self.root.settings_tab.threads.get())\n\n init_llama_index(self.root, api_type)\n\n diff = None\n if is_reindex:\n diff = git_diff(project_name, self.root)\n\n self.generating_label.config(text=\"Generating index, please wait...\")\n\n try:\n thread = threading.Thread(target=add_project, args=(\n self.root,\n directory,\n project_name,\n files,\n directories,\n is_reindex,\n diff,\n threads\n ))\n\n thread.start()\n except Exception as e:\n messagebox.showerror(\"Error\", f\"Error while generating index!\")\n self.generating_label.config(text=\"Finished!\")\n self.logs_listbox.insert(tk.END, f\"Error: {e}\")"
},
{
"identifier": "load_settings",
"path": "gui/settings.py",
"snippet": "def load_settings(self):\n try:\n with open(self.settings_file, 'r') as f:\n settings = json.load(f)\n if settings.get('api_version'):\n self.settings_tab.api_version_entry.delete(0, tk.END)\n self.settings_tab.api_version_entry.insert(0, settings.get('api_version'))\n if settings.get('api_type'):\n self.settings_tab.api_type.set(settings.get('api_type'))\n self.settings_tab.show_only_relevant_settings()\n if settings.get('api_key'):\n self.settings_tab.api_key_entry.delete(0, tk.END)\n self.settings_tab.api_key_entry.insert(0, settings.get('api_key'))\n if settings.get('api_host'):\n self.settings_tab.api_host_entry.delete(0, tk.END)\n self.settings_tab.api_host_entry.insert(0, settings.get('api_host'))\n if settings.get('gpt_model'):\n self.settings_tab.gpt_model.delete(0, tk.END)\n self.settings_tab.gpt_model.insert(0, settings.get('gpt_model'))\n if settings.get('gpt_deployment'):\n self.settings_tab.gpt_deployment.delete(0, tk.END)\n self.settings_tab.gpt_deployment.insert(0, settings.get('gpt_deployment'))\n if settings.get('embeddings_model'):\n self.settings_tab.embeddings_model_entry.delete(0, tk.END)\n self.settings_tab.embeddings_model_entry.insert(0, settings.get('embeddings_model'))\n if settings.get('embeddings_deployment'):\n self.settings_tab.embeddings_deployment_entry.delete(0, tk.END)\n self.settings_tab.embeddings_deployment_entry.insert(0, settings.get('embeddings_deployment'))\n if settings.get('prompt'):\n self.settings_tab.prompt_entry.delete('1.0', tk.END)\n self.settings_tab.prompt_entry.insert(INSERT, settings.get('prompt', 'This is a prompt'))\n if settings.get('projects'):\n projects = settings.get('projects')\n for project in projects:\n values = list(self.task_tab.selected_project[\"values\"])\n self.task_tab.selected_project[\"values\"] = values + [project]\n self.projects_tab.reindex_project[\"values\"] = values + [project]\n if settings.get('selected_project'):\n self.task_tab.selected_project.set(settings.get('selected_project'))\n self.projects_tab.reindex_project.set(settings.get('selected_project'))\n if settings.get('max_tokens'):\n self.settings_tab.max_tokens.delete(0, tk.END)\n self.settings_tab.max_tokens.insert(INSERT, settings.get('max_tokens'))\n if settings.get('threads'):\n self.settings_tab.threads.delete(0, tk.END)\n self.settings_tab.threads.insert(INSERT, settings.get('threads'))\n\n except FileNotFoundError:\n print(\"Settings file not found. Using default values.\")"
},
{
"identifier": "save_settings",
"path": "gui/settings.py",
"snippet": "def save_settings(self):\n settings = {\n 'api_version': self.settings_tab.api_version_entry.get(),\n 'api_type': self.settings_tab.api_type.get(),\n 'api_key': self.settings_tab.api_key_entry.get(),\n 'api_host': self.settings_tab.api_host_entry.get(),\n 'gpt_model': self.settings_tab.gpt_model.get(),\n 'gpt_deployment': self.settings_tab.gpt_deployment.get(),\n 'embeddings_model': self.settings_tab.embeddings_model_entry.get(),\n 'embeddings_deployment': self.settings_tab.embeddings_deployment_entry.get(),\n 'prompt': self.settings_tab.prompt_entry.get(\"1.0\", tk.END).strip(),\n 'projects': list(self.task_tab.selected_project[\"values\"]),\n 'selected_project': self.task_tab.selected_project.get(),\n 'max_tokens': self.settings_tab.max_tokens.get(),\n 'threads': self.settings_tab.threads.get()\n }\n with open(self.settings_file, 'w') as f:\n json.dump(settings, f, indent=4)"
},
{
"identifier": "SettingsTab",
"path": "gui/tab_settings.py",
"snippet": "class SettingsTab:\n def __init__(self, root, frame):\n self.frame = frame\n self.root = root\n\n # API Type\n ttk.Label(frame, text=\"API Type:\", style='W.Label').grid(column=0, row=0, sticky='E', padx=8)\n self.api_type = ttk.Combobox(frame, state=\"readonly\", values=[\"azure\", \"openai\", \"gemini\"])\n self.api_type.grid(column=1, row=0, padx=24, pady=5, sticky='EW')\n self.api_type.bind('<<ComboboxSelected>>', self.api_type_changed)\n\n # API Key\n ttk.Label(frame, text=\"API Key:\", style='W.Label').grid(column=0, row=1, sticky='E', padx=8)\n self.api_key_entry = ttk.Entry(frame)\n self.api_key_entry.grid(column=1, row=1, padx=24, pady=5, sticky='EW')\n\n # API Host URL\n self.api_host_label = ttk.Label(frame, text=\"API Host URL:\", style='W.Label')\n self.api_host_label.grid(column=0, row=2, sticky='E', padx=8)\n self.api_host_entry = ttk.Entry(frame)\n self.api_host_entry.grid(column=1, row=2, padx=24, pady=5, sticky='EW')\n\n # API Version\n self.api_version_label = ttk.Label(frame, text=\"API Version:\", style='W.Label')\n self.api_version_label.grid(column=0, row=3, sticky='E', padx=8)\n self.api_version_entry = ttk.Entry(frame)\n self.api_version_entry.grid(column=1, row=3, padx=24, pady=5, sticky='EW')\n\n # GPT Model Name\n ttk.Label(frame, text=\"GPT Model Name:\", style='W.Label').grid(column=0, row=4, sticky='E', padx=8)\n self.gpt_model = ttk.Entry(frame)\n self.gpt_model.grid(column=1, row=4, padx=24, pady=5, sticky='EW')\n\n # GPT Deployment Name\n self.gpt_deployment_label = ttk.Label(frame, text=\"GPT Deployment Name:\", style='W.Label')\n self.gpt_deployment_label.grid(column=0, row=5, sticky='E', padx=8)\n self.gpt_deployment = ttk.Entry(frame)\n self.gpt_deployment.grid(column=1, row=5, padx=24, pady=5, sticky='EW')\n\n # Embeddings Model Name\n ttk.Label(frame, text=\"Embed Model Name:\", style='W.Label').grid(column=0, row=6, sticky='E', padx=8)\n self.embeddings_model_entry = ttk.Entry(frame)\n self.embeddings_model_entry.grid(column=1, row=6, padx=24, pady=5, sticky='EW')\n\n # Embeddings Deployment Name\n self.embeddings_deployment_label = ttk.Label(frame, text=\"Embed Depl Name:\", style='W.Label')\n self.embeddings_deployment_label.grid(column=0, row=7, sticky='E', padx=8)\n self.embeddings_deployment_entry = ttk.Entry(frame)\n self.embeddings_deployment_entry.grid(column=1, row=7, padx=24, pady=5, sticky='EW')\n\n # Prompt\n ttk.Label(frame, text=\"Prompt:\", style='W.Label').grid(column=0, row=8, sticky='E', padx=8)\n self.prompt_entry = scrolledtext.ScrolledText(frame, wrap=tk.WORD, height=7)\n self.prompt_entry.configure(state='normal')\n self.prompt_entry.insert(INSERT, \"\"\"You are a programmer who tries to complete tasks that your analytic team provides. They give you a task description and code they think might be relevant. Prepare all the necessary changes that are required for the task to be finished. If the team did not provide enough information, point out what information you still need. 
If you have enough information then your solutions for tasks should follow these guidelines without any additional explanation:\n - include only the code that needs to be modified/added, \n - print line numbers of the code snippet if a file was modified (use the standard with + prefix for added and - for deleted lines) \n - include in the changes also all the required imports \n - follow the coding style and conventions from the existing files \n - do not explain what the code does \n - explain only if there are some things that needs to be checked, before applying changes (vulnerabilities, ambiguity ... ) \n - if you think there are other files that need to be modified, but were not included in the question, point that out (use the project structure as reference) \n - do not create any comments in code snippets \"\"\")\n self.prompt_entry.grid(column=1, row=8, padx=24, pady=5)\n\n # Optional Settings\n ttk.Label(frame, text=\"Max input token length:\", style='W.Label').grid(column=0, row=9, sticky='E', padx=8)\n self.max_tokens = ttk.Entry(frame)\n self.max_tokens.grid(column=1, row=9, padx=24, pady=5, sticky='EW')\n self.max_tokens.insert(INSERT, \"64000\")\n\n ttk.Label(frame, text=\"Number of threads:\", style='W.Label').grid(column=0, row=10, sticky='E', padx=8)\n self.threads = ttk.Entry(frame)\n self.threads.insert(INSERT, \"1\")\n self.threads.grid(column=1, row=10, padx=24, pady=5, sticky='EW')\n\n def api_type_changed(self, event):\n self.root.isLlamaInitialized = False\n self.show_only_relevant_settings()\n\n def show_only_relevant_settings(self):\n if self.api_type.get() == \"openai\" or self.api_type.get() == \"gemini\":\n self.api_host_label.grid_remove()\n self.api_host_entry.grid_remove()\n self.api_version_label.grid_remove()\n self.api_version_entry.grid_remove()\n self.gpt_deployment_label.grid_remove()\n self.gpt_deployment.grid_remove()\n self.embeddings_deployment_label.grid_remove()\n self.embeddings_deployment_entry.grid_remove()\n if self.api_type.get() == \"azure\":\n self.api_host_label.grid(column=0, row=2, sticky='E', padx=8)\n self.api_host_entry.grid(column=1, row=2, padx=24, pady=5, sticky='EW')\n self.api_version_label.grid(column=0, row=3, sticky='E', padx=8)\n self.api_version_entry.grid(column=1, row=3, padx=24, pady=5, sticky='EW')\n self.gpt_deployment_label.grid(column=0, row=5, sticky='E', padx=8)\n self.gpt_deployment.grid(column=1, row=5, padx=24, pady=5, sticky='EW')\n self.embeddings_deployment_label.grid(column=0, row=7, sticky='E', padx=8)\n self.embeddings_deployment_entry.grid(column=1, row=7, padx=24, pady=5, sticky='EW')"
},
{
"identifier": "TaskTab",
"path": "gui/tab_task.py",
"snippet": "class TaskTab:\n def __init__(self, root, frame):\n self.frame = frame\n self.root = root\n\n # Task Requirements\n ttk.Label(frame, text=\"Task Requirements:\", style='W.Label').pack(fill=tk.X, padx=10, pady=(12, 2))\n self.task_requirements_entry = scrolledtext.ScrolledText(frame, wrap=tk.WORD, height=7)\n self.task_requirements_entry.configure(state='normal')\n self.task_requirements_entry.pack(fill=tk.X, padx=10, pady=10)\n\n # Select project\n ttk.Label(frame, text=\"Selected Project:\", style='W.Label').pack(fill=tk.X, padx=10, pady=2)\n self.selected_project = ttk.Combobox(frame)\n self.selected_project.pack(fill=tk.X, padx=10, pady=10)\n\n # Run Generation Button\n self.run_generation_button = ttk.Button(frame, text=\"Generate\", command=self.generate_answer)\n self.run_generation_button.pack(padx=10, pady=10)\n\n # Clear chat Button\n self.run_generation_button = ttk.Button(frame, text=\"Clear chat\", command=self.clear_chat)\n self.run_generation_button.pack(padx=10, pady=10)\n\n # Generation Response Field\n self.generation_response_frame = ttk.Frame(self.frame)\n self.generation_response = HtmlFrame(self.generation_response_frame)\n\n # Loading screen\n self.loading_frame = ttk.Frame(self.frame)\n self.loader = HtmlFrame(self.loading_frame)\n self.load_loading_page()\n\n def clear_chat(self):\n self.root.messages = []\n self.load_web_page()\n\n def generate_answer(self):\n is_valid = validate(self.root, [\n Properties.TASK_REQUIREMENTS,\n Properties.SELECTED_PROJECT,\n Properties.PROMPT,\n Properties.MAX_TOKENS,\n Properties.API_TYPE,\n ])\n if not is_valid:\n return\n\n api_type = self.root.settings_tab.api_type.get()\n task_requirements = self.task_requirements_entry.get(\"1.0\", tk.END)\n project_name = self.selected_project.get()\n prompt = self.root.settings_tab.prompt_entry.get(\"1.0\", tk.END)\n max_tokens = self.root.settings_tab.max_tokens.get()\n\n init_llama_index(self.root, api_type)\n\n self.generation_response_frame.place_forget()\n self.loading_frame.place(y=380, x=0, relwidth=1, height=900)\n\n if not self.root.messages:\n self.root.messages.append(\n ChatMessage(role=\"system\", content=prompt)\n )\n\n try:\n thread = threading.Thread(target=question, args=(\n project_name,\n task_requirements,\n prompt,\n max_tokens,\n self.root\n ))\n\n thread.start()\n\n except Exception as e:\n messagebox.showerror(\"Error\", \"Error occurred while generating response: \\n\" + str(e))\n self.root.messages = []\n\n def load_web_page(self):\n tempfile.NamedTemporaryFile(mode='w')\n f = open(\"temp.html\", 'w')\n f.write(\"<html><body style='background: rgb(28, 28, 28)'>\")\n for message in self.root.messages:\n content = message.content.replace(\"\\n\", \"<br>\")\n if message.role == \"system\":\n continue\n if message.role == \"user\":\n f.write(f\"<p style='color: rgb(255, 255, 255); font-size: 12px; font-family: Arial; margin-right: \"\n f\"40px; border-radius: 0 12px 12px 0; background: \"\n f\"rgb(117, 92, 129); padding: 10px;'>{content}</p>\")\n if message.role == \"assistant\":\n f.write(f\"<p style='color: rgb(255, 255, 255); font-size: 12px; font-family: Arial; margin-left: \"\n f\"40px; border-radius: 12px 0 0 12px; background: rgb(117, 92, 129); padding: 10px'>{content}</p>\")\n\n f.write(\"</body></html>\")\n f.flush()\n f.close()\n self.generation_response.load_file(os.path.abspath(f.name), force=True)\n self.generation_response.pack()\n return True\n\n def load_loading_page(self):\n temp_folder = getattr(sys, '_MEIPASS', 
os.path.dirname(os.path.abspath(__file__)))\n script_dir = os.path.dirname(os.path.abspath(__file__))\n\n if temp_folder == script_dir:\n # Running as a Python script.\n loading_animation = os.path.abspath(\"assets/loading.png\").replace('\\\\', '/')\n else:\n # Running as a PyInstaller bundle.\n loading_animation = os.path.join(temp_folder, 'loading.png').replace('\\\\', '/')\n\n tempfile.NamedTemporaryFile(mode='r')\n tempfile.NamedTemporaryFile(mode='w')\n f = open(\"loading.html\", 'w')\n f.write(f\"<html><body style='background-color: rgb(28, 28, 28)'><img src='file:///{loading_animation}' style\"\n f\"='width: 300px; height: 300px; margin: auto; display: block; padding-top: 100px'></body></html>\")\n f.flush()\n f.close()\n self.loader.load_file(os.path.abspath(\"loading.html\"), force=True)\n self.loader.pack()\n return True"
}
] | import tkinter as tk
import sv_ttk
from tkinter import ttk
from tkinter.ttk import Style
from gui.tab_projects import ProjectsTab
from gui.settings import load_settings, save_settings
from gui.tab_settings import SettingsTab
from gui.tab_task import TaskTab | 6,721 |
class Application(tk.Tk):
def __init__(self):
super().__init__()
self.title("Amigo")
self.geometry("900x1100")
self.style = Style()
self.isLlamaInitialized = False
sv_ttk.set_theme("dark")
self.messages = []
self.style.configure('W.TButton', font=('calibri', 18, 'bold', 'underline'), borderwidth='4')
self.style.configure('W.Label', font=('calibri', 13, 'bold'))
# Create the tab control
self.tab_control = ttk.Notebook(self)
# Create tabs
self.settings_frame = ttk.Frame(self.tab_control)
self.task_frame = ttk.Frame(self.tab_control)
self.projects_frame = ttk.Frame(self.tab_control)
# Add tabs to notebook
self.tab_control.add(self.task_frame, text='Task')
self.tab_control.add(self.projects_frame, text='Projects')
self.tab_control.add(self.settings_frame, text='Settings')
# Init UI
|
class Application(tk.Tk):
def __init__(self):
super().__init__()
self.title("Amigo")
self.geometry("900x1100")
self.style = Style()
self.isLlamaInitialized = False
sv_ttk.set_theme("dark")
self.messages = []
self.style.configure('W.TButton', font=('calibri', 18, 'bold', 'underline'), borderwidth='4')
self.style.configure('W.Label', font=('calibri', 13, 'bold'))
# Create the tab control
self.tab_control = ttk.Notebook(self)
# Create tabs
self.settings_frame = ttk.Frame(self.tab_control)
self.task_frame = ttk.Frame(self.tab_control)
self.projects_frame = ttk.Frame(self.tab_control)
# Add tabs to notebook
self.tab_control.add(self.task_frame, text='Task')
self.tab_control.add(self.projects_frame, text='Projects')
self.tab_control.add(self.settings_frame, text='Settings')
# Init UI | self.settings_tab = SettingsTab(self, self.settings_frame) | 3 | 2023-12-15 14:06:38+00:00 | 8k |
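The record above exercises a recurring Tkinter pattern: a ttk.Notebook whose tabs are plain frames, each handed to a small tab class together with the application object. The sketch below is a minimal, self-contained illustration of that pattern only; the class and widget names are invented for the example and are not taken from the dataset row.

```python
# Minimal sketch of the Notebook + tab-class pattern shown in the row above.
# All names here are illustrative stand-ins, not the row's own code.
import tkinter as tk
from tkinter import ttk


class DemoTab:
    def __init__(self, root, frame):
        # Each tab class receives the application object and its own frame.
        self.root = root
        self.frame = frame
        ttk.Label(frame, text="Demo tab content").pack(padx=10, pady=10)


class DemoApp(tk.Tk):
    def __init__(self):
        super().__init__()
        self.title("Demo")
        # One Notebook, one Frame per tab, then a helper class per frame.
        self.tab_control = ttk.Notebook(self)
        self.demo_frame = ttk.Frame(self.tab_control)
        self.tab_control.add(self.demo_frame, text="Demo")
        self.tab_control.pack(expand=True, fill="both")
        self.demo_tab = DemoTab(self, self.demo_frame)


if __name__ == "__main__":
    DemoApp().mainloop()
```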
quocanh34/magic-animate-modified | magicanimate/models/unet_3d_blocks.py | [
{
"identifier": "Transformer3DModel",
"path": "magicanimate/models/attention.py",
"snippet": "class Transformer3DModel(ModelMixin, ConfigMixin):\n @register_to_config\n def __init__(\n self,\n num_attention_heads: int = 16,\n attention_head_dim: int = 88,\n in_channels: Optional[int] = None,\n num_layers: int = 1,\n dropout: float = 0.0,\n norm_num_groups: int = 32,\n cross_attention_dim: Optional[int] = None,\n attention_bias: bool = False,\n activation_fn: str = \"geglu\",\n num_embeds_ada_norm: Optional[int] = None,\n use_linear_projection: bool = False,\n only_cross_attention: bool = False,\n upcast_attention: bool = False,\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n ):\n super().__init__()\n self.use_linear_projection = use_linear_projection\n self.num_attention_heads = num_attention_heads\n self.attention_head_dim = attention_head_dim\n inner_dim = num_attention_heads * attention_head_dim\n\n # Define input layers\n self.in_channels = in_channels\n\n self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)\n if use_linear_projection:\n self.proj_in = nn.Linear(in_channels, inner_dim)\n else:\n self.proj_in = nn.Conv2d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0)\n\n # Define transformers blocks\n self.transformer_blocks = nn.ModuleList(\n [\n BasicTransformerBlock(\n inner_dim,\n num_attention_heads,\n attention_head_dim,\n dropout=dropout,\n cross_attention_dim=cross_attention_dim,\n activation_fn=activation_fn,\n num_embeds_ada_norm=num_embeds_ada_norm,\n attention_bias=attention_bias,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n for d in range(num_layers)\n ]\n )\n\n # 4. 
Define output layers\n if use_linear_projection:\n self.proj_out = nn.Linear(in_channels, inner_dim)\n else:\n self.proj_out = nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)\n\n def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, return_dict: bool = True):\n # Input\n assert hidden_states.dim() == 5, f\"Expected hidden_states to have ndim=5, but got ndim={hidden_states.dim()}.\"\n video_length = hidden_states.shape[2]\n hidden_states = rearrange(hidden_states, \"b c f h w -> (b f) c h w\")\n # JH: need not repeat when a list of prompts are given \n if encoder_hidden_states.shape[0] != hidden_states.shape[0]:\n encoder_hidden_states = repeat(encoder_hidden_states, 'b n c -> (b f) n c', f=video_length)\n\n batch, channel, height, weight = hidden_states.shape\n residual = hidden_states\n\n hidden_states = self.norm(hidden_states)\n if not self.use_linear_projection:\n hidden_states = self.proj_in(hidden_states)\n inner_dim = hidden_states.shape[1]\n hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * weight, inner_dim)\n else:\n inner_dim = hidden_states.shape[1]\n hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * weight, inner_dim)\n hidden_states = self.proj_in(hidden_states)\n\n # Blocks\n for block in self.transformer_blocks:\n hidden_states = block(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n timestep=timestep,\n video_length=video_length\n )\n\n # Output\n if not self.use_linear_projection:\n hidden_states = (\n hidden_states.reshape(batch, height, weight, inner_dim).permute(0, 3, 1, 2).contiguous()\n )\n hidden_states = self.proj_out(hidden_states)\n else:\n hidden_states = self.proj_out(hidden_states)\n hidden_states = (\n hidden_states.reshape(batch, height, weight, inner_dim).permute(0, 3, 1, 2).contiguous()\n )\n\n output = hidden_states + residual\n\n output = rearrange(output, \"(b f) c h w -> b c f h w\", f=video_length)\n if not return_dict:\n return (output,)\n\n return Transformer3DModelOutput(sample=output)"
},
{
"identifier": "Downsample3D",
"path": "magicanimate/models/resnet.py",
"snippet": "class Downsample3D(nn.Module):\n def __init__(self, channels, use_conv=False, out_channels=None, padding=1, name=\"conv\"):\n super().__init__()\n self.channels = channels\n self.out_channels = out_channels or channels\n self.use_conv = use_conv\n self.padding = padding\n stride = 2\n self.name = name\n\n if use_conv:\n self.conv = InflatedConv3d(self.channels, self.out_channels, 3, stride=stride, padding=padding)\n else:\n raise NotImplementedError\n\n def forward(self, hidden_states):\n assert hidden_states.shape[1] == self.channels\n if self.use_conv and self.padding == 0:\n raise NotImplementedError\n\n assert hidden_states.shape[1] == self.channels\n hidden_states = self.conv(hidden_states)\n\n return hidden_states"
},
{
"identifier": "ResnetBlock3D",
"path": "magicanimate/models/resnet.py",
"snippet": "class ResnetBlock3D(nn.Module):\n def __init__(\n self,\n *,\n in_channels,\n out_channels=None,\n conv_shortcut=False,\n dropout=0.0,\n temb_channels=512,\n groups=32,\n groups_out=None,\n pre_norm=True,\n eps=1e-6,\n non_linearity=\"swish\",\n time_embedding_norm=\"default\",\n output_scale_factor=1.0,\n use_in_shortcut=None,\n ):\n super().__init__()\n self.pre_norm = pre_norm\n self.pre_norm = True\n self.in_channels = in_channels\n out_channels = in_channels if out_channels is None else out_channels\n self.out_channels = out_channels\n self.use_conv_shortcut = conv_shortcut\n self.time_embedding_norm = time_embedding_norm\n self.output_scale_factor = output_scale_factor\n\n if groups_out is None:\n groups_out = groups\n\n self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True)\n\n self.conv1 = InflatedConv3d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)\n\n if temb_channels is not None:\n if self.time_embedding_norm == \"default\":\n time_emb_proj_out_channels = out_channels\n elif self.time_embedding_norm == \"scale_shift\":\n time_emb_proj_out_channels = out_channels * 2\n else:\n raise ValueError(f\"unknown time_embedding_norm : {self.time_embedding_norm} \")\n\n self.time_emb_proj = torch.nn.Linear(temb_channels, time_emb_proj_out_channels)\n else:\n self.time_emb_proj = None\n\n self.norm2 = torch.nn.GroupNorm(num_groups=groups_out, num_channels=out_channels, eps=eps, affine=True)\n self.dropout = torch.nn.Dropout(dropout)\n self.conv2 = InflatedConv3d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)\n\n if non_linearity == \"swish\":\n self.nonlinearity = lambda x: F.silu(x)\n elif non_linearity == \"mish\":\n self.nonlinearity = Mish()\n elif non_linearity == \"silu\":\n self.nonlinearity = nn.SiLU()\n\n self.use_in_shortcut = self.in_channels != self.out_channels if use_in_shortcut is None else use_in_shortcut\n\n self.conv_shortcut = None\n if self.use_in_shortcut:\n self.conv_shortcut = InflatedConv3d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)\n\n def forward(self, input_tensor, temb):\n hidden_states = input_tensor\n\n hidden_states = self.norm1(hidden_states)\n hidden_states = self.nonlinearity(hidden_states)\n\n hidden_states = self.conv1(hidden_states)\n\n if temb is not None:\n temb = self.time_emb_proj(self.nonlinearity(temb))[:, :, None, None, None]\n\n if temb is not None and self.time_embedding_norm == \"default\":\n hidden_states = hidden_states + temb\n\n hidden_states = self.norm2(hidden_states)\n\n if temb is not None and self.time_embedding_norm == \"scale_shift\":\n scale, shift = torch.chunk(temb, 2, dim=1)\n hidden_states = hidden_states * (1 + scale) + shift\n\n hidden_states = self.nonlinearity(hidden_states)\n\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.conv2(hidden_states)\n\n if self.conv_shortcut is not None:\n input_tensor = self.conv_shortcut(input_tensor)\n\n output_tensor = (input_tensor + hidden_states) / self.output_scale_factor\n\n return output_tensor"
},
{
"identifier": "Upsample3D",
"path": "magicanimate/models/resnet.py",
"snippet": "class Upsample3D(nn.Module):\n def __init__(self, channels, use_conv=False, use_conv_transpose=False, out_channels=None, name=\"conv\"):\n super().__init__()\n self.channels = channels\n self.out_channels = out_channels or channels\n self.use_conv = use_conv\n self.use_conv_transpose = use_conv_transpose\n self.name = name\n\n conv = None\n if use_conv_transpose:\n raise NotImplementedError\n elif use_conv:\n self.conv = InflatedConv3d(self.channels, self.out_channels, 3, padding=1)\n\n def forward(self, hidden_states, output_size=None):\n assert hidden_states.shape[1] == self.channels\n\n if self.use_conv_transpose:\n raise NotImplementedError\n\n # Cast to float32 to as 'upsample_nearest2d_out_frame' op does not support bfloat16\n dtype = hidden_states.dtype\n if dtype == torch.bfloat16:\n hidden_states = hidden_states.to(torch.float32)\n\n # upsample_nearest_nhwc fails with large batch sizes. see https://github.com/huggingface/diffusers/issues/984\n if hidden_states.shape[0] >= 64:\n hidden_states = hidden_states.contiguous()\n\n # if `output_size` is passed we force the interpolation output\n # size and do not make use of `scale_factor=2`\n if output_size is None:\n hidden_states = F.interpolate(hidden_states, scale_factor=[1.0, 2.0, 2.0], mode=\"nearest\")\n else:\n hidden_states = F.interpolate(hidden_states, size=output_size, mode=\"nearest\")\n\n # If the input is bfloat16, we cast back to bfloat16\n if dtype == torch.bfloat16:\n hidden_states = hidden_states.to(dtype)\n\n hidden_states = self.conv(hidden_states)\n\n return hidden_states"
},
{
"identifier": "get_motion_module",
"path": "magicanimate/models/motion_module.py",
"snippet": "def get_motion_module(\n in_channels,\n motion_module_type: str, \n motion_module_kwargs: dict\n):\n if motion_module_type == \"Vanilla\":\n return VanillaTemporalModule(in_channels=in_channels, **motion_module_kwargs,) \n else:\n raise ValueError"
}
] | import torch
from torch import nn
from .attention import Transformer3DModel
from .resnet import Downsample3D, ResnetBlock3D, Upsample3D
from .motion_module import get_motion_module | 4,617 | attn_num_head_channels,
resnet_groups=None,
cross_attention_dim=None,
dual_cross_attention=False,
use_linear_projection=False,
only_cross_attention=False,
upcast_attention=False,
resnet_time_scale_shift="default",
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type
if up_block_type == "UpBlock3D":
return UpBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
prev_output_channel=prev_output_channel,
temb_channels=temb_channels,
add_upsample=add_upsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
resnet_time_scale_shift=resnet_time_scale_shift,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
elif up_block_type == "CrossAttnUpBlock3D":
if cross_attention_dim is None:
raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock3D")
return CrossAttnUpBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
prev_output_channel=prev_output_channel,
temb_channels=temb_channels,
add_upsample=add_upsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attn_num_head_channels,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
raise ValueError(f"{up_block_type} does not exist.")
class UNetMidBlock3DCrossAttn(nn.Module):
def __init__(
self,
in_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
attn_num_head_channels=1,
output_scale_factor=1.0,
cross_attention_dim=1280,
dual_cross_attention=False,
use_linear_projection=False,
upcast_attention=False,
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
super().__init__()
self.has_cross_attention = True
self.attn_num_head_channels = attn_num_head_channels
resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)
# there is always at least one resnet
resnets = [
ResnetBlock3D(
in_channels=in_channels,
out_channels=in_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
]
attentions = []
motion_modules = []
for _ in range(num_layers):
if dual_cross_attention:
raise NotImplementedError
attentions.append(
| # *************************************************************************
# This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo-
# difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B-
# ytedance Inc..
# *************************************************************************
# Adapted from https://github.com/guoyww/AnimateDiff
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def get_down_block(
down_block_type,
num_layers,
in_channels,
out_channels,
temb_channels,
add_downsample,
resnet_eps,
resnet_act_fn,
attn_num_head_channels,
resnet_groups=None,
cross_attention_dim=None,
downsample_padding=None,
dual_cross_attention=False,
use_linear_projection=False,
only_cross_attention=False,
upcast_attention=False,
resnet_time_scale_shift="default",
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
down_block_type = down_block_type[7:] if down_block_type.startswith("UNetRes") else down_block_type
if down_block_type == "DownBlock3D":
return DownBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
add_downsample=add_downsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
downsample_padding=downsample_padding,
resnet_time_scale_shift=resnet_time_scale_shift,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
elif down_block_type == "CrossAttnDownBlock3D":
if cross_attention_dim is None:
raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock3D")
return CrossAttnDownBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
add_downsample=add_downsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
downsample_padding=downsample_padding,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attn_num_head_channels,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
raise ValueError(f"{down_block_type} does not exist.")
def get_up_block(
up_block_type,
num_layers,
in_channels,
out_channels,
prev_output_channel,
temb_channels,
add_upsample,
resnet_eps,
resnet_act_fn,
attn_num_head_channels,
resnet_groups=None,
cross_attention_dim=None,
dual_cross_attention=False,
use_linear_projection=False,
only_cross_attention=False,
upcast_attention=False,
resnet_time_scale_shift="default",
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type
if up_block_type == "UpBlock3D":
return UpBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
prev_output_channel=prev_output_channel,
temb_channels=temb_channels,
add_upsample=add_upsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
resnet_time_scale_shift=resnet_time_scale_shift,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
elif up_block_type == "CrossAttnUpBlock3D":
if cross_attention_dim is None:
raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock3D")
return CrossAttnUpBlock3D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
prev_output_channel=prev_output_channel,
temb_channels=temb_channels,
add_upsample=add_upsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attn_num_head_channels,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_motion_module=use_motion_module,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
raise ValueError(f"{up_block_type} does not exist.")
class UNetMidBlock3DCrossAttn(nn.Module):
def __init__(
self,
in_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
attn_num_head_channels=1,
output_scale_factor=1.0,
cross_attention_dim=1280,
dual_cross_attention=False,
use_linear_projection=False,
upcast_attention=False,
unet_use_cross_frame_attention=None,
unet_use_temporal_attention=None,
use_motion_module=None,
motion_module_type=None,
motion_module_kwargs=None,
):
super().__init__()
self.has_cross_attention = True
self.attn_num_head_channels = attn_num_head_channels
resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)
# there is always at least one resnet
resnets = [
ResnetBlock3D(
in_channels=in_channels,
out_channels=in_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
]
attentions = []
motion_modules = []
for _ in range(num_layers):
if dual_cross_attention:
raise NotImplementedError
attentions.append( | Transformer3DModel( | 0 | 2023-12-15 01:22:37+00:00 | 8k |
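The record above centres on string-dispatch factory functions (get_down_block / get_up_block) that strip an optional "UNetRes" prefix, construct the matching block, and raise ValueError for unknown or under-specified block types. The sketch below shows only that dispatch pattern with stand-in dataclasses; it is not the real diffusers/MotionDirector code.

```python
# Minimal sketch of the string-dispatch factory pattern used above.
# PlainBlock / CrossAttnBlock are invented stand-ins for the real block modules.
from dataclasses import dataclass
from typing import Optional


@dataclass
class PlainBlock:
    num_layers: int


@dataclass
class CrossAttnBlock:
    num_layers: int
    cross_attention_dim: int


def get_block(block_type: str, num_layers: int, cross_attention_dim: Optional[int] = None):
    # Strip the optional "UNetRes" prefix, then dispatch on the remaining name.
    if block_type.startswith("UNetRes"):
        block_type = block_type[7:]
    if block_type == "PlainBlock":
        return PlainBlock(num_layers=num_layers)
    if block_type == "CrossAttnBlock":
        if cross_attention_dim is None:
            raise ValueError("cross_attention_dim must be specified for CrossAttnBlock")
        return CrossAttnBlock(num_layers=num_layers, cross_attention_dim=cross_attention_dim)
    raise ValueError(f"{block_type} does not exist.")


print(get_block("UNetResCrossAttnBlock", num_layers=2, cross_attention_dim=1280))
```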
morikeli/persona | main.py | [
{
"identifier": "facial_expression",
"path": "features/person/faces/expressions/facial_expression.py",
"snippet": "FACIAL_EXPRESSIONS = [\n 'DEFAULT',\n 'ANGRY',\n 'ANGRY_NATURAL',\n 'DEFAULT_NATURAL',\n 'FLAT_NATURAL',\n 'FROWN_NATURAL',\n 'RAISED_EXCITED',\n 'RAISED_EXCITED_NATURAL',\n 'SAD_CONCERNED',\n 'SAD_CONCERNED_NATURAL',\n 'UNI_BROW_NATURAL',\n 'UP_DOWN',\n 'UP_DOWN_NATURAL',\n]\nFACIAL_EXPRESSIONS_MOUTH = [\n 'DEFAULT',\n 'CONCERNED',\n 'DISBELIEF',\n 'EATING',\n 'GRIMACE',\n 'SAD',\n 'SCREAM_OPEN',\n 'SERIOUS',\n 'SMILE',\n 'TONGUE',\n 'TWINKLE',\n 'VOMIT',\n]"
},
{
"identifier": "add_ons",
"path": "features/fashion/accessories/add_ons.py",
"snippet": "FASHION_ACCESSORIES = [\n 'DEFAULT',\n 'KURT',\n 'PRESCRIPTION_01',\n 'PRESCRIPTION_02',\n 'ROUND',\n 'SUNGLASSES',\n 'WAYFARERS',\n]"
},
{
"identifier": "clothes",
"path": "features/fashion/clothing/clothes.py",
"snippet": "CLOTHES_CATEGORIES = [\n 'BLAZER_SHIRT',\n 'BLAZER_SWEATER',\n 'COLLAR_SWEATER',\n 'GRAPHIC_SHIRT',\n 'HOODIE',\n 'OVERALL',\n 'SHIRT_CREW_NECK',\n 'SHIRT_SCOOP_NECK',\n 'SHIRT_V_NECK',\n]\nCLOTHES_COLOR = [\n 'BLACK',\n 'BLUE_01',\n 'BLUE_02',\n 'BLUE_03',\n 'GRAY_01',\n 'GRAY_02',\n 'HEATHER',\n 'PASTEL_BLUE',\n 'PASTEL_GREEN',\n 'PASTEL_ORANGE',\n 'PASTEL_RED',\n 'PASTEL_YELLOW',\n 'PINK',\n 'RED',\n 'WHITE',\n]\nCLOTHES_GRAPHICS = [\n 'BAT',\n 'BEAR',\n 'CUMBIA',\n 'DEER',\n 'DIAMOND',\n 'HOLA',\n 'PIZZA',\n 'RESIST',\n 'SELENA',\n 'SKULL',\n 'SKULL_OUTLINE',\n]"
},
{
"identifier": "hats",
"path": "features/fashion/clothing/hats.py",
"snippet": "HEADWEAR = [\n 'EYE_PATCH',\n 'HAT',\n 'HIJAB',\n 'TURBAN',\n 'WINTER_HAT1',\n 'WINTER_HAT2',\n 'WINTER_HAT3',\n 'WINTER_HAT4',\n]\nHAT_COLOR = [\n 'BLACK',\n 'BLUE_01',\n 'BLUE_02',\n 'BLUE_03',\n 'GRAY_01',\n 'GRAY_02',\n 'HEATHER',\n 'PASTEL_BLUE',\n 'PASTEL_GREEN',\n 'PASTEL_ORANGE',\n 'PASTEL_RED',\n 'PASTEL_YELLOW',\n 'PINK',\n 'RED',\n 'WHITE',\n\n]"
},
{
"identifier": "beard",
"path": "features/fashion/hairstyles/beard.py",
"snippet": "BEARD = [\n 'DEFAULT',\n 'BEARD_MEDIUM',\n 'BEARD_LIGHT',\n 'BEARD_MAJESTIC',\n 'MOUSTACHE_FANCY',\n 'MOUSTACHE_MAGNUM',\n]\nBEARD_COLOR = [\n 'AUBURN',\n 'BLACK',\n 'BLONDE',\n 'BLONDE_GOLDEN',\n 'BROWN',\n 'BROWN_DARK',\n 'PASTEL_PINK',\n 'PLATINUM',\n 'RED',\n 'SILVER_GRAY',\n]"
},
{
"identifier": "hair",
"path": "features/fashion/hairstyles/hair.py",
"snippet": "HAIR_COLOR = [\n 'AUBURN',\n 'BLACK',\n 'BLONDE',\n 'BLONDE_GOLDEN',\n 'BROWN',\n 'BROWN_DARK',\n 'PASTEL_PINK',\n 'PLATINUM',\n 'RED',\n 'SILVER_GRAY',\n]\nHAIR_STYLES = [\n 'NO_HAIR',\n 'LONG_HAIR_BIG_HAIR',\n 'LONG_HAIR_BOB',\n 'LONG_HAIR_BUN',\n 'LONG_HAIR_CURLY',\n 'LONG_HAIR_CURVY',\n 'LONG_HAIR_DREADS',\n 'LONG_HAIR_FRIDA',\n 'LONG_HAIR_FRO',\n 'LONG_HAIR_FRO_BAND',\n 'LONG_HAIR_NOT_TOO_LONG',\n 'LONG_HAIR_SHAVED_SIDES',\n 'LONG_HAIR_MIA_WALLACE',\n 'LONG_HAIR_STRAIGHT',\n 'LONG_HAIR_STRAIGHT2',\n 'LONG_HAIR_STRAIGHT_STRAND',\n 'SHORT_HAIR_DREADS_01',\n 'SHORT_HAIR_DREADS_02',\n 'SHORT_HAIR_FRIZZLE',\n 'SHORT_HAIR_SHAGGY_MULLET',\n 'SHORT_HAIR_SHORT_CURLY',\n 'SHORT_HAIR_SHORT_FLAT',\n 'SHORT_HAIR_SHORT_ROUND',\n 'SHORT_HAIR_SHORT_WAVED',\n 'SHORT_HAIR_SIDES',\n 'SHORT_HAIR_THE_CAESAR',\n 'SHORT_HAIR_THE_CAESAR_SIDE_PART',\n]"
},
{
"identifier": "skins",
"path": "features/person/complexion/skins.py",
"snippet": "SKIN_COLOR = [\n 'BLACK',\n 'BROWN',\n 'DARK_BROWN',\n 'LIGHT',\n 'PALE',\n 'TANNED',\n 'YELLOW',\n \n]"
},
{
"identifier": "face",
"path": "features/person/faces/face.py",
"snippet": "EYES = [\n 'DEFAULT',\n 'CLOSE',\n 'CRY',\n 'DIZZY',\n 'EYE_ROLL',\n 'HAPPY',\n 'HEARTS',\n 'SIDE',\n 'SQUINT',\n 'SURPRISED',\n 'WINK',\n 'WINK_WACKY',\n]"
},
{
"identifier": "random_avatar",
"path": "avatar/avatar.py",
"snippet": "def random_avatar():\n \"\"\" This is a function that automatically generates an avatar using random avatar features. \"\"\"\n\n features = {\n 'accessories': randrange(0, len(add_ons.FASHION_ACCESSORIES)),\n 'background': randrange(0, len(['CIRCLE', 'TRANSPARENT'])),\n 'beard': randrange(0, len(beard.BEARD)),\n 'beard_color': randrange(0, len(beard.BEARD_COLOR)),\n 'clothing': randrange(0, len(clothes.CLOTHES_CATEGORIES)),\n 'clothes_color': randrange(0, len(clothes.CLOTHES_COLOR)),\n 'clothes_art': randrange(0, len(clothes.CLOTHES_GRAPHICS)),\n 'eyes': randrange(0, len(face.EYES)),\n 'face_expression': randrange(0, len(facial_expression.FACIAL_EXPRESSIONS)),\n 'hair': randrange(0, len(hair.HAIR_STYLES)),\n 'headwear': randrange(0, len(hats.HEADWEAR)),\n 'hair_and_headwear': randrange(0, len(hair.HAIR_STYLES + hats.HEADWEAR)),\n 'hair_color': randrange(0, len(hair.HAIR_COLOR)),\n 'hat_color': randrange(0, len(hats.HAT_COLOR)),\n 'mouth': randrange(0, len(facial_expression.FACIAL_EXPRESSIONS_MOUTH)),\n 'skin': randrange(0, len(skins.SKIN_COLOR)),\n }\n\n return features"
},
{
"identifier": "custom_avatar",
"path": "avatar/avatar.py",
"snippet": "def custom_avatar(features):\n \"\"\" This is a function that generates an avatar depending on the user's input. \"\"\"\n\n avatar = pa.PyAvataaar(\n accessories_type=eval(f'pa.AccessoriesType.{features[\"accessories\"]}'),\n clothe_type=eval(f'pa.ClotheType.{features[\"clothing\"]}'),\n clothe_color=eval(f'pa.Color.{features[\"clothes_color\"]}'),\n clothe_graphic_type=eval(f'pa.ClotheGraphicType.{features[\"clothes_art\"]}'),\n eye_type=eval(f'pa.EyesType.{features[\"eyes\"]}'),\n eyebrow_type=eval(f'pa.EyebrowType.{features[\"face_expression\"]}'),\n hair_color=eval(f'pa.HairColor.{features[\"hair_color\"]}'),\n hat_color=eval(f'pa.Color.{features[\"hat_color\"]}'),\n facial_hair_type=eval(f'pa.FacialHairType.{features[\"beard\"]}'),\n facial_hair_color=eval(f'pa.HairColor.{features[\"beard_color\"]}'),\n mouth_type=eval(f'pa.MouthType.{features[\"mouth\"]}'),\n skin_color=eval(f'pa.SkinColor.{features[\"skin\"]}'),\n style=eval(f'pa.AvatarStyle.{features[\"bg\"]}'),\n top_type=eval(f'pa.TopType.SHORT_HAIR_SHORT_FLAT.{features[\"hair_and_headwear\"]}'),\n\n )\n\n render_img = avatar.render_png_file(IMAGE_FILE)\n image = Image.open(IMAGE_FILE)\n\n return update_avatar_image(image)"
},
{
"identifier": "christmas_festive_animation",
"path": "animations/utils.py",
"snippet": "def christmas_festive_animation():\n \"\"\" Display snowflakes animaton during Christmas festive season. \"\"\"\n \n current_year = dt.now().date()\n previous_year = dt.today().year - 1\n current_festive_date = dt(year=previous_year, month=12, day=20).date()\n date_diff = current_year - current_festive_date\n\n if (date_diff.days >= 0) and (date_diff.days <= 21):\n return st.snow()"
},
{
"identifier": "download_avatar",
"path": "images/image.py",
"snippet": "def download_avatar(avatar_image):\n \"\"\" This function allows one to download their avatars.\"\"\"\n\n with open(avatar_image, 'rb') as image_file:\n st.download_button(\n label='Download avatar',\n type=\"primary\",\n data=image_file,\n file_name=avatar_image,\n use_container_width=True\n )\n \n return image_file"
}
] | from features.person.faces.expressions import facial_expression as fe
from features.fashion.accessories import add_ons
from features.fashion.clothing import clothes, hats
from features.fashion.hairstyles import beard, hair
from features.person.complexion import skins
from features.person.faces import face
from avatar.avatar import random_avatar, custom_avatar
from animations.utils import christmas_festive_animation
from images.image import download_avatar
import streamlit as st | 3,773 | # download_avatar()
st.balloons()
if cols_btn[0].button('Generate random avatar', use_container_width=True):
features_indices = random_avatar()
with tabs[0]:
st.caption('Add beard, hairstyle or hair cut')
avatar_hair = st.selectbox(
label=':haircut: Hair',
options=hair.HAIR_STYLES,
index=features_indices["hair"] if features_indices else 0,
)
avatar_beard = st.selectbox(
label=':bearded_person: Beard',
options=beard.BEARD,
index=features_indices["beard"] if features_indices else 0,
)
with tabs[1]:
st.caption('Add eyes or facial expression.')
avatar_eyes = st.selectbox(
label=':eyes: Eyes',
options=face.EYES,
index=features_indices["eyes"] if features_indices else 0,
)
avatar_facial_expr = st.selectbox(
label=':smiley: Facial expression',
options=fe.FACIAL_EXPRESSIONS,
index=features_indices["face_expression"] if features_indices else 0,
)
avatar_mouth = st.selectbox(
label=':lips: Mouth',
options=fe.FACIAL_EXPRESSIONS_MOUTH,
index=features_indices["mouth"] if features_indices else 0,
)
with tabs[2]:
st.caption("What are your favorite fashion trends?")
tabs_cols = st.columns([6, 6])
avatar_addons = tabs_cols[0].selectbox(
label=':sunglasses: Accessories',
options=add_ons.FASHION_ACCESSORIES,
index=features_indices["accessories"] if features_indices else 0,
)
avatar_clothe = tabs_cols[0].selectbox(
label=':tshirt: Clothes',
options=clothes.CLOTHES_CATEGORIES,
index=features_indices["clothing"] if features_indices else 0,
)
avatar_clothe_pattern = tabs_cols[1].selectbox(
label=':art: Clothe pattern',
options=clothes.CLOTHES_GRAPHICS,
index=features_indices["clothes_art"] if features_indices else 0,
)
avatar_hat = tabs_cols[1].selectbox(
label=':face_with_cowboy_hat: Headwear',
options=hats.HEADWEAR,
index=features_indices["headwear"] if features_indices else 0,
)
with tabs[3]:
st.caption('Play with colors')
tabs_cols = st.columns([6, 6])
avatar_skin_color = st.selectbox(
label='Skin complexion',
options=skins.SKIN_COLOR,
index=features_indices["skin"] if features_indices else 0,
)
avatar_hair_color = tabs_cols[0].selectbox(
label='Dye/Hair color',
options=hair.HAIR_COLOR,
index=features_indices["hair_color"] if features_indices else 0,
)
avatar_beard_color = tabs_cols[0].selectbox(
label='Beard color',
options=beard.BEARD_COLOR,
index=features_indices["beard_color"] if features_indices else 0,
)
avatar_clothes_color = tabs_cols[1].selectbox(
label='Clothes color',
options=clothes.CLOTHES_COLOR,
index=features_indices["clothes_color"] if features_indices else 0,
)
avatar_hat_color = tabs_cols[1].selectbox(
label='Hat color',
options=hats.HAT_COLOR,
index=features_indices["hat_color"] if features_indices else 0,
)
with tabs[4]:
st.caption('Add or remove background color in your avatar')
avatar_bg = st.selectbox(
label='Background',
options=('CIRCLE', 'TRANSPARENT'),
index=features_indices["background"] if features_indices else 0,
)
# selected avatar features
avatar_features = {
'accessories': avatar_addons,
'bg': avatar_bg,
'beard': avatar_beard,
'beard_color': avatar_beard_color,
'clothing': avatar_clothe,
'clothes_color': avatar_clothes_color,
'clothes_art': avatar_clothe_pattern,
'eyes': avatar_eyes,
'face_expression': avatar_facial_expr,
'hair_and_headwear': avatar_hair if avatar_hair != 'NO_HAIR' else avatar_hat, # display a hat if avatar_hair is "NO_HAIR"
'hair_color': avatar_hair_color,
'hat_color': avatar_hat_color,
'mouth': avatar_mouth,
'skin': avatar_skin_color,
}
|
# webpage configuration
st.set_page_config(page_title='Persona', page_icon=':busts_in_silhouette:', layout='centered')
with open('static/css/styles.css') as stylesheet:
st.markdown(f'<style>{stylesheet.read()}</style>', unsafe_allow_html=True)
def main(features_indices: dict = None):
""" This is the main function that uses streamlit to create a dynamic web page. """
# navigation tabs
tabs = st.tabs(['Beard & Hair', 'Facial features', 'Fashion trends', 'Color', 'Background style'])
st.divider()
# "Generate random avatar" & "Download button" buttons column
cols_btn = st.columns([6, 6])
with cols_btn[1]:
download_btn = download_avatar()
if download_btn: # display download button by default
# download_avatar()
st.balloons()
if cols_btn[0].button('Generate random avatar', use_container_width=True):
features_indices = random_avatar()
with tabs[0]:
st.caption('Add beard, hairstyle or hair cut')
avatar_hair = st.selectbox(
label=':haircut: Hair',
options=hair.HAIR_STYLES,
index=features_indices["hair"] if features_indices else 0,
)
avatar_beard = st.selectbox(
label=':bearded_person: Beard',
options=beard.BEARD,
index=features_indices["beard"] if features_indices else 0,
)
with tabs[1]:
st.caption('Add eyes or facial expression.')
avatar_eyes = st.selectbox(
label=':eyes: Eyes',
options=face.EYES,
index=features_indices["eyes"] if features_indices else 0,
)
avatar_facial_expr = st.selectbox(
label=':smiley: Facial expression',
options=fe.FACIAL_EXPRESSIONS,
index=features_indices["face_expression"] if features_indices else 0,
)
avatar_mouth = st.selectbox(
label=':lips: Mouth',
options=fe.FACIAL_EXPRESSIONS_MOUTH,
index=features_indices["mouth"] if features_indices else 0,
)
with tabs[2]:
st.caption("What are your favorite fashion trends?")
tabs_cols = st.columns([6, 6])
avatar_addons = tabs_cols[0].selectbox(
label=':sunglasses: Accessories',
options=add_ons.FASHION_ACCESSORIES,
index=features_indices["accessories"] if features_indices else 0,
)
avatar_clothe = tabs_cols[0].selectbox(
label=':tshirt: Clothes',
options=clothes.CLOTHES_CATEGORIES,
index=features_indices["clothing"] if features_indices else 0,
)
avatar_clothe_pattern = tabs_cols[1].selectbox(
label=':art: Clothe pattern',
options=clothes.CLOTHES_GRAPHICS,
index=features_indices["clothes_art"] if features_indices else 0,
)
avatar_hat = tabs_cols[1].selectbox(
label=':face_with_cowboy_hat: Headwear',
options=hats.HEADWEAR,
index=features_indices["headwear"] if features_indices else 0,
)
with tabs[3]:
st.caption('Play with colors')
tabs_cols = st.columns([6, 6])
avatar_skin_color = st.selectbox(
label='Skin complexion',
options=skins.SKIN_COLOR,
index=features_indices["skin"] if features_indices else 0,
)
avatar_hair_color = tabs_cols[0].selectbox(
label='Dye/Hair color',
options=hair.HAIR_COLOR,
index=features_indices["hair_color"] if features_indices else 0,
)
avatar_beard_color = tabs_cols[0].selectbox(
label='Beard color',
options=beard.BEARD_COLOR,
index=features_indices["beard_color"] if features_indices else 0,
)
avatar_clothes_color = tabs_cols[1].selectbox(
label='Clothes color',
options=clothes.CLOTHES_COLOR,
index=features_indices["clothes_color"] if features_indices else 0,
)
avatar_hat_color = tabs_cols[1].selectbox(
label='Hat color',
options=hats.HAT_COLOR,
index=features_indices["hat_color"] if features_indices else 0,
)
with tabs[4]:
st.caption('Add or remove background color in your avatar')
avatar_bg = st.selectbox(
label='Background',
options=('CIRCLE', 'TRANSPARENT'),
index=features_indices["background"] if features_indices else 0,
)
# selected avatar features
avatar_features = {
'accessories': avatar_addons,
'bg': avatar_bg,
'beard': avatar_beard,
'beard_color': avatar_beard_color,
'clothing': avatar_clothe,
'clothes_color': avatar_clothes_color,
'clothes_art': avatar_clothe_pattern,
'eyes': avatar_eyes,
'face_expression': avatar_facial_expr,
'hair_and_headwear': avatar_hair if avatar_hair != 'NO_HAIR' else avatar_hat, # display a hat if avatar_hair is "NO_HAIR"
'hair_color': avatar_hair_color,
'hat_color': avatar_hat_color,
'mouth': avatar_mouth,
'skin': avatar_skin_color,
}
| return custom_avatar(avatar_features) | 9 | 2023-12-19 09:39:04+00:00 | 8k |
JonatanNevo/better-iptables | iptables/iptables.py | [
{
"identifier": "ConnbytesDirection",
"path": "iptables/enums.py",
"snippet": "class ConnbytesDirection(str, Enum):\n ORIGINAL = \"original\"\n REPLY = \"reply\"\n BOTH = \"both\""
},
{
"identifier": "ConnbytesMode",
"path": "iptables/enums.py",
"snippet": "class ConnbytesMode(str, Enum):\n BYTES = \"bytes\"\n PACKETS = \"packets\"\n AVGERAGE = \"avgpkt\""
},
{
"identifier": "ConntrackStates",
"path": "iptables/enums.py",
"snippet": "class ConntrackStates(str, Enum):\n INVALID = \"INVALID\"\n ESTABLISHED = \"ESTABLISHED\"\n RELATED = \"RELATED\"\n UNTRACKED = \"UNTRACKED\"\n SNAT = \"SNAT\"\n DNAT = \"DNAT\"\n NEW = \"NEW\""
},
{
"identifier": "ConntrackStatus",
"path": "iptables/enums.py",
"snippet": "class ConntrackStatus(str, Enum):\n NONE = \"NONE\"\n EXPECTED = \"EXPECTED\"\n SEEN_REPLY = \"SEEN_REPLY\"\n ASSURED = \"ASSURED\"\n CONFIRMED = \"CONFIRMED\""
},
{
"identifier": "ConntrackDirection",
"path": "iptables/enums.py",
"snippet": "class ConntrackDirection(str, Enum):\n ORIGINAL = \"original\"\n REPLY = \"reply\""
},
{
"identifier": "LimitUnits",
"path": "iptables/enums.py",
"snippet": "class LimitUnits(str, Enum):\n SECOND = \"second\"\n MINUTE = \"minute\"\n HOUR = \"hour\"\n DAY = \"day\""
},
{
"identifier": "State",
"path": "iptables/enums.py",
"snippet": "class State(str, Enum):\n INVALID = \"INVALID\"\n ESTABLISHED = \"ESTABLISHED\"\n NEW = \"NEW\"\n RELATED = \"RELATED\"\n UNTRACKED = \"UNTRACKED\""
},
{
"identifier": "TcpFlags",
"path": "iptables/enums.py",
"snippet": "class TcpFlags(str, Enum):\n SYN = \"SYN\"\n ACK = \"ACK\"\n FIN = \"FIN\"\n RST = \"RST\"\n URG = \"URG\"\n PSH = \"PSH\"\n ALL = \"ALL\"\n NONE = \"NONE\""
},
{
"identifier": "Targets",
"path": "iptables/enums.py",
"snippet": "class Targets(str, Enum):\n ACCEPT = \"ACCEPT\"\n DROP = \"DROP\"\n RETURN = \"RETURN\"\n AUDIT = \"AUDIT\"\n CHECKSUM = \"CHECKSUM\"\n CLASSIFY = \"CLASSIFY\"\n CLUSTERIP = \"CLUSTERIP\"\n CONNMARK = \"CONNMARK\"\n CONNSECMARK = \"CONNSECMARK\"\n CT = \"CT\"\n DNAT = \"DNAT\"\n DNPT = \"DNPT\"\n DSCP = \"DSCP\"\n ECN = \"ECN\"\n HL = \"HL\"\n HMARK = \"HMARK\"\n IDLETIMER = \"IDLETIMER\"\n LED = \"LED\"\n LOG = \"LOG\"\n MARK = \"MARK\"\n MASQUERADE = \"MASQUERADE\"\n NETMAP = \"NETMAP\"\n NFLOG = \"NFLOG\"\n NFQUEUE = \"NFQUEUE\"\n NOTRACK = \"NOTRACK\"\n RATEEST = \"RATEEST\"\n REDIRECT = \"REDIRECT\"\n REJECT = \"REJECT\"\n SECMARK = \"SECMARK\"\n SET = \"SET\"\n SNAT = \"SNAT\"\n SNPT = \"SNPT\"\n SYNPROXY = \"SYNPROXY\"\n TCPMSS = \"TCPMSS\"\n TCPOPTSTRIP = \"TCPOPTSTRIP\"\n TEE = \"TEE\"\n TOS = \"TOS\"\n TPROXY = \"TPROXY\"\n TRACE = \"TRACE\"\n TTL = \"TTL\"\n ULOG = \"ULOG\""
},
{
"identifier": "Protocols",
"path": "iptables/enums.py",
"snippet": "class Protocols(str, Enum):\n TCP = \"tcp\"\n UDP = \"udp\"\n ICMP = \"icmp\"\n ALL = \"all\""
},
{
"identifier": "Tables",
"path": "iptables/enums.py",
"snippet": "class Tables(str, Enum):\n FILTER = \"filter\"\n NAT = \"nat\"\n MANGLE = \"mangle\"\n RAW = \"raw\"\n SECURITY = \"security\""
},
{
"identifier": "Chains",
"path": "iptables/enums.py",
"snippet": "class Chains(str, Enum):\n INPUT = \"INPUT\"\n FORWARD = \"FORWARD\"\n OUTPUT = \"OUTPUT\"\n PREROUTING = \"PREROUTING\"\n POSTROUTING = \"POSTROUTING\""
},
{
"identifier": "Actions",
"path": "iptables/enums.py",
"snippet": "class Actions(str, Enum):\n APPEND = \"-A\"\n DELETE = \"-D\"\n INSERT = \"-I\"\n REPLACE = \"-R\"\n CHECK = \"-C\"\n LIST = \"-L\"\n FLUSH = \"-F\"\n ZERO = \"-Z\"\n NEW_CHAIN = \"-N\"\n DELETE_CHAIN = \"-X\"\n RENAME_CHAIN = \"-E\"\n POLICY = \"-P\"\n LIST_RULES = \"-S\""
},
{
"identifier": "RejectType",
"path": "iptables/enums.py",
"snippet": "class RejectType(str, Enum):\n ICMP_NET_UNREACHABLE = \"icmp-net-unreachable\"\n ICMP_HOST_UNREACHABLE = \"icmp-host-unreachable\"\n ICMP_PORT_UNREACHABLE = \"icmp-port-unreachable\"\n ICMP_PROT_UNREACHABLE = \"icmp-proto-unreachable\"\n ICMP_NET_PROHIBITED = \"icmp-net-prohibited\"\n ICMP_HOST_PROHIBITED = \"icmp-host-prohibited\"\n ICMP_ADMIN_PROHIBITED = \"icmp-admin-prohibited\"\n TCP_RESET = \"tcp-reset\"\n ICMP6_NO_ROUTE = \"icmp6-no-route\"\n NO_ROUTE = \"no-route\"\n ICMP6_ADM_PROHIBITED = \"icmp6-adm-prohibited\"\n ADM_PROHIBITED = \"adm-prohibited\"\n ICMP6_ADDR_UNREACHABLE = \"icmp6-addr-unreachable\"\n ADDR_UNREACHABLE = \"addr-unreach\"\n ICMP6_PORT_UNREACHABLE = \"icmp6-port-unreachable\""
},
{
"identifier": "IPTablesError",
"path": "iptables/exceptions.py",
"snippet": "class IPTablesError(Exception):\n pass"
},
{
"identifier": "IPVersionError",
"path": "iptables/exceptions.py",
"snippet": "class IPVersionError(IPTablesError):\n def __init__(self):\n super().__init__(\"ipv4 and ipv6 cannot be both True\")"
},
{
"identifier": "ConnbytesError",
"path": "iptables/exceptions.py",
"snippet": "class ConnbytesError(IPTablesError):\n def __init__(self):\n super().__init__(\"connbytes must be in the format of 'bytes:bytes'\")"
},
{
"identifier": "ConnlimitAddrError",
"path": "iptables/exceptions.py",
"snippet": "class ConnlimitAddrError(IPTablesError):\n def __init__(self):\n super().__init__(\"saddr and daddr cannot be both True\")"
},
{
"identifier": "MultiportSourceAndDestinationError",
"path": "iptables/exceptions.py",
"snippet": "class MultiportSourceAndDestinationError(IPTablesError):\n def __init__(self):\n super().__init__(\"source_ports and destination_ports cannot be both True\")"
},
{
"identifier": "MultiportPortsAndOtherError",
"path": "iptables/exceptions.py",
"snippet": "class MultiportPortsAndOtherError(IPTablesError):\n def __init__(self):\n super().__init__(\"ports cannot be used with source_ports or destination_ports\")"
},
{
"identifier": "MultiportFormatError",
"path": "iptables/exceptions.py",
"snippet": "class MultiportFormatError(IPTablesError):\n def __init__(self):\n super().__init__(\"ports must be an int or a string in format of 'port:port'\")"
}
] | import dataclasses
import re
from enum import Enum
from typing import Optional, Union, List, Tuple
from typing_extensions import Self
from iptables.enums import ConnbytesDirection, ConnbytesMode, ConntrackStates, ConntrackStatus, ConntrackDirection, \
LimitUnits, State, TcpFlags, Targets, Protocols, Tables, Chains, Actions, RejectType
from iptables.exceptions import IPTablesError, IPVersionError, ConnbytesError, ConnlimitAddrError, \
MultiportSourceAndDestinationError, MultiportPortsAndOtherError, MultiportFormatError | 4,255 | # owner, physdev, pkttype, policty, qouta, rateest, realm, recent, rpfilter, rt, sctp, set, socket, statistics,
# tcpmss, time, tos, ttl, u32
def comment(self, comment: str) -> Self:
self._modules.append(Module(module="comment", parameters=[("comment", f'"{comment}"')]))
return self
def connbytes(self, connbytes: str, mode: ConnbytesMode, direction: ConnbytesDirection) -> Self:
if not re.match(r"\d*:\d*", connbytes):
raise ConnbytesError
self._modules.append(Module(module="connbytes", parameters=[("connbytes", connbytes), ("connbytes-mode", mode),
("connbytes-dir", direction)]))
return self
def connlimit(
self,
upto: Optional[int] = None,
above: Optional[int] = None,
mask: Optional[int] = None,
sadder: bool = True,
daddr: bool = False
) -> Self:
if sadder and daddr:
raise ConnlimitAddrError
parameters = []
if upto:
parameters.append(("connlimit-upto", str(upto)))
if above:
parameters.append(("connlimit-above", str(above)))
if mask:
parameters.append(("connlimit-mask", str(mask)))
if sadder:
parameters.append(("connlimit-saddr", None))
if daddr:
parameters.append(("connlimit-daddr", None))
self._modules.append(Module(module="connlimit", parameters=parameters))
return self
def connmark(self, mark: int, mask: Optional[int] = None) -> Self:
if mask:
parameters = [("mark", f"{mark}/{mask}")]
else:
parameters = [("mark", mark)]
self._modules.append(Module(module="connmark", parameters=parameters))
return self
def conntrack(
self,
*,
state: Optional[List[ConntrackStates]] = None,
status: Optional[List[ConntrackStatus]] = None,
protocol: Optional[Protocols] = None,
original_source: Optional[str] = None,
original_source_port: Optional[int] = None,
original_destination: Optional[str] = None,
original_destination_port: Optional[int] = None,
reply_source: Optional[str] = None,
reply_source_port: Optional[int] = None,
reply_destination: Optional[str] = None,
reply_destination_port: Optional[int] = None,
expire: Optional[int] = None,
direction: Optional[ConntrackDirection] = None,
) -> Self:
parameters = []
if state:
parameters.append(("ctstate", ",".join(state)))
if status:
parameters.append(("ctstatus", ",".join(status)))
if protocol:
parameters.append(("ctproto", protocol))
if original_source:
parameters.append(("ctorigsrc", original_source))
if original_source_port:
parameters.append(("ctorigsrcport", original_source_port))
if original_destination:
parameters.append(("ctorigdst", original_destination))
if original_destination_port:
parameters.append(("ctorigdstport", original_destination_port))
if reply_source:
parameters.append(("ctreplsrc", reply_source))
if reply_source_port:
parameters.append(("ctreplsrcport", reply_source_port))
if reply_destination:
parameters.append(("ctrepldst", reply_destination))
if reply_destination_port:
parameters.append(("ctrepldstport", reply_destination_port))
if expire:
parameters.append(("ctexpire", expire))
if direction:
parameters.append(("ctdir", direction))
self._modules.append(Module(module="conntrack", parameters=parameters))
return self
def cpu(self, cpu: int) -> Self:
self._modules.append(Module(module="cpu", parameters=[("cpu", str(cpu))]))
return self
def limit(self, rate: int = 3, units: LimitUnits = LimitUnits.HOUR, burst: int = 5) -> Self:
self._modules.append(Module(module="limit", parameters=[("limit", f"{rate}/{units}"), ("limit-burst", burst)]))
return self
def mac(self, mac: str) -> Self:
self._modules.append(Module(module="mac", parameters=[("mac-source", mac)]))
return self
def mark(self, mark: int, mask: Optional[int] = None) -> Self:
if mask:
parameters = [("mark", f"{mark}/{mask}")]
else:
parameters = [("mark", mark)]
self._modules.append(Module(module="mark", parameters=parameters))
return self
def multiport(
self,
source_ports: Optional[List[Union[int, str]]] = None,
destination_ports: Optional[List[Union[int, str]]] = None,
ports: Optional[List[Union[int, str]]] = None
) -> Self:
if source_ports and destination_ports:
|
@dataclasses.dataclass(frozen=True)
class Module:
module: str
parameters: List[Tuple[str, str]] = dataclasses.field(default_factory=list)
def build(self) -> str:
parameters = []
for argument, value in self.parameters:
if value:
parameters.append(f"--{argument} {value}")
else:
parameters.append(f"--{argument}")
return f"-m {self.module} {' '.join(parameters)}"
@dataclasses.dataclass(frozen=True)
class Flags:
ipv4: bool = True
ipv6: bool = False
fragment: bool = False
lock: bool = False # same as --wait
verbose: bool = False
resolve: bool = True # same as --numeric
exact: bool = False
def __post_init__(self) -> None:
if self.ipv4 and self.ipv6:
raise IPVersionError
def build(self) -> str:
flags = []
if self.fragment:
flags.append("-f")
if self.ipv4:
flags.append("-4")
elif self.ipv6:
flags.append("-6")
if self.lock:
flags.append("-w")
if self.verbose:
flags.append("-v")
if not self.resolve:
flags.append("-n")
if self.exact:
flags.append("-x")
return " ".join(flags)
def __str__(self) -> str:
return self.build()
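# Editor's note: an illustrative check, not part of the original file, of how the Flags
# dataclass above is rendered into command-line switches.
f = Flags(verbose=True, resolve=False)   # keep the ipv4 default
assert f.build() == "-4 -v -n"           # -n because resolve=False maps to --numeric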
@dataclasses.dataclass(frozen=True)
class Matches:
# TODO: add set-counters
protocol: Optional[Protocols] = None
source_host: Optional[str] = None
source_port: Optional[int] = None
destination_host: Optional[str] = None
destination_port: Optional[int] = None
in_interface: Optional[str] = None
out_interface: Optional[str] = None
def build(self) -> str:
matches = []
if self.protocol:
matches.append(f"-p {self.protocol}")
if self.source_host:
matches.append(f"-s {self.source_host}")
if self.source_port:
matches.append(f"--sport {self.source_port}")
if self.destination_host:
matches.append(f"-d {self.destination_host}")
if self.destination_port:
matches.append(f"--dport {self.destination_port}")
if self.in_interface:
matches.append(f"-i {self.in_interface}")
if self.out_interface:
matches.append(f"-o {self.out_interface}")
return " ".join(matches)
def __str__(self) -> str:
return self.build()
def __bool__(self) -> bool:
return any([self.protocol, self.source_host, self.source_port, self.destination_host, self.destination_port,
self.in_interface, self.out_interface])
@dataclasses.dataclass(frozen=True)
class Target:
target: Targets
parameters: List[Tuple[str, str]] = dataclasses.field(default_factory=list)
def build(self) -> str:
parameters = []
for argument, value in self.parameters:
if value:
parameters.append(f"--{argument} {value}")
else:
parameters.append(f"--{argument}")
if parameters:
return f"-j {self.target} {' '.join(parameters)}"
else:
return f"-j {self.target}"
def __str__(self) -> str:
return self.build()
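# Editor's note: a quick illustration, not part of the original file, of Target.build();
# plain strings are used so the expected output does not depend on how a given Python
# version formats str-based enums inside f-strings.
t = Target(target="REJECT", parameters=[("reject-with", "tcp-reset")])
assert t.build() == "-j REJECT --reject-with tcp-reset"

t2 = Target(target="ACCEPT")             # no parameters -> just the jump target
assert t2.build() == "-j ACCEPT"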
def _get_value(value: Union[Enum, str]) -> str:
if isinstance(value, Enum):
return value.value
else:
return value
class IPTablesRule:
def __init__(
self,
*,
table: Tables = Tables.FILTER,
chain: Optional[Union[str, Chains]] = None,
action: Optional[Actions] = None,
target: Optional[Target] = None,
flags: Flags = Flags(),
matches: Matches = Matches(),
) -> None:
self._table = table
self._chain = chain
self._action = action
self._target = target
self._flags = flags
self._matches = matches
self._modules = []
# region base
def table(self, table: Tables) -> Self:
self._table = table
return self
def chain(self, chain: Union[str, Chains]) -> Self:
self._chain = chain
return self
def action(self, action: Actions) -> Self:
self._action = action
return self
def target(self, target: Target) -> Self:
self._target = target
return self
# endregion
# region actions
def append(self, chain: Optional[Union[str, Chains]]) -> Self:
self._action = Actions.APPEND
if chain:
self._chain = chain
return self
def delete(self, chain: Optional[Union[str, Chains]]) -> Self:
self._action = Actions.DELETE
if chain:
self._chain = chain
return self
def insert(self, chain: Optional[Union[str, Chains]]) -> Self:
self._action = Actions.INSERT
if chain:
self._chain = chain
return self
def replace(self, chain: Optional[Union[str, Chains]]) -> Self:
self._action = Actions.REPLACE
if chain:
self._chain = chain
return self
def check(self, chain: Optional[Union[str, Chains]]) -> Self:
self._action = Actions.CHECK
if chain:
self._chain = chain
return self
def list(self, chain: Optional[Union[str, Chains]]) -> Self:
self._action = Actions.LIST
if chain:
self._chain = chain
return self
def flush(self, chain: Optional[Union[str, Chains]]) -> Self:
self._action = Actions.FLUSH
if chain:
self._chain = chain
return self
def zero(self, chain: Optional[Union[str, Chains]]) -> Self:
self._action = Actions.ZERO
if chain:
self._chain = chain
return self
def new_chain(self, chain: Optional[Union[str, Chains]]) -> Self:
self._action = Actions.NEW_CHAIN
if chain:
self._chain = chain
return self
def delete_chain(self, chain: Optional[Union[str, Chains]]) -> Self:
self._action = Actions.DELETE_CHAIN
if chain:
self._chain = chain
return self
def rename_chain(self, chain: Optional[Union[str, Chains]]) -> Self:
self._action = Actions.RENAME_CHAIN
if chain:
self._chain = chain
return self
def policy(self, chain: Optional[Union[str, Chains]]) -> Self:
self._action = Actions.POLICY
if chain:
self._chain = chain
return self
def list_rules(self, chain: Optional[Union[str, Chains]]) -> Self:
self._action = Actions.LIST_RULES
if chain:
self._chain = chain
return self
# endregion
# region flags
def ipv4(self, enable: bool = True) -> Self:
self._flags = dataclasses.replace(self._flags, ipv4=enable, ipv6=not enable)
return self
def ipv6(self, enable: bool = True) -> Self:
self._flags = dataclasses.replace(self._flags, ipv6=enable, ipv4=not enable)
return self
def fragment(self, enable: bool = True) -> Self:
self._flags = dataclasses.replace(self._flags, fragment=enable)
return self
def lock(self, enable: bool = True) -> Self:
self._flags = dataclasses.replace(self._flags, lock=enable)
return self
def verbose(self, enable: bool = True) -> Self:
self._flags = dataclasses.replace(self._flags, verbose=enable)
return self
def resolve(self, enable: bool = True) -> Self:
self._flags = dataclasses.replace(self._flags, resolve=enable)
return self
def exact(self, enable: bool = True) -> Self:
self._flags = dataclasses.replace(self._flags, exact=enable)
return self
# endregion
# region matches
def protocol(self, protocol: Protocols) -> Self:
self._matches = dataclasses.replace(self._matches, protocol=protocol)
return self
def p(self, protocol: Protocols) -> Self:
return self.protocol(protocol)
def source(self, host: str, port: int) -> Self:
self.source_host(host).source_port(port)
return self
def source_host(self, source_host: str) -> Self:
self._matches = dataclasses.replace(self._matches, source_host=source_host)
return self
def src(self, source_host: str) -> Self:
return self.source_host(source_host)
def s(self, source_host: str) -> Self:
return self.source_host(source_host)
def source_port(self, source_port: int) -> Self:
self._matches = dataclasses.replace(self._matches, source_port=source_port)
return self
def sport(self, source_port: int) -> Self:
return self.source_port(source_port)
def destination(self, host: str, port: int) -> Self:
self.destination_host(host).destination_port(port)
return self
def destination_host(self, destination_host: str) -> Self:
self._matches = dataclasses.replace(self._matches, destination_host=destination_host)
return self
def dst(self, destination_host: str) -> Self:
return self.destination_host(destination_host)
def d(self, destination_host: str) -> Self:
return self.destination_host(destination_host)
def destination_port(self, destination_port: int) -> Self:
self._matches = dataclasses.replace(self._matches, destination_port=destination_port)
return self
def dport(self, destination_port: int) -> Self:
return self.destination_port(destination_port)
def in_interface(self, in_interface: str) -> Self:
self._matches = dataclasses.replace(self._matches, in_interface=in_interface)
return self
def i(self, in_interface: str) -> Self:
return self.in_interface(in_interface)
def out_interface(self, out_interface: str) -> Self:
self._matches = dataclasses.replace(self._matches, out_interface=out_interface)
return self
def o(self, out_interface: str) -> Self:
return self.out_interface(out_interface)
# endregion
# region modules
# TODO: missing: dccp, addrtype, ah, bpf, cgroup, cluster, devgroup, dscp, dst, ecn,
# esp, eui64, frag, hashlimit, hbh, helper, hl, icmp, icmp6, iprange, ipv6header, ipvs, length, mh, nfacct, osf,
# owner, physdev, pkttype, policty, qouta, rateest, realm, recent, rpfilter, rt, sctp, set, socket, statistics,
# tcpmss, time, tos, ttl, u32
def comment(self, comment: str) -> Self:
self._modules.append(Module(module="comment", parameters=[("comment", f'"{comment}"')]))
return self
def connbytes(self, connbytes: str, mode: ConnbytesMode, direction: ConnbytesDirection) -> Self:
if not re.match(r"\d*:\d*", connbytes):
raise ConnbytesError
self._modules.append(Module(module="connbytes", parameters=[("connbytes", connbytes), ("connbytes-mode", mode),
("connbytes-dir", direction)]))
return self
def connlimit(
self,
upto: Optional[int] = None,
above: Optional[int] = None,
mask: Optional[int] = None,
sadder: bool = True,
daddr: bool = False
) -> Self:
if sadder and daddr:
raise ConnlimitAddrError
parameters = []
if upto:
parameters.append(("connlimit-upto", str(upto)))
if above:
parameters.append(("connlimit-above", str(above)))
if mask:
parameters.append(("connlimit-mask", str(mask)))
if sadder:
parameters.append(("connlimit-saddr", None))
if daddr:
parameters.append(("connlimit-daddr", None))
self._modules.append(Module(module="connlimit", parameters=parameters))
return self
def connmark(self, mark: int, mask: Optional[int] = None) -> Self:
if mask:
parameters = [("mark", f"{mark}/{mask}")]
else:
parameters = [("mark", mark)]
self._modules.append(Module(module="connmark", parameters=parameters))
return self
def conntrack(
self,
*,
state: Optional[List[ConntrackStates]] = None,
status: Optional[List[ConntrackStatus]] = None,
protocol: Optional[Protocols] = None,
original_source: Optional[str] = None,
original_source_port: Optional[int] = None,
original_destination: Optional[str] = None,
original_destination_port: Optional[int] = None,
reply_source: Optional[str] = None,
reply_source_port: Optional[int] = None,
reply_destination: Optional[str] = None,
reply_destination_port: Optional[int] = None,
expire: Optional[int] = None,
direction: Optional[ConntrackDirection] = None,
) -> Self:
parameters = []
if state:
parameters.append(("ctstate", ",".join(state)))
if status:
parameters.append(("ctstatus", ",".join(status)))
if protocol:
parameters.append(("ctproto", protocol))
if original_source:
parameters.append(("ctorigsrc", original_source))
if original_source_port:
parameters.append(("ctorigsrcport", original_source_port))
if original_destination:
parameters.append(("ctorigdst", original_destination))
if original_destination_port:
parameters.append(("ctorigdstport", original_destination_port))
if reply_source:
parameters.append(("ctreplsrc", reply_source))
if reply_source_port:
parameters.append(("ctreplsrcport", reply_source_port))
if reply_destination:
parameters.append(("ctrepldst", reply_destination))
if reply_destination_port:
parameters.append(("ctrepldstport", reply_destination_port))
if expire:
parameters.append(("ctexpire", expire))
if direction:
parameters.append(("ctdir", direction))
self._modules.append(Module(module="conntrack", parameters=parameters))
return self
def cpu(self, cpu: int) -> Self:
self._modules.append(Module(module="cpu", parameters=[("cpu", str(cpu))]))
return self
def limit(self, rate: int = 3, units: LimitUnits = LimitUnits.HOUR, burst: int = 5) -> Self:
self._modules.append(Module(module="limit", parameters=[("limit", f"{rate}/{units}"), ("limit-burst", burst)]))
return self
def mac(self, mac: str) -> Self:
self._modules.append(Module(module="mac", parameters=[("mac-source", mac)]))
return self
def mark(self, mark: int, mask: Optional[int] = None) -> Self:
if mask:
parameters = [("mark", f"{mark}/{mask}")]
else:
parameters = [("mark", mark)]
self._modules.append(Module(module="mark", parameters=parameters))
return self
def multiport(
self,
source_ports: Optional[List[Union[int, str]]] = None,
destination_ports: Optional[List[Union[int, str]]] = None,
ports: Optional[List[Union[int, str]]] = None
) -> Self:
if source_ports and destination_ports: | raise MultiportSourceAndDestinationError | 18 | 2023-12-17 17:00:49+00:00 | 8k |
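# Editor's note: a hedged sketch of how the fluent builder from the row above is intended to
# be chained; it assumes the package is importable as suggested by that row's file path and
# its own imports, and that a later build()/str() step (outside this excerpt) renders the
# final iptables command.
from iptables.iptables import IPTablesRule
from iptables.enums import Chains, Protocols

rule = (
    IPTablesRule()
    .append(Chains.INPUT)            # -A INPUT
    .protocol(Protocols.TCP)         # -p tcp
    .destination_port(22)            # --dport 22
    .in_interface("eth0")            # -i eth0
    .comment("allow inbound ssh")    # -m comment --comment "allow inbound ssh"
)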
daihaojun554/biliscrapy | biliscrapy/views.py | [
{
"identifier": "BiliDanmu",
"path": "biliscrapy/models.py",
"snippet": "class BiliDanmu(models.Model):\n _id = models.CharField(max_length=255)\n cid = models.CharField(max_length=255)\n content = models.TextField()\n color = models.CharField(max_length=255)\n fontsize = models.IntegerField()\n midHash = models.CharField(max_length=255)\n mode = models.CharField(max_length=255)\n progress = models.FloatField()\n ctime = models.DateTimeField()\n\n def __str__(self):\n return self.content"
},
{
"identifier": "BiliComment",
"path": "biliscrapy/models.py",
"snippet": "class BiliComment(models.Model):\n avid = models.CharField(max_length=255)\n uname = models.CharField(max_length=255)\n # 最高等级就是6级\n current_level = models.IntegerField()\n # 用户等级\n like = models.IntegerField()\n # 用户性别 男 女 保密\n sex = models.CharField(max_length=10)\n ctime = models.DateTimeField()\n message = models.TextField()\n\n def __str__(self):\n return self.message"
},
{
"identifier": "BiliVideo",
"path": "biliscrapy/models.py",
"snippet": "class BiliVideo(models.Model):\n bvid = models.CharField(max_length=30, unique=True)\n avid = models.IntegerField(unique=True)\n oid = models.IntegerField(unique=True)\n title = models.CharField(max_length=100)\n author = models.CharField(max_length=100)\n tag = models.CharField(max_length=100)\n pubdate = models.DateField()\n pic = models.URLField()\n desc = models.TextField()\n danmu_fetched = models.BooleanField(default=False)\n comment_fetched = models.BooleanField(default=False)\n danmaku_count = models.IntegerField(default=0)\n comment_count = models.IntegerField(default=0)\n\n def __str__(self):\n return self.title"
},
{
"identifier": "Card",
"path": "biliscrapy/models.py",
"snippet": "class Card(models.Model):\n card_code = models.CharField(max_length=100, unique=True)\n expiration_date = models.DateTimeField()\n last_used_address = models.GenericIPAddressField(null=True, blank=True)\n is_used = models.BooleanField(default=False)\n # action = models.CharField(max_length=100)\n # is_active = models.BooleanField(default=True)\n # is_expired = models.BooleanField(default=False)\n # count = models.IntegerField(default=0)\n\n def __str__(self):\n return self.card_code"
},
{
"identifier": "Comments",
"path": "biliscrapy/network/bilibili_comment.py",
"snippet": "class Comments:\n def __init__(self):\n script_dir = os.path.dirname(os.path.abspath(__file__))\n # 构建文件路径\n file_path = os.path.join(script_dir, 'bilibili_cookies.json')\n if not file_path:\n self.cookies = {}\n with open(file_path, 'r', encoding='utf-8') as file:\n self.cookies_data = json.load(file)\n self.cookies = {cookie['name']: cookie['value'] for cookie in self.cookies_data}\n self.utils = bili_utils()\n self.logger = logging.getLogger('log')\n\n def extract_comments(self, replies):\n extracted_comments = []\n if not replies:\n return extracted_comments\n for reply in replies:\n extracted_comment = {\n 'uname': reply['member']['uname'],\n 'current_level': reply['member']['level_info']['current_level'],\n 'like': reply['like'],\n 'sex': reply['member']['sex'],\n 'ctime': reply['ctime'],\n 'message': reply['content']['message']\n }\n extracted_comments.append(extracted_comment)\n\n if 'replies' in reply and reply['replies']:\n nested_replies = self.extract_comments(reply['replies'])\n extracted_comments.extend(nested_replies)\n\n return extracted_comments\n\n def get_comments(self, bvorurl):\n self.logger.info(\"Getting comments for bvorurl:{}\".format(bvorurl))\n bv = self.utils.bv_get(bvorurl)\n avid = self.utils.bv2av(bv)\n count = 1\n while avid is None:\n avid = self.utils.bv2av(bv)\n count += 1\n self.logger.info(f\"avid is None, retrying...count is {count}\")\n time.sleep(3)\n self.logger.info(f\"avid===>{avid}\")\n comments = [] # 使用列表存储评论\n\n # 获取评论总数和每页评论数量\n # 计算总页数\n page_num = 1\n page_size = 20\n\n while True:\n url = f'https://api.bilibili.com/x/v2/reply?type=1&oid={avid}&sort=2&pn={page_num}&ps={page_size}'\n response = requests.get(url, headers=headers, cookies=self.cookies)\n data = response.json()\n if data['code'] != 0:\n break\n # 提取回复信息\n extracted_data = self.extract_comments(data['data']['replies'])\n\n # 过滤重复的评论\n new_comments = [comment for comment in extracted_data if comment not in comments]\n comments.extend(new_comments) # 将新的评论添加到列表中\n self.logger.info(f\"提取到了{len(new_comments)}条评论,从第 {page_num} 页\")\n if len(new_comments) == 0:\n self.logger.info(\"提取完毕所有评论,共提取到{}条评论!=====>avid{}\".format(len(comments), avid))\n break\n # 判断是否有下一页\n total_count = data['data']['page']['count']\n total_pages = (total_count + page_size - 1) // page_size # 计算总页数\n if page_num >= total_pages:\n self.logger.info(\"提取完毕所有评论,共提取到{}条评论!=====>avid{}\".format(len(comments), avid))\n break\n\n # 构建下一页的URL\n page_num += 1\n self.logger.info(\"开始提取第{}页评论\".format(page_num))\n time.sleep(random.uniform(0.5, 1.5))\n self.logger.info(f\"总共{len(comments)}条评论!\")\n\n # 写入JSON文件\n os.makedirs(\"./data/comment/\", exist_ok=True) # 创建多层目录\n file_path = f'./data/comment/{avid}_{page_num}-{page_size}_{len(comments)}.json'\n if len(comments) < 2000:\n with open(file_path, 'w', encoding='utf-8') as f:\n json.dump(comments, f, indent=4, ensure_ascii=False)\n return comments"
},
{
"identifier": "bili_utils",
"path": "biliscrapy/network/bilibili_utils.py",
"snippet": "class bili_utils:\n def __init__(self):\n self.logger = logging.getLogger('log')\n self.header = headers\n self.script_dir = os.path.dirname(os.path.abspath(__file__))\n file_path = os.path.join(self.script_dir, 'bilibili_cookies.json')\n with open(file_path, 'r') as file:\n self.cookies_data = json.load(file)\n self.cookies = {cookie['name']: cookie['value'] for cookie in self.cookies_data}\n\n def bv_get(self, bvorurl):\n # https://api.bilibili.com/x/web-interface/view?bvid=BV1uG41197Tf\n # 将bv提取出来\n bv_identifier = \"BV\" # BV号的标识符\n if \"http://\" in bvorurl or \"https://\" in bvorurl: # 检查是否是一个URL\n self.logger.info(\"你输入的是http链接,正在解析...\")\n bv_index = bvorurl.find(bv_identifier)\n if bv_index != -1: # 如果找到了BV号\n bv = bvorurl[bv_index:bv_index + len(bv_identifier) + 10] # 提取BV号\n self.logger.info(f\"BV号为......: {bv}\")\n return bv\n else:\n self.logger.info(\"你输入的链接地址有误!\")\n return\n elif bv_identifier in bvorurl: # 如果输入的是BV号\n self.logger.info(f\"你输入的是BV号{bvorurl},正在解析...\")\n bv = bvorurl\n return bv\n else:\n self.logger.info(f\"请输入正确的链接地址或BV号!,{bvorurl}\")\n return \"BV1111111111\"\n\n '''\n av 就是 oid 评论里面的参数\n '''\n\n def bv2av(self, bv):\n bv2av_url = 'https://api.bilibili.com/x/web-interface/view?bvid='\n if bv.startswith(\"BV\"):\n url = bv2av_url + str(bv)\n retry_count = 0\n max_retries = 10\n retry_delay = 1 # seconds\n while retry_count < max_retries:\n try:\n response = requests.get(url,headers=headers,cookies=self.cookies)\n response.raise_for_status() # 检查请求是否成功\n data = response.json()\n # self.logger.info(data)\n if 'data' in data and 'aid' in data['data']:\n avid = data['data']['aid']\n self.logger.info(f\"找到的avid{avid}\")\n return avid\n else:\n self.logger.info(\"未找到有效的aid值,正在重新尝试获取...\")\n retry_count += 1\n time.sleep(retry_delay)\n except (requests.RequestException, ValueError) as e:\n self.logger.info(f\"请求发生错误:{e}\")\n retry_count += 1\n self.logger.info(\"服务器返回错误!请稍后再试!\")\n self.logger.info(f\"正在重新尝试获取aid,尝试次数==>{retry_count}\")\n time.sleep(retry_delay)\n\n return None\n\n '''\n cid 是弹幕用的参数\n '''\n\n def bv2cid(self, bv):\n url = f\"https://api.bilibili.com/x/player/pagelist?bvid={str(bv)}&jsonp=jsonp\"\n retry_count = 1\n json_s = requests.get(url,headers=headers,cookies=self.cookies).json()\n self.logger.info(\"bv====》\"+bv)\n if json_s['code'] == 0:\n cid = json_s['data'][0]['cid']\n self.logger.info(\"提取出来的cid是:\" + str(cid))\n return cid\n else:\n self.logger.error(\"服务器返回错误!请稍后再试!\")\n retry_count+=1\n if retry_count > 10:\n self.logger.error(\"尝试次数过多,请稍后再试!\")\n return None\n else:\n self.logger.error(\"正在重新尝试获取cid,尝试次数==>\" + str(retry_count))\n return self.bv2cid(bv)\n\n def get_bilibili_cookies(self):\n options = webdriver.ChromeOptions()\n # options.add_argument('--headless')\n # options.add_argument('--disable-gpu')\n # 动态获取路径 不用每次都手动输入路径\n # chromedriver.exe 的路径\n # 获取当前脚本的绝对路径\n current_path = os.path.dirname(os.path.abspath(__file__))\n\n # 构建 chromedriver 的绝对路径\n driver_path = os.path.join(current_path, 'chromedriver.exe')\n\n # 创建 WebDriver 服务\n service = Service(driver_path)\n # service = Service('./chromedriver.exe')\n options.add_argument('--no-sandbox')\n options.binary_location='C:\\\\Program Files\\\\Google\\\\chrome-win64\\\\chrome.exe'\n driver = webdriver.Chrome(options=options, service=service)\n\n # 打开 Bilibili 网站\n driver.get('https://www.bilibili.com/')\n #\n login_btn = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR,\n '#i_cecream > div.bili-feed4 > div.bili-header.large-header > 
div.bili-header__bar > ul.right-entry > li:nth-child(1) > li > div.right-entry__outside.go-login-btn')))\n login_btn.click()\n # 等待登录完成成\n time.sleep(10)\n driver.get('https://www.bilibili.com/')\n # 在这里,模拟登录流程(需要输入账号和密码)\n # 扫码登录然后,等待完成,完成的条件是屏幕上出现了某个\n\n search = WebDriverWait(driver, 20).until(\n EC.presence_of_element_located((By.CSS_SELECTOR, '#nav-searchform > div.nav-search-btn')))\n search.click()\n time.sleep(3)\n cookies = driver.get_cookies()\n # 获取当前脚本的路径\n current_path = os.path.dirname(os.path.abspath(__file__))\n with open(os.path.join(current_path, 'bilibili_cookies.json'), 'w') as f:\n # 写入当前文件\n f.write(json.dumps(cookies))\n # 写入成功\n self.logger.info('写入成功{}'.format(cookies))\n driver.quit()\n return\n\n def get_info_by_bv(self, bv):\n url = f\"https://api.bilibili.com/x/web-interface/view?bvid={str(bv)}\"\n\n def try_get(url):\n try:\n response = requests.get(url, headers=self.header, cookies=self.cookies)\n js_str = response.json()\n if js_str.get('code', 0) == 0:\n return js_str['data']\n else:\n # 可能需要根据API的设计,记录不同的错误\n self.logger.error(\n f\"Video API returned non-success code: {js_str.get('code', 'Unknown')} with message: {js_str.get('msg', 'Unknown')}\")\n except requests.exceptions.RequestException as e:\n self.logger.error(f\"An error occurred: {e}\")\n return None\n\n result = None\n retry_count = 10\n for _ in range(retry_count):\n result = try_get(url)\n if result:\n break\n\n return result\n\n # 检查url是否合法\n def check_url(self, url):\n if url.startswith(\"BV\"):\n return True\n elif url.startswith(\"https://www.bilibili.com/\"):\n return True\n else:\n return False"
},
{
"identifier": "Video",
"path": "biliscrapy/network/bilibili_video.py",
"snippet": "class Video:\n def __init__(self):\n script_path = os.path.dirname(os.path.abspath(__file__))\n self.dir_path = os.path.join(script_path, 'data', 'video')\n os.makedirs(self.dir_path, exist_ok=True)\n self.utils = bili_utils()\n self.script_dir = os.path.dirname(os.path.abspath(__file__))\n # 构建文件路径\n file_path = os.path.join(self.script_dir, 'bilibili_cookies.json')\n if not file_path:\n self.cookies = {}\n with open(file_path, 'r') as file:\n self.cookies_data = json.load(file)\n self.cookies = {cookie['name']: cookie['value'] for cookie in self.cookies_data}\n self.headers = headers\n self.logger = logging.getLogger('log')\n\n def get_video_info(self, url: str) -> str:\n \"\"\"\n 从给定的URL中提取视频信息。\n :param url: 要获取信息的视频的URL。\n :return: 返回包含视频信息的JSON字符串,如果URL无效,则返回字符串'invalid url'。\n \"\"\"\n try:\n isValid = self.utils.check_url(url)\n if not isValid:\n return 'url is invalid'\n resp = requests.get(url, headers=self.headers, cookies=self.cookies)\n cont = re.compile(r\".*?window.__playinfo__=(?P<info1>.*?);\\(function\\(\\)\", re.S)\n a = cont.search(resp.text, re.S)\n info = a.group('info1').replace(\"</script><script>window.__INITIAL_STATE__=\", ',')\n return f\"[{info}]\"\n except requests.RequestException as e:\n self.logger.error(\"Error occurred while getting video info: {}\".format(str(e)))\n return ''\n\n def download_file(self, url, filename):\n \"\"\"\n 下载文件的函数\n\n 参数:\n url (str): 要下载的文件的URL\n filename (str): 下载的文件保存的路径和文件名\n \"\"\"\n try:\n response = requests.get(url, headers=self.headers, stream=True, cookies=self.cookies)\n total_size = int(response.headers.get('Content-Length', 0))\n block_size = 1024\n progress_bar = tqdm(total=total_size, unit='B', unit_scale=True)\n with open(os.path.join(self.dir_path, filename), 'wb') as file:\n for data in response.iter_content(block_size):\n file.write(data)\n progress_bar.update(len(data))\n progress_bar.close()\n self.logger.info(\"Downloading file.{}\".format(filename))\n except requests.exceptions.RequestException as e:\n self.logger.error(\"Error occurred while downloading the file: {}\".format(str(e)))\n\n def merge_video_audio(self, video_file, audio_file):\n \"\"\"\n 合并视频和音频文件。\n\n 参数:\n self: 类自身引用。\n video_file: 视频文件路径。\n audio_file: 音频文件路径。\n 返回值:\n 无\n 异常:\n 如果视频文件或音频文件不存在,则会打印错误消息并返回。\n 注意:\n 合并后的文件以视频文件的基础名称和 '.mp4' 扩展名的形式保存。\n 原始视频和音频文件在合并成功后会被删除。\n \"\"\"\n if not os.path.isfile(os.path.join(self.dir_path, video_file)):\n print(f\"Error: {video_file} 不是文件或不存在。\")\n return\n if not os.path.isfile(os.path.join(self.dir_path, audio_file)):\n print(f\"Error: {audio_file} 不是文件或不存在。\")\n return\n\n # 合并视频和音频文件\n # 使用ffmpeg命令行工具将视频和音频文件合并为mp4格式文件\n cmd = f\"ffmpeg -i {os.path.join(self.dir_path, video_file)} -i {os.path.join(self.dir_path, audio_file)} -c:v copy -c:a aac -strict experimental {os.path.join(self.dir_path, video_file.replace('.flv', ''))}.mp4\"\n self.logger.info(cmd)\n try:\n os.system(cmd)\n except Exception as e:\n print(f\"运行 ffmpeg 时发生错误: {e}\")\n return\n\n # 检查合并后的文件是否成功创建\n output_file = os.path.splitext(os.path.basename(video_file))[0] + '.mp4'\n if not os.path.isfile(os.path.join(self.dir_path, output_file)):\n print(\"文件合并失败。\")\n return\n\n # 删除原始视频和音频文件\n os.remove(os.path.join(self.dir_path, video_file))\n os.remove(os.path.join(self.dir_path, audio_file))\n self.logger.info(f\"成功合并视频和音频,------->{output_file}\")"
}
] | import time
from django.core.paginator import Paginator
from django.shortcuts import render, redirect
from django.utils.timezone import make_aware
from .models import BiliDanmu, BiliComment, BiliVideo, Card
from .network.bilibili_danmu import *
from .network.bilibili_comment import Comments
from .network.bilibili_utils import bili_utils
from .network.bilibili_video import Video
from django.utils import timezone
from django.http import JsonResponse, HttpResponse | 6,062 | context = {
'result': 'error',
'data': [],
'message': '请输入正确的链接地址或BV号!'
}
if bv.startswith("https://www.bilibili.com/video/BV") or bv.startswith("BV") or bv.startswith("bv"):
danmu = Danmu()
vv = BiliVideo.objects.filter(bvid=bvid).values()
cid = vv[0]['oid'] if vv else danmu.bv2cid(bv)
bvid_exists = BiliDanmu.objects.filter(cid=cid).exists()
if not bvid_exists:
logger.info("bvid_exists,不存在!!!")
dates = danmu.get_available_dates(cid) # get all dates with danmaku available for this video
danmu.down_so_files(cid, dates) # download all danmaku .so files
unique_danmakus = danmu.parse_so_to_json(cid, dates) # parse and save them as JSON
if unique_danmakus is None:
context['message'] = '解析弹幕失败,请检查BV号是否正确!'
return render(request, 'danmaku.html', context)
danmu_objects = [
BiliDanmu(
_id=danmaku['_id'],
cid=cid,
content=danmaku['content'],
color=danmaku['color'],
fontsize=danmaku['fontsize'],
midHash=danmaku['midHash'],
mode=danmaku['mode'],
progress=danmaku['progress'],
ctime=make_aware(datetime.fromtimestamp(danmaku['ctime']))
)
for danmaku in unique_danmakus
]
BiliDanmu.objects.bulk_create(danmu_objects)
# no danmaku records existed for this video before this fetch
danmaku_count = BiliDanmu.objects.filter(cid=cid).count()
print(danmaku_count)
try:
logger.info("try.....")
# try to update the video's danmaku-fetched status
logger.info(bvid)
video = BiliVideo.objects.get(bvid=bvid)
video.danmu_fetched = True
video.danmaku_count = danmaku_count
video.save()
except Exception as e:
logger.error("error~~~~~~~~~")
logger.error(e)
# if the video record does not exist, create a new one
info = utils.get_info_by_bv(bvid)
logger.info("info---->{}".format(info))
if info is None:
return render(request, 'danmaku.html', context)
cid = utils.bv2cid(bvid)
logger.info(f'{cid}, cid')
video = BiliVideo(bvid=bvid,
avid=info['aid'],
oid=cid,
title=info['title'],
author=info['owner']['name'],
tag=info['tname'],
pubdate=make_aware(datetime.fromtimestamp(info['pubdate'])),
pic=info['pic'],
desc=info['desc'],
danmu_fetched=True,
danmaku_count=danmaku_count
) # mark the danmaku-fetched status
video.save()
logger.info("新视频信息已添加")
# query the database and return the results
danmakus = BiliDanmu.objects.filter(cid=cid).values().order_by('ctime')
paginator = Paginator(danmakus, 15) # show 15 records per page
page_number = request.POST.get('page') if request.POST.get('page') else 1 # page number parameter
page_obj = paginator.get_page(page_number) # fetch the requested page
print(paginator.count)
context = {
"url": url,
'result': 'error',
'bvid': bv,
'total': paginator.count,
'data': page_obj,
'new_request': not bvid_exists,
}
if len(danmakus) > 0:
context['result'] = 'success'
return render(request, 'danmaku.html', context)
return render(request, 'danmaku.html')
def comment(request):
if request.method == 'POST':
bv = request.POST.get('bv') # BV id or URL entered by the user
url = bv
context = {
'result': 'error',
'data': [],
'message': '请输入正确的链接地址或BV号!',
'cid': ''
}
c = Comments()
bv_ = utils.bv_get(bv) if bv.startswith("https://www.bilibili.com/video/BV") or bv.startswith(
"BV") or bv.startswith("bv") else bv
logger.info(f'bv_====>{bv_}')
vv = BiliVideo.objects.filter(bvid=bv_).values()
# logger.info(vv[0]['avid'], 'sadjkaskjadssajasjdsjkaaashhakads')
av = utils.bv2av(bv_)
av_count = 1
while av is None:
logger.info(f"av is None, retrying...{av_count}")
av_count += 1
av = utils.bv2av(bv_)
avid = vv[0]['avid'] if vv else av
logger.info(f"avid=====>{avid}")
if avid is None:
context = {
'result': 'error',
'data': [],
'message': 'b站服务器返回错误,请重新尝试'
}
return render(request, 'comment.html', context)
|
# Create your views here.
utils = bili_utils()
bili_video = Video()
logger = logging.getLogger('log')
base_url = 'https://www.bilibili.com/video/'
def danmaku(request):
if request.method == 'POST':
bv = request.POST.get('bv') # BV id or URL entered by the user
bvid = utils.bv_get(bv)
url = bv
context = {
'result': 'error',
'data': [],
'message': '请输入正确的链接地址或BV号!'
}
if bv.startswith("https://www.bilibili.com/video/BV") or bv.startswith("BV") or bv.startswith("bv"):
danmu = Danmu()
vv = BiliVideo.objects.filter(bvid=bvid).values()
cid = vv[0]['oid'] if vv else danmu.bv2cid(bv)
bvid_exists = BiliDanmu.objects.filter(cid=cid).exists()
if not bvid_exists:
logger.info("bvid_exists,不存在!!!")
dates = danmu.get_available_dates(cid) # get all dates with danmaku available for this video
danmu.down_so_files(cid, dates) # download all danmaku .so files
unique_danmakus = danmu.parse_so_to_json(cid, dates) # parse and save them as JSON
if unique_danmakus is None:
context['message'] = '解析弹幕失败,请检查BV号是否正确!'
return render(request, 'danmaku.html', context)
danmu_objects = [
BiliDanmu(
_id=danmaku['_id'],
cid=cid,
content=danmaku['content'],
color=danmaku['color'],
fontsize=danmaku['fontsize'],
midHash=danmaku['midHash'],
mode=danmaku['mode'],
progress=danmaku['progress'],
ctime=make_aware(datetime.fromtimestamp(danmaku['ctime']))
)
for danmaku in unique_danmakus
]
BiliDanmu.objects.bulk_create(danmu_objects)
# no danmaku records existed for this video before this fetch
danmaku_count = BiliDanmu.objects.filter(cid=cid).count()
print(danmaku_count)
try:
logger.info("try.....")
# try to update the video's danmaku-fetched status
logger.info(bvid)
video = BiliVideo.objects.get(bvid=bvid)
video.danmu_fetched = True
video.danmaku_count = danmaku_count
video.save()
except Exception as e:
logger.error("error~~~~~~~~~")
logger.error(e)
# if the video record does not exist, create a new one
info = utils.get_info_by_bv(bvid)
logger.info("info---->{}".format(info))
if info is None:
return render(request, 'danmaku.html', context)
cid = utils.bv2cid(bvid)
logger.info(f'{cid}, cid')
video = BiliVideo(bvid=bvid,
avid=info['aid'],
oid=cid,
title=info['title'],
author=info['owner']['name'],
tag=info['tname'],
pubdate=make_aware(datetime.fromtimestamp(info['pubdate'])),
pic=info['pic'],
desc=info['desc'],
danmu_fetched=True,
danmaku_count=danmaku_count
) # mark the danmaku-fetched status
video.save()
logger.info("新视频信息已添加")
# query the database and return the results
danmakus = BiliDanmu.objects.filter(cid=cid).values().order_by('ctime')
paginator = Paginator(danmakus, 15) # show 15 records per page
page_number = request.POST.get('page') if request.POST.get('page') else 1 # page number parameter
page_obj = paginator.get_page(page_number) # fetch the requested page
print(paginator.count)
context = {
"url": url,
'result': 'error',
'bvid': bv,
'total': paginator.count,
'data': page_obj,
'new_request': not bvid_exists,
}
if len(danmakus) > 0:
context['result'] = 'success'
return render(request, 'danmaku.html', context)
return render(request, 'danmaku.html')
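# Editor's note: a stand-alone sketch, not part of views.py, of the pagination pattern used in
# danmaku() above; Paginator slices any sequence and get_page() falls back to a valid page
# instead of raising on missing or out-of-range page numbers.
from django.core.paginator import Paginator

def paginate(rows, page_number, per_page=15):
    paginator = Paginator(rows, per_page)          # 15 records per page, mirroring the view
    page_obj = paginator.get_page(page_number or 1)
    return paginator.count, page_obj

# usage inside a view: total, page_obj = paginate(danmakus, request.POST.get('page'))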
def comment(request):
if request.method == 'POST':
bv = request.POST.get('bv') # BV id or URL entered by the user
url = bv
context = {
'result': 'error',
'data': [],
'message': '请输入正确的链接地址或BV号!',
'cid': ''
}
c = Comments()
bv_ = utils.bv_get(bv) if bv.startswith("https://www.bilibili.com/video/BV") or bv.startswith(
"BV") or bv.startswith("bv") else bv
logger.info(f'bv_====>{bv_}')
vv = BiliVideo.objects.filter(bvid=bv_).values()
# logger.info(vv[0]['avid'], 'sadjkaskjadssajasjdsjkaaashhakads')
av = utils.bv2av(bv_)
av_count = 1
while av is None:
logger.info(f"av is None, retrying...{av_count}")
av_count += 1
av = utils.bv2av(bv_)
avid = vv[0]['avid'] if vv else av
logger.info(f"avid=====>{avid}")
if avid is None:
context = {
'result': 'error',
'data': [],
'message': 'b站服务器返回错误,请重新尝试'
}
return render(request, 'comment.html', context) | comments_exist = BiliComment.objects.filter(avid=avid).exists() | 1 | 2023-12-14 10:14:24+00:00 | 8k |
mjavadpur/Sadtalker_LongVideos | src/facerender/modules/generator.py | [
{
"identifier": "ResBlock2d",
"path": "src/facerender/modules/util.py",
"snippet": "class ResBlock2d(nn.Module):\n \"\"\"\n Res block, preserve spatial resolution.\n \"\"\"\n\n def __init__(self, in_features, kernel_size, padding):\n super(ResBlock2d, self).__init__()\n self.conv1 = nn.Conv2d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size,\n padding=padding)\n self.conv2 = nn.Conv2d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size,\n padding=padding)\n self.norm1 = BatchNorm2d(in_features, affine=True)\n self.norm2 = BatchNorm2d(in_features, affine=True)\n\n def forward(self, x):\n out = self.norm1(x)\n out = F.relu(out)\n out = self.conv1(out)\n out = self.norm2(out)\n out = F.relu(out)\n out = self.conv2(out)\n out += x\n return out"
},
{
"identifier": "SameBlock2d",
"path": "src/facerender/modules/util.py",
"snippet": "class SameBlock2d(nn.Module):\n \"\"\"\n Simple block, preserve spatial resolution.\n \"\"\"\n\n def __init__(self, in_features, out_features, groups=1, kernel_size=3, padding=1, lrelu=False):\n super(SameBlock2d, self).__init__()\n self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features,\n kernel_size=kernel_size, padding=padding, groups=groups)\n self.norm = BatchNorm2d(out_features, affine=True)\n if lrelu:\n self.ac = nn.LeakyReLU()\n else:\n self.ac = nn.ReLU()\n\n def forward(self, x):\n out = self.conv(x)\n out = self.norm(out)\n out = self.ac(out)\n return out"
},
{
"identifier": "UpBlock2d",
"path": "src/facerender/modules/util.py",
"snippet": "class UpBlock2d(nn.Module):\n \"\"\"\n Upsampling block for use in decoder.\n \"\"\"\n\n def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):\n super(UpBlock2d, self).__init__()\n\n self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,\n padding=padding, groups=groups)\n self.norm = BatchNorm2d(out_features, affine=True)\n\n def forward(self, x):\n out = F.interpolate(x, scale_factor=2)\n out = self.conv(out)\n out = self.norm(out)\n out = F.relu(out)\n return out"
},
{
"identifier": "DownBlock2d",
"path": "src/facerender/modules/util.py",
"snippet": "class DownBlock2d(nn.Module):\n \"\"\"\n Downsampling block for use in encoder.\n \"\"\"\n\n def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):\n super(DownBlock2d, self).__init__()\n self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,\n padding=padding, groups=groups)\n self.norm = BatchNorm2d(out_features, affine=True)\n self.pool = nn.AvgPool2d(kernel_size=(2, 2))\n\n def forward(self, x):\n out = self.conv(x)\n out = self.norm(out)\n out = F.relu(out)\n out = self.pool(out)\n return out"
},
{
"identifier": "ResBlock3d",
"path": "src/facerender/modules/util.py",
"snippet": "class ResBlock3d(nn.Module):\n \"\"\"\n Res block, preserve spatial resolution.\n \"\"\"\n\n def __init__(self, in_features, kernel_size, padding):\n super(ResBlock3d, self).__init__()\n self.conv1 = nn.Conv3d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size,\n padding=padding)\n self.conv2 = nn.Conv3d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size,\n padding=padding)\n self.norm1 = BatchNorm3d(in_features, affine=True)\n self.norm2 = BatchNorm3d(in_features, affine=True)\n\n def forward(self, x):\n out = self.norm1(x)\n out = F.relu(out)\n out = self.conv1(out)\n out = self.norm2(out)\n out = F.relu(out)\n out = self.conv2(out)\n out += x\n return out"
},
{
"identifier": "SPADEResnetBlock",
"path": "src/facerender/modules/util.py",
"snippet": "class SPADEResnetBlock(nn.Module):\n def __init__(self, fin, fout, norm_G, label_nc, use_se=False, dilation=1):\n super().__init__()\n # Attributes\n self.learned_shortcut = (fin != fout)\n fmiddle = min(fin, fout)\n self.use_se = use_se\n # create conv layers\n self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=dilation, dilation=dilation)\n self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=dilation, dilation=dilation)\n if self.learned_shortcut:\n self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False)\n # apply spectral norm if specified\n if 'spectral' in norm_G:\n self.conv_0 = spectral_norm(self.conv_0)\n self.conv_1 = spectral_norm(self.conv_1)\n if self.learned_shortcut:\n self.conv_s = spectral_norm(self.conv_s)\n # define normalization layers\n self.norm_0 = SPADE(fin, label_nc)\n self.norm_1 = SPADE(fmiddle, label_nc)\n if self.learned_shortcut:\n self.norm_s = SPADE(fin, label_nc)\n\n def forward(self, x, seg1):\n x_s = self.shortcut(x, seg1)\n dx = self.conv_0(self.actvn(self.norm_0(x, seg1)))\n dx = self.conv_1(self.actvn(self.norm_1(dx, seg1)))\n out = x_s + dx\n return out\n\n def shortcut(self, x, seg1):\n if self.learned_shortcut:\n x_s = self.conv_s(self.norm_s(x, seg1))\n else:\n x_s = x\n return x_s\n\n def actvn(self, x):\n return F.leaky_relu(x, 2e-1)"
},
{
"identifier": "DenseMotionNetwork",
"path": "src/facerender/modules/dense_motion.py",
"snippet": "class DenseMotionNetwork(nn.Module):\n \"\"\"\n Module that predicting a dense motion from sparse motion representation given by kp_source and kp_driving\n \"\"\"\n\n def __init__(self, block_expansion, num_blocks, max_features, num_kp, feature_channel, reshape_depth, compress,\n estimate_occlusion_map=False):\n super(DenseMotionNetwork, self).__init__()\n # self.hourglass = Hourglass(block_expansion=block_expansion, in_features=(num_kp+1)*(feature_channel+1), max_features=max_features, num_blocks=num_blocks)\n self.hourglass = Hourglass(block_expansion=block_expansion, in_features=(num_kp+1)*(compress+1), max_features=max_features, num_blocks=num_blocks)\n\n self.mask = nn.Conv3d(self.hourglass.out_filters, num_kp + 1, kernel_size=7, padding=3)\n\n self.compress = nn.Conv3d(feature_channel, compress, kernel_size=1)\n self.norm = BatchNorm3d(compress, affine=True)\n\n if estimate_occlusion_map:\n # self.occlusion = nn.Conv2d(reshape_channel*reshape_depth, 1, kernel_size=7, padding=3)\n self.occlusion = nn.Conv2d(self.hourglass.out_filters*reshape_depth, 1, kernel_size=7, padding=3)\n else:\n self.occlusion = None\n\n self.num_kp = num_kp\n\n\n def create_sparse_motions(self, feature, kp_driving, kp_source):\n bs, _, d, h, w = feature.shape\n identity_grid = make_coordinate_grid((d, h, w), type=kp_source['value'].type())\n identity_grid = identity_grid.view(1, 1, d, h, w, 3)\n coordinate_grid = identity_grid - kp_driving['value'].view(bs, self.num_kp, 1, 1, 1, 3)\n \n # if 'jacobian' in kp_driving:\n if 'jacobian' in kp_driving and kp_driving['jacobian'] is not None:\n jacobian = torch.matmul(kp_source['jacobian'], torch.inverse(kp_driving['jacobian']))\n jacobian = jacobian.unsqueeze(-3).unsqueeze(-3).unsqueeze(-3)\n jacobian = jacobian.repeat(1, 1, d, h, w, 1, 1)\n coordinate_grid = torch.matmul(jacobian, coordinate_grid.unsqueeze(-1))\n coordinate_grid = coordinate_grid.squeeze(-1) \n\n\n driving_to_source = coordinate_grid + kp_source['value'].view(bs, self.num_kp, 1, 1, 1, 3) # (bs, num_kp, d, h, w, 3)\n\n #adding background feature\n identity_grid = identity_grid.repeat(bs, 1, 1, 1, 1, 1)\n sparse_motions = torch.cat([identity_grid, driving_to_source], dim=1) #bs num_kp+1 d h w 3\n \n # sparse_motions = driving_to_source\n\n return sparse_motions\n\n def create_deformed_feature(self, feature, sparse_motions):\n bs, _, d, h, w = feature.shape\n feature_repeat = feature.unsqueeze(1).unsqueeze(1).repeat(1, self.num_kp+1, 1, 1, 1, 1, 1) # (bs, num_kp+1, 1, c, d, h, w)\n feature_repeat = feature_repeat.view(bs * (self.num_kp+1), -1, d, h, w) # (bs*(num_kp+1), c, d, h, w)\n sparse_motions = sparse_motions.view((bs * (self.num_kp+1), d, h, w, -1)) # (bs*(num_kp+1), d, h, w, 3) !!!!\n sparse_deformed = F.grid_sample(feature_repeat, sparse_motions)\n sparse_deformed = sparse_deformed.view((bs, self.num_kp+1, -1, d, h, w)) # (bs, num_kp+1, c, d, h, w)\n return sparse_deformed\n\n def create_heatmap_representations(self, feature, kp_driving, kp_source):\n spatial_size = feature.shape[3:]\n gaussian_driving = kp2gaussian(kp_driving, spatial_size=spatial_size, kp_variance=0.01)\n gaussian_source = kp2gaussian(kp_source, spatial_size=spatial_size, kp_variance=0.01)\n heatmap = gaussian_driving - gaussian_source\n\n # adding background feature\n zeros = torch.zeros(heatmap.shape[0], 1, spatial_size[0], spatial_size[1], spatial_size[2]).type(heatmap.type())\n heatmap = torch.cat([zeros, heatmap], dim=1)\n heatmap = heatmap.unsqueeze(2) # (bs, num_kp+1, 1, d, h, w)\n return heatmap\n\n def 
forward(self, feature, kp_driving, kp_source):\n bs, _, d, h, w = feature.shape\n\n feature = self.compress(feature)\n feature = self.norm(feature)\n feature = F.relu(feature)\n\n out_dict = dict()\n sparse_motion = self.create_sparse_motions(feature, kp_driving, kp_source)\n deformed_feature = self.create_deformed_feature(feature, sparse_motion)\n\n heatmap = self.create_heatmap_representations(deformed_feature, kp_driving, kp_source)\n\n input_ = torch.cat([heatmap, deformed_feature], dim=2)\n input_ = input_.view(bs, -1, d, h, w)\n\n # input = deformed_feature.view(bs, -1, d, h, w) # (bs, num_kp+1 * c, d, h, w)\n\n prediction = self.hourglass(input_)\n\n\n mask = self.mask(prediction)\n mask = F.softmax(mask, dim=1)\n out_dict['mask'] = mask\n mask = mask.unsqueeze(2) # (bs, num_kp+1, 1, d, h, w)\n \n zeros_mask = torch.zeros_like(mask) \n mask = torch.where(mask < 1e-3, zeros_mask, mask) \n\n sparse_motion = sparse_motion.permute(0, 1, 5, 2, 3, 4) # (bs, num_kp+1, 3, d, h, w)\n deformation = (sparse_motion * mask).sum(dim=1) # (bs, 3, d, h, w)\n deformation = deformation.permute(0, 2, 3, 4, 1) # (bs, d, h, w, 3)\n\n out_dict['deformation'] = deformation\n\n if self.occlusion:\n bs, c, d, h, w = prediction.shape\n prediction = prediction.view(bs, -1, h, w)\n occlusion_map = torch.sigmoid(self.occlusion(prediction))\n out_dict['occlusion_map'] = occlusion_map\n\n return out_dict"
}
] | import torch
import torch.nn.functional as F
from torch import nn
from src.facerender.modules.util import ResBlock2d, SameBlock2d, UpBlock2d, DownBlock2d, ResBlock3d, SPADEResnetBlock
from src.facerender.modules.dense_motion import DenseMotionNetwork | 4,623 | """
Generator follows NVIDIA architecture.
"""
def __init__(self, image_channel, feature_channel, num_kp, block_expansion, max_features, num_down_blocks, reshape_channel, reshape_depth,
num_resblocks, estimate_occlusion_map=False, dense_motion_params=None, estimate_jacobian=False):
super(OcclusionAwareGenerator, self).__init__()
if dense_motion_params is not None:
self.dense_motion_network = DenseMotionNetwork(num_kp=num_kp, feature_channel=feature_channel,
estimate_occlusion_map=estimate_occlusion_map,
**dense_motion_params)
else:
self.dense_motion_network = None
self.first = SameBlock2d(image_channel, block_expansion, kernel_size=(7, 7), padding=(3, 3))
down_blocks = []
for i in range(num_down_blocks):
in_features = min(max_features, block_expansion * (2 ** i))
out_features = min(max_features, block_expansion * (2 ** (i + 1)))
down_blocks.append(DownBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1)))
self.down_blocks = nn.ModuleList(down_blocks)
self.second = nn.Conv2d(in_channels=out_features, out_channels=max_features, kernel_size=1, stride=1)
self.reshape_channel = reshape_channel
self.reshape_depth = reshape_depth
self.resblocks_3d = torch.nn.Sequential()
for i in range(num_resblocks):
self.resblocks_3d.add_module('3dr' + str(i), ResBlock3d(reshape_channel, kernel_size=3, padding=1))
out_features = block_expansion * (2 ** (num_down_blocks))
self.third = SameBlock2d(max_features, out_features, kernel_size=(3, 3), padding=(1, 1), lrelu=True)
self.fourth = nn.Conv2d(in_channels=out_features, out_channels=out_features, kernel_size=1, stride=1)
self.resblocks_2d = torch.nn.Sequential()
for i in range(num_resblocks):
self.resblocks_2d.add_module('2dr' + str(i), ResBlock2d(out_features, kernel_size=3, padding=1))
up_blocks = []
for i in range(num_down_blocks):
in_features = max(block_expansion, block_expansion * (2 ** (num_down_blocks - i)))
out_features = max(block_expansion, block_expansion * (2 ** (num_down_blocks - i - 1)))
up_blocks.append(UpBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1)))
self.up_blocks = nn.ModuleList(up_blocks)
self.final = nn.Conv2d(block_expansion, image_channel, kernel_size=(7, 7), padding=(3, 3))
self.estimate_occlusion_map = estimate_occlusion_map
self.image_channel = image_channel
def deform_input(self, inp, deformation):
_, d_old, h_old, w_old, _ = deformation.shape
_, _, d, h, w = inp.shape
if d_old != d or h_old != h or w_old != w:
deformation = deformation.permute(0, 4, 1, 2, 3)
deformation = F.interpolate(deformation, size=(d, h, w), mode='trilinear')
deformation = deformation.permute(0, 2, 3, 4, 1)
return F.grid_sample(inp, deformation)
def forward(self, source_image, kp_driving, kp_source):
# Encoding (downsampling) part
out = self.first(source_image)
for i in range(len(self.down_blocks)):
out = self.down_blocks[i](out)
out = self.second(out)
bs, c, h, w = out.shape
# print(out.shape)
feature_3d = out.view(bs, self.reshape_channel, self.reshape_depth, h ,w)
feature_3d = self.resblocks_3d(feature_3d)
# Transforming feature representation according to deformation and occlusion
output_dict = {}
if self.dense_motion_network is not None:
dense_motion = self.dense_motion_network(feature=feature_3d, kp_driving=kp_driving,
kp_source=kp_source)
output_dict['mask'] = dense_motion['mask']
if 'occlusion_map' in dense_motion:
occlusion_map = dense_motion['occlusion_map']
output_dict['occlusion_map'] = occlusion_map
else:
occlusion_map = None
deformation = dense_motion['deformation']
out = self.deform_input(feature_3d, deformation)
bs, c, d, h, w = out.shape
out = out.view(bs, c*d, h, w)
out = self.third(out)
out = self.fourth(out)
if occlusion_map is not None:
if out.shape[2] != occlusion_map.shape[2] or out.shape[3] != occlusion_map.shape[3]:
occlusion_map = F.interpolate(occlusion_map, size=out.shape[2:], mode='bilinear')
out = out * occlusion_map
# output_dict["deformed"] = self.deform_input(source_image, deformation) # 3d deformation cannot deform 2d image
# Decoding part
out = self.resblocks_2d(out)
for i in range(len(self.up_blocks)):
out = self.up_blocks[i](out)
out = self.final(out)
out = F.sigmoid(out)
output_dict["prediction"] = out
return output_dict
class SPADEDecoder(nn.Module):
def __init__(self):
super().__init__()
ic = 256
oc = 64
norm_G = 'spadespectralinstance'
label_nc = 256
self.fc = nn.Conv2d(ic, 2 * ic, 3, padding=1)
|
class OcclusionAwareGenerator(nn.Module):
"""
Generator follows NVIDIA architecture.
"""
def __init__(self, image_channel, feature_channel, num_kp, block_expansion, max_features, num_down_blocks, reshape_channel, reshape_depth,
num_resblocks, estimate_occlusion_map=False, dense_motion_params=None, estimate_jacobian=False):
super(OcclusionAwareGenerator, self).__init__()
if dense_motion_params is not None:
self.dense_motion_network = DenseMotionNetwork(num_kp=num_kp, feature_channel=feature_channel,
estimate_occlusion_map=estimate_occlusion_map,
**dense_motion_params)
else:
self.dense_motion_network = None
self.first = SameBlock2d(image_channel, block_expansion, kernel_size=(7, 7), padding=(3, 3))
down_blocks = []
for i in range(num_down_blocks):
in_features = min(max_features, block_expansion * (2 ** i))
out_features = min(max_features, block_expansion * (2 ** (i + 1)))
down_blocks.append(DownBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1)))
self.down_blocks = nn.ModuleList(down_blocks)
self.second = nn.Conv2d(in_channels=out_features, out_channels=max_features, kernel_size=1, stride=1)
self.reshape_channel = reshape_channel
self.reshape_depth = reshape_depth
self.resblocks_3d = torch.nn.Sequential()
for i in range(num_resblocks):
self.resblocks_3d.add_module('3dr' + str(i), ResBlock3d(reshape_channel, kernel_size=3, padding=1))
out_features = block_expansion * (2 ** (num_down_blocks))
self.third = SameBlock2d(max_features, out_features, kernel_size=(3, 3), padding=(1, 1), lrelu=True)
self.fourth = nn.Conv2d(in_channels=out_features, out_channels=out_features, kernel_size=1, stride=1)
self.resblocks_2d = torch.nn.Sequential()
for i in range(num_resblocks):
self.resblocks_2d.add_module('2dr' + str(i), ResBlock2d(out_features, kernel_size=3, padding=1))
up_blocks = []
for i in range(num_down_blocks):
in_features = max(block_expansion, block_expansion * (2 ** (num_down_blocks - i)))
out_features = max(block_expansion, block_expansion * (2 ** (num_down_blocks - i - 1)))
up_blocks.append(UpBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1)))
self.up_blocks = nn.ModuleList(up_blocks)
self.final = nn.Conv2d(block_expansion, image_channel, kernel_size=(7, 7), padding=(3, 3))
self.estimate_occlusion_map = estimate_occlusion_map
self.image_channel = image_channel
def deform_input(self, inp, deformation):
_, d_old, h_old, w_old, _ = deformation.shape
_, _, d, h, w = inp.shape
if d_old != d or h_old != h or w_old != w:
deformation = deformation.permute(0, 4, 1, 2, 3)
deformation = F.interpolate(deformation, size=(d, h, w), mode='trilinear')
deformation = deformation.permute(0, 2, 3, 4, 1)
return F.grid_sample(inp, deformation)
def forward(self, source_image, kp_driving, kp_source):
# Encoding (downsampling) part
out = self.first(source_image)
for i in range(len(self.down_blocks)):
out = self.down_blocks[i](out)
out = self.second(out)
bs, c, h, w = out.shape
# print(out.shape)
feature_3d = out.view(bs, self.reshape_channel, self.reshape_depth, h ,w)
feature_3d = self.resblocks_3d(feature_3d)
# Transforming feature representation according to deformation and occlusion
output_dict = {}
if self.dense_motion_network is not None:
dense_motion = self.dense_motion_network(feature=feature_3d, kp_driving=kp_driving,
kp_source=kp_source)
output_dict['mask'] = dense_motion['mask']
if 'occlusion_map' in dense_motion:
occlusion_map = dense_motion['occlusion_map']
output_dict['occlusion_map'] = occlusion_map
else:
occlusion_map = None
deformation = dense_motion['deformation']
out = self.deform_input(feature_3d, deformation)
bs, c, d, h, w = out.shape
out = out.view(bs, c*d, h, w)
out = self.third(out)
out = self.fourth(out)
if occlusion_map is not None:
if out.shape[2] != occlusion_map.shape[2] or out.shape[3] != occlusion_map.shape[3]:
occlusion_map = F.interpolate(occlusion_map, size=out.shape[2:], mode='bilinear')
out = out * occlusion_map
# output_dict["deformed"] = self.deform_input(source_image, deformation) # 3d deformation cannot deform 2d image
# Decoding part
out = self.resblocks_2d(out)
for i in range(len(self.up_blocks)):
out = self.up_blocks[i](out)
out = self.final(out)
out = F.sigmoid(out)
output_dict["prediction"] = out
return output_dict
class SPADEDecoder(nn.Module):
def __init__(self):
super().__init__()
ic = 256
oc = 64
norm_G = 'spadespectralinstance'
label_nc = 256
self.fc = nn.Conv2d(ic, 2 * ic, 3, padding=1) | self.G_middle_0 = SPADEResnetBlock(2 * ic, 2 * ic, norm_G, label_nc) | 5 | 2023-12-19 11:01:35+00:00 | 8k |
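The deform_input method in the generator above warps a 3D feature volume with a dense deformation field via F.interpolate and F.grid_sample. As a minimal standalone sketch (not part of the dataset row), the snippet below reproduces that call pattern on random tensors; the tensor sizes and the align_corners choice are illustrative assumptions, not values taken from the repository.

import torch
import torch.nn.functional as F

def deform_volume(feature, deformation):
    # feature: (bs, c, d, h, w); deformation: (bs, d', h', w', 3) with values in [-1, 1]
    _, d_old, h_old, w_old, _ = deformation.shape
    _, _, d, h, w = feature.shape
    if (d_old, h_old, w_old) != (d, h, w):
        # grid_sample expects the grid to match the output size, so resize the field first
        deformation = deformation.permute(0, 4, 1, 2, 3)
        deformation = F.interpolate(deformation, size=(d, h, w), mode='trilinear', align_corners=False)
        deformation = deformation.permute(0, 2, 3, 4, 1)
    return F.grid_sample(feature, deformation, align_corners=False)

if __name__ == "__main__":
    feat = torch.randn(2, 4, 8, 16, 16)       # toy feature volume
    grid = torch.rand(2, 4, 8, 8, 3) * 2 - 1  # toy normalized sampling grid, deliberately smaller
    warped = deform_volume(feat, grid)
    print(warped.shape)                       # torch.Size([2, 4, 8, 16, 16])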
Westlake-geeks/bilibili-livestream-slicer | main.py | [
{
"identifier": "is_live",
"path": "api.py",
"snippet": "def is_live(uid):\n live_api = \"https://api.live.bilibili.com/room/v1/Room/room_init?id=%s\" % str(\n uid)\n rtn = my_request(live_api)\n data_dict = json.loads(rtn)\n\n data_value = data_dict.get('data')\n live_status_value = data_value.get('live_status')\n if live_status_value:\n return True\n else:\n return False"
},
{
"identifier": "get_stream_url",
"path": "api.py",
"snippet": "def get_stream_url(uid):\n stream_api = \"https://api.live.bilibili.com/room/v1/Room/playUrl?cid=%s&quality=4&platform=web\" % uid\n\n rtn = my_request(stream_api)\n data_dict = json.loads(rtn)\n\n data_value = data_dict.get('data')\n durl_value = data_value.get('durl')\n\n headers = dict()\n headers['cookie'] = r\"buvid_fp_plain=undefined; CURRENT_BLACKGAP=0; blackside_state=0; LIVE_BUVID=AUTO2616596088417426; rpdid=|(k|m|))Y~k~0J'uYY)lmlul~; hit-new-style-dyn=1; go-back-dyn=1; is-2022-channel=1; header_theme_version=CLOSE; CURRENT_PID=b03f3c10-ceb5-11ed-b59d-47f8dacf4eec; FEED_LIVE_VERSION=V8; buvid3=103FCEA2-4D34-4196-5E7B-7321C8A1082118620infoc; b_nut=1690476718; _uuid=B1038F2AB-E8CD-29A2-4728-F82FE285F59D84428infoc; buvid4=CFCD8B8D-0FCC-F601-2753-DA825E11CFE613020-022072800-fr%2BgMSZdqRJTFAAYsS9ACQ%3D%3D; i-wanna-go-back=-1; b_ut=5; hit-dyn-v2=1; i-wanna-go-feeds=2; DedeUserID=325718681; DedeUserID__ckMd5=319313351948fd48; CURRENT_QUALITY=116; SESSDATA=c555e98c%2C1711883936%2Caf616%2Aa2CjAD_KFN4n_1-0P_VrGmaHuTOhode3kKsjtR7Aq0iz1U5TFRzKUl69JUDZ-5W532pswSVkFKMUpyQkQ3NmlWYldjLWtnSG9hcG9lQ1RYa0VKaEh3TFlybGxjdlpJQkkwekYwYy0tckZhc1d3eWlrT1k2NHpvQmQtS1MtUGlxU2RxdEM2UFcyWWlnIIEC; bili_jct=f30d6a38050b9fd22f87748b88e5c40f; sid=8nj7ny5x; bili_ticket=eyJhbGciOiJIUzI1NiIsImtpZCI6InMwMyIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2OTY2MDgwNDYsImlhdCI6MTY5NjM0ODc4NiwicGx0IjotMX0.P976bqS0e1zm2k4khjnX5aqxWCmSIE-zA6MlVXq32wo; bili_ticket_expires=1696607986; fingerprint=c2d58d86c60e35d56558bf9942a9deac; CURRENT_FNVAL=4048; home_feed_column=5; browser_resolution=1699-945; share_source_origin=WEIXIN; bsource=share_source_weixinchat; bp_video_offset_325718681=849021837940621320; buvid_fp=c2d58d86c60e35d56558bf9942a9deac; b_lsid=5469973A_18B009161BC; PVID=1\"\n headers['Accept-Encoding'] = 'identity'\n headers['referer'] = 'https://www.bilibili.com/video/BV1XF411C7xh/?spm_id_from=333.1007.tianma.1-3-3.click&vd_source=d4827c2f1802c9c5b667bc324c406c18'\n headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36 Edg/117.0.2045.47'\n\n retry_time = 0\n return durl_value, headers\n if durl_value:\n try:\n return durl_value, headers\n except Exception as e:\n time.sleep(1)\n print(\"retry\", retry_time,\n \"]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]\")\n print(e)\n retry_time += 1\n pass"
},
{
"identifier": "get_name",
"path": "api.py",
"snippet": "def get_name(uid):\n live_api = \"https://api.live.bilibili.com/room/v1/Room/room_init?id=%s\" % str(\n uid)\n rtn = my_request(live_api)\n data_dict = json.loads(rtn)\n\n data_value = data_dict.get('data')\n duid_value = data_value.get('uid')\n\n home_url = \"https://space.bilibili.com/%s/\" % duid_value\n\n headers = {\n 'cookie': \"buvid_fp_plain=undefined; CURRENT_BLACKGAP=0; blackside_state=0; LIVE_BUVID=AUTO2616596088417426; rpdid=|(k|m|))Y~k~0J'uYY)lmlul~; hit-new-style-dyn=1; go-back-dyn=1; is-2022-channel=1; header_theme_version=CLOSE; CURRENT_PID=b03f3c10-ceb5-11ed-b59d-47f8dacf4eec; FEED_LIVE_VERSION=V8; buvid3=103FCEA2-4D34-4196-5E7B-7321C8A1082118620infoc; b_nut=1690476718; _uuid=B1038F2AB-E8CD-29A2-4728-F82FE285F59D84428infoc; buvid4=CFCD8B8D-0FCC-F601-2753-DA825E11CFE613020-022072800-fr%2BgMSZdqRJTFAAYsS9ACQ%3D%3D; i-wanna-go-back=-1; b_ut=5; hit-dyn-v2=1; i-wanna-go-feeds=2; DedeUserID=325718681; DedeUserID__ckMd5=319313351948fd48; CURRENT_QUALITY=116; SESSDATA=c555e98c%2C1711883936%2Caf616%2Aa2CjAD_KFN4n_1-0P_VrGmaHuTOhode3kKsjtR7Aq0iz1U5TFRzKUl69JUDZ-5W532pswSVkFKMUpyQkQ3NmlWYldjLWtnSG9hcG9lQ1RYa0VKaEh3TFlybGxjdlpJQkkwekYwYy0tckZhc1d3eWlrT1k2NHpvQmQtS1MtUGlxU2RxdEM2UFcyWWlnIIEC; bili_jct=f30d6a38050b9fd22f87748b88e5c40f; sid=8nj7ny5x; bili_ticket=eyJhbGciOiJIUzI1NiIsImtpZCI6InMwMyIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2OTY2MDgwNDYsImlhdCI6MTY5NjM0ODc4NiwicGx0IjotMX0.P976bqS0e1zm2k4khjnX5aqxWCmSIE-zA6MlVXq32wo; bili_ticket_expires=1696607986; fingerprint=c2d58d86c60e35d56558bf9942a9deac; CURRENT_FNVAL=4048; home_feed_column=5; browser_resolution=1699-945; share_source_origin=WEIXIN; bsource=share_source_weixinchat; bp_video_offset_325718681=849021837940621320; buvid_fp=c2d58d86c60e35d56558bf9942a9deac; b_lsid=5469973A_18B009161BC; PVID=1\",\n # 'referer': \"https://space.bilibili.com/353609978/\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36 Edg/117.0.2045.47\"\n }\n response = requests.get(url=home_url, headers=headers)\n user_name = re.findall(r'<title>(.*?)的个人空间', response.text)[0]\n if user_name:\n return (user_name)\n else:\n return (\"未找到指定用户名称\")"
},
{
"identifier": "my_request",
"path": "api.py",
"snippet": "def my_request(url):\n headers = dict()\n headers['cookie'] = r\"buvid_fp_plain=undefined; CURRENT_BLACKGAP=0; blackside_state=0; LIVE_BUVID=AUTO2616596088417426; rpdid=|(k|m|))Y~k~0J'uYY)lmlul~; hit-new-style-dyn=1; go-back-dyn=1; is-2022-channel=1; header_theme_version=CLOSE; CURRENT_PID=b03f3c10-ceb5-11ed-b59d-47f8dacf4eec; FEED_LIVE_VERSION=V8; buvid3=103FCEA2-4D34-4196-5E7B-7321C8A1082118620infoc; b_nut=1690476718; _uuid=B1038F2AB-E8CD-29A2-4728-F82FE285F59D84428infoc; buvid4=CFCD8B8D-0FCC-F601-2753-DA825E11CFE613020-022072800-fr%2BgMSZdqRJTFAAYsS9ACQ%3D%3D; i-wanna-go-back=-1; b_ut=5; hit-dyn-v2=1; i-wanna-go-feeds=2; DedeUserID=325718681; DedeUserID__ckMd5=319313351948fd48; CURRENT_QUALITY=116; SESSDATA=c555e98c%2C1711883936%2Caf616%2Aa2CjAD_KFN4n_1-0P_VrGmaHuTOhode3kKsjtR7Aq0iz1U5TFRzKUl69JUDZ-5W532pswSVkFKMUpyQkQ3NmlWYldjLWtnSG9hcG9lQ1RYa0VKaEh3TFlybGxjdlpJQkkwekYwYy0tckZhc1d3eWlrT1k2NHpvQmQtS1MtUGlxU2RxdEM2UFcyWWlnIIEC; bili_jct=f30d6a38050b9fd22f87748b88e5c40f; sid=8nj7ny5x; bili_ticket=eyJhbGciOiJIUzI1NiIsImtpZCI6InMwMyIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2OTY2MDgwNDYsImlhdCI6MTY5NjM0ODc4NiwicGx0IjotMX0.P976bqS0e1zm2k4khjnX5aqxWCmSIE-zA6MlVXq32wo; bili_ticket_expires=1696607986; fingerprint=c2d58d86c60e35d56558bf9942a9deac; CURRENT_FNVAL=4048; home_feed_column=5; browser_resolution=1699-945; share_source_origin=WEIXIN; bsource=share_source_weixinchat; bp_video_offset_325718681=849021837940621320; buvid_fp=c2d58d86c60e35d56558bf9942a9deac; b_lsid=5469973A_18B009161BC; PVID=1\"\n headers['Accept-Encoding'] = 'identity'\n headers['referer'] = 'https://www.bilibili.com/video/BV1XF411C7xh/?spm_id_from=333.1007.tianma.1-3-3.click&vd_source=d4827c2f1802c9c5b667bc324c406c18'\n headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36 Edg/117.0.2045.47'\n\n response = requests.get(url=url, headers=headers)\n return response.text"
}
] | import os
import json
import traceback
import sys
import re
import streamlink
import threading
import requests
import time
import datetime
import urllib
import socket
from api import is_live, get_stream_url, get_name, my_request
from urllib import request
| 3,682 |
socket.setdefaulttimeout(5.0)
def record(real_url, file_name, headers):
if not real_url:
return
res = None
try:
with urllib.request.urlopen(urllib.request.Request(real_url, headers=headers)) as response:
size = 0
with open(file_name, 'wb') as f:
print('starting download from:\n%s\nto:\n%s' %
(real_url, file_name))
chunk_size = 64*1024
while True:
chunk = response.read(chunk_size)
if not chunk:
                        print('connection interrupted')
break
f.write(chunk)
#size += len(chunk)
#print('{:<4.2f} MB downloaded'.format(
# size/1024/1024), datetime.datetime.now(), end="\r")
except Exception as e:
print("=============================")
print(e)
print("=============================")
finally:
        print("finally")
if res:
res.close()
print("res.close()")
if os.path.isfile(file_name) and os.path.getsize(file_name) == 0:
os.remove(file_name)
print("os.remove(file_name)")
def __main__(id,filename):
#conf = json.load(open("_config.json"))
_id = id
_name = get_name(int(_id))
_path = "videos/"
if not os.path.exists(_path):
        raise FileNotFoundError("path does not exist")
while 1:
try:
live_status = is_live(int(_id))
print('live_status:', live_status)
except Exception as e:
print(e)
continue
if live_status == False:
            print("[%s] not live yet" % _id, datetime.datetime.now(), end="\r")
time.sleep(5)
pass
else:
            # send_wechat_notification(_name+' '+'title placeholder', 'live link placeholder 00000')
try:
|
socket.setdefaulttimeout(5.0)
def record(real_url, file_name, headers):
if not real_url:
return
res = None
try:
with urllib.request.urlopen(urllib.request.Request(real_url, headers=headers)) as response:
size = 0
with open(file_name, 'wb') as f:
print('starting download from:\n%s\nto:\n%s' %
(real_url, file_name))
chunk_size = 64*1024
while True:
chunk = response.read(chunk_size)
if not chunk:
                        print('connection interrupted')
break
f.write(chunk)
#size += len(chunk)
#print('{:<4.2f} MB downloaded'.format(
# size/1024/1024), datetime.datetime.now(), end="\r")
except Exception as e:
print("=============================")
print(e)
print("=============================")
finally:
        print("finally")
if res:
res.close()
print("res.close()")
if os.path.isfile(file_name) and os.path.getsize(file_name) == 0:
os.remove(file_name)
print("os.remove(file_name)")
def __main__(id,filename):
#conf = json.load(open("_config.json"))
_id = id
_name = get_name(int(_id))
_path = "videos/"
if not os.path.exists(_path):
        raise FileNotFoundError("path does not exist")
while 1:
try:
live_status = is_live(int(_id))
print('live_status:', live_status)
except Exception as e:
print(e)
continue
if live_status == False:
            print("[%s] not live yet" % _id, datetime.datetime.now(), end="\r")
time.sleep(5)
pass
else:
            # send_wechat_notification(_name+' '+'title placeholder', 'live link placeholder 00000')
try:
| stream_url, headers = get_stream_url(_id)
| 1 | 2023-12-16 17:08:02+00:00 | 8k |
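The record() function above streams an HTTP response to disk in fixed-size chunks and removes empty files afterwards. Below is a small, self-contained sketch of that download pattern with an explicit retry budget; the placeholder URL, chunk size and retry count are invented for illustration and are not taken from the repository.

import os
import urllib.request

def download_stream(url, file_name, headers=None, chunk_size=64 * 1024, max_retries=3):
    """Stream `url` to `file_name` in chunks; drop the file if nothing was written."""
    req = urllib.request.Request(url, headers=headers or {})
    for attempt in range(1, max_retries + 1):
        try:
            with urllib.request.urlopen(req, timeout=5.0) as response, open(file_name, 'wb') as f:
                while True:
                    chunk = response.read(chunk_size)
                    if not chunk:      # server closed the stream
                        return True
                    f.write(chunk)
        except OSError as e:
            print(f"attempt {attempt}/{max_retries} failed: {e}")
        finally:
            if os.path.isfile(file_name) and os.path.getsize(file_name) == 0:
                os.remove(file_name)   # clean up empty partial files
    return False

# Hypothetical usage (the URL is a placeholder, not a real stream address):
# download_stream("https://example.com/live.flv", "videos/out.flv")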
Angryrou/udao | udao/optimization/moo/progressive_frontier/parallel_progressive_frontier.py | [
{
"identifier": "logger",
"path": "udao/utils/logging.py",
"snippet": "def _get_logger(name: str = \"udao\", level: int = logging.DEBUG) -> logging.Logger:"
},
{
"identifier": "Objective",
"path": "udao/optimization/concepts/objective.py",
"snippet": "class Objective(Constraint):\n \"\"\"\n\n Parameters\n ----------\n name : str\n Name of the objective.\n minimize : bool\n Direction of the objective: if True, minimize, else maximize.\n type: VarTypes\n Type of the objective, by default VarTypes.FLOAT\n \"\"\"\n\n def __init__(\n self,\n name: str,\n minimize: bool,\n function: Union[UdaoFunction, th.nn.Module, Callable[..., th.Tensor]],\n lower: Optional[float] = None,\n upper: Optional[float] = None,\n type: VarTypes = VarTypes.FLOAT,\n ):\n super().__init__(function=function, lower=lower, upper=upper)\n self.name = name\n self.minimize = minimize\n self.type = type\n\n @property\n def direction(self) -> int:\n \"\"\"Get gradient direction from optimization type\"\"\"\n if self.minimize:\n return 1\n else:\n return -1\n\n def __repr__(self) -> str:\n return (\n f\"Objective(name={self.name}, \"\n f\"direction={'min' if self.minimize else 'max'}, \"\n f\"lower={self.lower}, upper={self.upper})\"\n )"
},
{
"identifier": "MOProblem",
"path": "udao/optimization/concepts/problem.py",
"snippet": "class MOProblem(BaseProblem):\n \"\"\"Multi-objective optimization problem.\"\"\"\n\n def __init__(\n self,\n objectives: Sequence[Objective],\n variables: Dict[str, Variable],\n constraints: Sequence[Constraint],\n data_processor: Optional[DataProcessor] = None,\n input_parameters: Optional[Dict[str, Any]] = None,\n ) -> None:\n self.objectives = objectives\n super().__init__(\n variables,\n constraints,\n data_processor=data_processor,\n input_parameters=input_parameters,\n )\n\n def __repr__(self) -> str:\n return (\n f\"MOProblem(objectives={self.objectives}, \"\n f\"variables={self.variables}, \"\n f\"constraints={self.constraints}, \"\n f\"input_parameters={self.input_parameters})\"\n )"
},
{
"identifier": "SOProblem",
"path": "udao/optimization/concepts/problem.py",
"snippet": "class SOProblem(BaseProblem):\n \"\"\"Single-objective optimization problem.\"\"\"\n\n def __init__(\n self,\n objective: Objective,\n variables: Dict[str, Variable],\n constraints: Sequence[Constraint],\n data_processor: Optional[DataProcessor] = None,\n input_parameters: Optional[Dict[str, Any]] = None,\n ) -> None:\n self.objective = objective\n super().__init__(\n variables,\n constraints,\n data_processor=data_processor,\n input_parameters=input_parameters,\n )\n\n def __repr__(self) -> str:\n return (\n f\"SOProblem(objective={self.objective}, \"\n f\"variables={self.variables}, \"\n f\"constraints={self.constraints}, \"\n f\"input_parameters={self.input_parameters})\"\n )"
},
{
"identifier": "SOSolver",
"path": "udao/optimization/soo/so_solver.py",
"snippet": "class SOSolver(ABC):\n @abstractmethod\n def solve(\n self,\n problem: SOProblem,\n seed: Optional[int] = None,\n ) -> Tuple[float, Dict[str, float]]:\n \"\"\"Solve a single-objective optimization problem\n\n Parameters\n ----------\n problem : SOProblem\n Single-objective optimization problem to solve\n seed : Optional[int], optional\n Random seed, by default None\n\n Returns\n -------\n Tuple[float, Dict[str, float]]\n A tuple of the objective value and the variables\n that optimize the objective\n \"\"\"\n ..."
},
{
"identifier": "moo_utils",
"path": "udao/optimization/utils/moo_utils.py",
"snippet": "class Point:\nclass Rectangle:\n def __init__(self, objs: np.ndarray, vars: Optional[Dict] = None) -> None:\n def __repr__(self) -> str:\n def __eq__(self, other: \"Point\") -> bool: # type: ignore\n def __init__(self, utopia: Point, nadir: Point) -> None:\n def __repr__(self) -> str:\n def cal_volume(self, upper_bounds: np.ndarray, lower_bounds: np.ndarray) -> float:\n def __lt__(self, other: \"Rectangle\") -> bool:\n def __eq__(self, other: \"Rectangle\") -> bool: # type: ignore\ndef is_pareto_efficient(costs: np.ndarray, return_mask: bool = True) -> np.ndarray:\ndef summarize_ret(\n po_obj_list: Sequence, po_var_list: Sequence\n) -> Tuple[np.ndarray, np.ndarray]:\ndef even_weights(stepsize: float, n_objectives: int) -> np.ndarray:\ndef plot_po(po: np.ndarray, n_obj: int = 2, title: str = \"pf_ap\") -> None:\ndef get_default_device() -> th.device:"
},
{
"identifier": "NoSolutionError",
"path": "udao/optimization/utils/exceptions.py",
"snippet": "class NoSolutionError(ValueError):\n \"Raised when no solution is found for an MOO problem\"\n ..."
},
{
"identifier": "Point",
"path": "udao/optimization/utils/moo_utils.py",
"snippet": "class Point:\n def __init__(self, objs: np.ndarray, vars: Optional[Dict] = None) -> None:\n \"\"\"\n A point in the objective space.\n Variables are optional, and are not specified for imaginary points\n (e.g., utopia and nadir)\n\n Parameters\n ----------\n objs : np.ndarray\n Array of objective values of shape (n_objs,)\n vars :np.ndarray, optional\n Array of variable values of shape (n_vars,), by default None\n \"\"\"\n self.objs = objs\n self.vars = vars\n self.n_objs = objs.shape[0]\n\n def __repr__(self) -> str:\n return f\"Point(objs={self.objs}, vars={self.vars})\"\n\n def __eq__(self, other: \"Point\") -> bool: # type: ignore\n return bool(np.all(self.objs == other.objs) and np.all(self.vars == other.vars))"
},
{
"identifier": "Rectangle",
"path": "udao/optimization/utils/moo_utils.py",
"snippet": "class Rectangle:\n def __init__(self, utopia: Point, nadir: Point) -> None:\n \"\"\"\n\n Parameters\n ----------\n utopia : Points\n utopia point\n nadir : Points\n nadir point\n \"\"\"\n\n self.upper_bounds = nadir.objs\n self.lower_bounds = utopia.objs\n self.n_objs = nadir.objs.shape[0]\n self.volume = self.cal_volume(nadir.objs, utopia.objs)\n self.neg_vol = -self.volume\n self.utopia = utopia\n self.nadir = nadir\n\n def __repr__(self) -> str:\n return f\"Rectangle(utopia={self.utopia}, nadir={self.nadir})\"\n\n def cal_volume(self, upper_bounds: np.ndarray, lower_bounds: np.ndarray) -> float:\n \"\"\"\n Calculate the volume of the hyper_rectangle\n\n Parameters\n ----------\n upper_bounds : np.ndarray(\n Array of upper bounds of the hyper_rectangle, of shape (n_objs,)\n lower_bounds : np.ndarrays\n Array of lower bounds of the hyper_rectangle of shape (n_objs,)\n\n Returns\n -------\n float\n volume of the hyper_rectangle\n \"\"\"\n volume = np.abs(np.prod(upper_bounds - lower_bounds))\n return volume\n\n # Override the `__lt__()` function to make `Rectangles`\n # class work with min-heap (referred from VLDB2022)\n def __lt__(self, other: \"Rectangle\") -> bool:\n return self.neg_vol < other.neg_vol\n\n def __eq__(self, other: \"Rectangle\") -> bool: # type: ignore\n return bool(\n np.all(self.upper_bounds == other.upper_bounds)\n and np.all(self.lower_bounds == other.lower_bounds)\n )"
},
{
"identifier": "BaseProgressiveFrontier",
"path": "udao/optimization/moo/progressive_frontier/base_progressive_frontier.py",
"snippet": "class BaseProgressiveFrontier(MOSolver, ABC):\n \"\"\"\n Base class for Progressive Frontier.\n Includes the common methods for Progressive Frontier.\n \"\"\"\n\n @dataclass\n class Params:\n \"\"\"Parameters for Progressive Frontier\"\"\"\n\n pass\n\n def __init__(\n self,\n solver: SOSolver,\n params: Params,\n ) -> None:\n super().__init__()\n self.solver = solver\n self.opt_obj_ind = 0\n\n def get_anchor_point(\n self,\n problem: MOProblem,\n obj_ind: int,\n seed: Optional[int] = None,\n ) -> Point:\n \"\"\"\n Find the anchor point for the given objective,\n by unbounded single objective optimization\n\n Parameters\n ----------\n problem : MOProblem\n MOO problem in which the objective is to be optimized\n obj_ind : int\n index of the objective to be optimized\n Returns\n -------\n Point\n anchor point for the given objective\n \"\"\"\n try:\n _, soo_vars = self.solver.solve(\n problem.derive_SO_problem(\n objective=problem.objectives[obj_ind],\n ),\n seed=seed,\n )\n except NoSolutionError:\n raise NoSolutionError(\"Cannot find anchor points.\")\n else:\n objs = self._compute_objectives(problem, soo_vars)\n\n # If the current objective type is Integer,\n # further find the optimal value for other objectives with float type\n if problem.objectives[obj_ind].type == VarTypes.INT:\n utopia_init = np.array(\n [0 if i != obj_ind else objs[obj_ind] for i in problem.objectives]\n )\n utopia_tmp, nadir_tmp = Point(objs=utopia_init), Point(objs=objs)\n # select the first objective with float type\n float_obj_ind = [\n i\n for i, objective in enumerate(problem.objectives)\n if objective.type == VarTypes.FLOAT\n ][0]\n obj_bounds_dict_so = self._form_obj_bounds_dict(\n problem, utopia_tmp, nadir_tmp\n )\n so_problem = self._so_problem_from_bounds_dict(\n problem, obj_bounds_dict_so, problem.objectives[float_obj_ind]\n )\n try:\n _, soo_vars_update = self.solver.solve(so_problem, seed=seed)\n except NoSolutionError:\n raise NoSolutionError(\"Cannot find anchor points.\")\n else:\n logger.debug(f\"soo_vars_update is: {soo_vars_update}\")\n objs = self._compute_objectives(problem, soo_vars_update)\n\n return Point(objs, soo_vars_update)\n else:\n return Point(objs, soo_vars)\n\n def _form_obj_bounds_dict(\n self, problem: MOProblem, utopia: Point, nadir: Point\n ) -> dict[str, list[float]]:\n \"\"\"\n form the dict used in the constrained optimization\n e.g. 
the format:\n obj_bounds_dict = {\n \"latency\": [0, 10000000],\n \"cores\": [0, 58]\n }\n Parameters\n ----------\n utopia: Point\n the utopia point\n nadir: Point\n the nadir point\n opt_obj_ind: int\n the index of objective to be optimized\n\n Returns\n -------\n dict with upper and lower bound for each objective\n \"\"\"\n bounds = {}\n for i, objective in enumerate(problem.objectives):\n if objective.direction < 0:\n bounds[objective.name] = [\n nadir.objs[i] * objective.direction,\n utopia.objs[i] * objective.direction,\n ]\n else:\n bounds[objective.name] = [\n utopia.objs[i] * objective.direction,\n nadir.objs[i] * objective.direction,\n ]\n\n return bounds\n\n def _so_problem_from_bounds_dict(\n self,\n problem: MOProblem,\n obj_bounds_dict: dict[str, list],\n primary_obj: Objective,\n ) -> SOProblem:\n \"\"\"\n\n Parameters\n ----------\n obj_bounds_dict : dict[str, list]\n A lower and upper bound for each objective\n primary_obj : Objective\n The objective to be optimized\n\n Returns\n -------\n Tuple[Objective, Sequence[Constraint]]\n The objective and constraints for the single-objective optimization\n \"\"\"\n soo_objective = Objective(\n name=primary_obj.name,\n minimize=primary_obj.minimize,\n function=primary_obj.function,\n lower=obj_bounds_dict[primary_obj.name][0],\n upper=obj_bounds_dict[primary_obj.name][1],\n )\n so_problem = problem.derive_SO_problem(soo_objective)\n soo_constraints = list(so_problem.constraints)\n\n for obj in problem.objectives:\n obj_name = obj.name\n if obj_name != primary_obj.name:\n soo_constraints.append(\n Objective(\n name=obj.name,\n minimize=obj.minimize,\n lower=obj_bounds_dict[obj_name][0],\n upper=obj_bounds_dict[obj_name][1],\n function=obj.function,\n )\n )\n so_problem.constraints = soo_constraints\n\n return so_problem\n\n @staticmethod\n def get_utopia_and_nadir(points: list[Point]) -> Tuple[Point, Point]:\n \"\"\"\n get the utopia and nadir points from a list of points\n Parameters\n ----------\n points: list[Point],\n each element is a Point (defined class).\n\n Returns\n -------\n Tuple[Point, Point]\n utopia and nadir point\n \"\"\"\n if len(points) == 0:\n raise ValueError(\"The input list of points is empty.\")\n n_objs = points[0].n_objs\n if any([point.n_objs != n_objs for point in points]):\n raise Exception(\"The number of objectives is not consistent among points.\")\n best_objs = [min([point.objs[i] for point in points]) for i in range(n_objs)]\n worst_objs = [max([point.objs[i] for point in points]) for i in range(n_objs)]\n logger.debug(f\"best_objs {best_objs}\")\n utopia = Point(np.array(best_objs))\n nadir = Point(np.array(worst_objs))\n\n return utopia, nadir\n\n def _compute_objectives(\n self,\n problem: MOProblem,\n variable_values: dict[str, Any],\n ) -> np.ndarray:\n \"\"\"Compute an array of objective for a given point.\n (variable_values is a dict of variable name and single value)\n\n Parameters\n ----------\n variable_values : dict[str, Any]\n Name: value of variables\n input_parameters : Optional[dict[str, Any]]\n Name: value of other fixed input parameters\n\n Returns\n -------\n np.ndarray\n _description_\n \"\"\"\n obj_list = []\n for obj in problem.objectives:\n obj_value = problem.apply_function(\n obj, variable_values, device=th.device(\"cpu\")\n )\n obj_value = (obj_value * obj.direction).squeeze()\n obj_list.append(obj_value.detach())\n return np.array(obj_list)"
}
] | import itertools
import numpy as np
import torch as th
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple
from torch.multiprocessing import Pool
from ....utils.logging import logger
from ...concepts import MOProblem, Objective, SOProblem
from ...soo.so_solver import SOSolver
from ...utils import moo_utils as moo_ut
from ...utils.exceptions import NoSolutionError
from ...utils.moo_utils import Point, Rectangle
from .base_progressive_frontier import BaseProgressiveFrontier | 3,922 |
class ParallelProgressiveFrontier(BaseProgressiveFrontier):
@dataclass
class Params(BaseProgressiveFrontier.Params):
processes: int = 1
"""Processes to use for parallel processing"""
n_grids: int = 2
"""Number of splits per objective"""
max_iters: int = 10
"""Number of iterations to explore the space"""
def __init__(
self,
|
class ParallelProgressiveFrontier(BaseProgressiveFrontier):
@dataclass
class Params(BaseProgressiveFrontier.Params):
processes: int = 1
"""Processes to use for parallel processing"""
n_grids: int = 2
"""Number of splits per objective"""
max_iters: int = 10
"""Number of iterations to explore the space"""
def __init__(
self, | solver: SOSolver, | 4 | 2023-12-20 09:10:42+00:00 | 8k |
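The BaseProgressiveFrontier helper quoted above derives a utopia point (per-objective minimum) and a nadir point (per-objective maximum) from a set of points in objective space. The sketch below reproduces just that bookkeeping with plain NumPy arrays, independently of the udao Point class; the sample objective values are invented for illustration.

import numpy as np

def utopia_and_nadir(objective_values):
    """objective_values: (n_points, n_objs) array; returns (utopia, nadir) vectors."""
    objective_values = np.asarray(objective_values, dtype=float)
    if objective_values.size == 0:
        raise ValueError("need at least one point")
    utopia = objective_values.min(axis=0)  # best value per objective (minimization convention)
    nadir = objective_values.max(axis=0)   # worst value per objective
    return utopia, nadir

points = np.array([[10.0, 3.0], [7.0, 5.0], [12.0, 2.5]])  # toy (latency, cost) pairs
utopia, nadir = utopia_and_nadir(points)
print(utopia)  # [ 7.   2.5]
print(nadir)   # [12.   5. ]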
XLearning-SCU/2023-TPAMI-SMILE | _Utils/VisualQ2.py | [
{
"identifier": "Queue",
"path": "_MainLauncher.py",
"snippet": "def get_settings():\r\ndef clear_gpu_fail(root):\r\ndef run():\r\ndef main():\r"
},
{
"identifier": "get_nearest_k",
"path": "_Utils/Calculator.py",
"snippet": "def get_nearest_k(h0, h1, k=1, sp_size=1000):\r\n hh0 = h0.half()\r\n hh1 = h1.half()\r\n split = int(np.ceil(len(hh0) / sp_size))\r\n near = []\r\n for i in range(split):\r\n dist = torch.cdist(hh0[i * sp_size:(i + 1) * sp_size], hh1)\r\n nearest = torch.argsort(dist, dim=1)[:, :k]\r\n near.append(nearest)\r\n nearest = torch.cat(near)\r\n return nearest\r"
},
{
"identifier": "visualize_scatter",
"path": "_Utils/Scatter.py",
"snippet": "def visualize_scatter(x, fig_path=None, label_color=None, show=False, s=1):\r\n \"\"\"\r\n :param show:\r\n :param fig_path:\r\n :param x: (n_samples, 2)\r\n :param label_color:\r\n :param label_shape: list of int\r\n :return:\r\n \"\"\"\r\n t = time.time()\r\n\r\n if label_color is None:\r\n label_color = [0] * len(x)\r\n color_num = len(np.unique(label_color))\r\n # if color_num <= 2:\r\n # cmap = None\r\n if color_num <= 10:\r\n cmap = 'tab10'\r\n elif color_num <= 20:\r\n cmap = 'tab20'\r\n else:\r\n cmap = 'gist_ncar'\r\n plt.figure(figsize=[5,5])\r\n if len(np.unique(label_color)) < 3:\r\n ind = np.arange(len(label_color))\r\n np.random.shuffle(ind)\r\n for idr in ind:\r\n plt.scatter(\r\n x[idr:idr+1, 0], x[idr:idr+1, 1], c=label_color[idr:idr+1], cmap=cmap,\r\n s=s,\r\n vmax=max(4, np.max(label_color)),\r\n vmin=min(4, np.min(label_color)),\r\n label=label_color[idr]\r\n )\r\n else:\r\n for c in np.unique(label_color):\r\n plt.scatter(\r\n x[:, 0][label_color == c], x[:, 1][label_color == c], c=label_color[label_color == c], cmap=cmap,\r\n s=s,\r\n vmax=max(4, np.max(label_color)),\r\n vmin=min(4, np.min(label_color)),\r\n label=c\r\n )\r\n # plt.legend(prop={'size': 6})\r\n # plt.legend()\r\n # plt.subplots_adjust(left=left / w, right=1, top=1, bottom=right / h)\r\n # plt.grid()\r\n\r\n plt.axis('off')\r\n plt.tight_layout()\r\n if fig_path is not None:\r\n DirectoryOperator.FoldOperator(directory=fig_path).make_fold()\r\n plt.savefig(fig_path, transparent=True)\r\n if show:\r\n plt.show()\r\n plt.close()\r\n if PrintTimer:\r\n print('VisualizeScatter finished with in {:.03f} seconds (x.shape == {}).'.format(\r\n time.time() - t,\r\n x.shape,\r\n ))\r"
},
{
"identifier": "visualize2",
"path": "_Utils/Scatter.py",
"snippet": "def visualize2(feature_vec, type_vec, group_vec, pred_vec, prefix, ):\r\n fv = feature_vec.reshape((len(feature_vec), -1))\r\n for perplexity in []:# 50\r\n vis_fea_multi = TSNE(perplexity=perplexity).fit_transform(\r\n np.concatenate((fv[group_vec == 0], fv[group_vec == 1]), axis=1)\r\n )\r\n for s in [5]:\r\n prefix2 = prefix + 'P{}S{}'.format(perplexity, s)\r\n visualize_scatter(vis_fea_multi,\r\n fig_path='{}Multi.svg'.format(prefix2),\r\n label_color=type_vec[group_vec == 0],\r\n # label_shape=type_vec,\r\n s=s\r\n )\r\n\r\n for perplexity in [50]:\r\n vis_fea = TSNE(perplexity=perplexity).fit_transform(fv)\r\n for s in [5]: # 5\r\n prefix2 = prefix + 'P{}S{}'.format(perplexity, s)\r\n visualize_scatter(vis_fea,\r\n fig_path='{}Type.svg'.format(prefix2),\r\n label_color=type_vec,\r\n # label_shape=type_vec,\r\n s=s\r\n )\r\n # visualize_scatter(vis_fea,\r\n # fig_path='{}Cluster.svg'.format(prefix),\r\n # label_color=pred_vec,\r\n # label_shape=type_vec,\r\n #\r\n # )\r\n visualize_scatter(vis_fea,\r\n fig_path='{}Group.svg'.format(prefix2),\r\n label_color=group_vec,\r\n # label_shape=type_vec,\r\n s=s\r\n )\r"
},
{
"identifier": "plot_heat_map",
"path": "_Utils/Visualize.py",
"snippet": "def plot_heat_map(z, xticks=None, yticks=None, xlabel=None, ylabel=None, title=None, show=False, fig_path=None):\r\n \"\"\"\r\n\r\n :param z: z[i,j] shown in i-th row, j-th line\r\n :param xlabel:\r\n :param ylabel:\r\n :param show:\r\n :param fig_path:\r\n :return:\r\n \"\"\"\r\n left = 0.15\r\n right = 1\r\n top = 0.95\r\n bottom = 0.15\r\n w, h = z.shape\r\n plt.figure(figsize=(w / (right - left), h / (top - bottom)))\r\n\r\n # plt.figure(figsize=(w / (right - left), h / (top - bottom)))\r\n # plt.subplots_adjust(left=left, right=right, top=top, bottom=bottom)\r\n\r\n if xticks is not None:\r\n plt.xticks(np.arange(len(xticks)), np.round(xticks, 2), rotation=45)\r\n if yticks is not None:\r\n plt.yticks(np.arange(len(yticks)), np.round(yticks, 2))\r\n for i in range(z.shape[0]):\r\n for j in range(z.shape[1]):\r\n # plt.text(j, i, accs[i, j].round(2), ha=\"center\", va=\"center\", color=\"b\", fontsize=12,\r\n # fontname='Times New Roman')\r\n plt.text(j, i, z[i, j], ha=\"center\", va=\"center\")\r\n\r\n if xlabel is not None:\r\n plt.xlabel(xlabel)\r\n if ylabel is not None:\r\n plt.ylabel(ylabel)\r\n if title is not None:\r\n plt.title(title)\r\n plt.imshow(z, interpolation='nearest', aspect='auto')\r\n\r\n plt.colorbar()\r\n if fig_path is not None:\r\n DirectoryOperator.FoldOperator(directory=fig_path).make_fold()\r\n plt.savefig(fig_path, transparent=True)\r\n if show:\r\n plt.show()\r\n plt.close()\r"
},
{
"identifier": "visualize_plot",
"path": "_Utils/Visualize.py",
"snippet": "def visualize_plot(x, y, labels=None, show=False, fig_path=None, xlim=None):\r\n # print('ploting')\r\n st = time.time()\r\n plt.figure(figsize=(20, 10))\r\n if labels is None:\r\n labels = ['line{:02d}'.format(i) for i in range(len(x))]\r\n if not show and fig_path is None:\r\n fig_path = '../_fig/fig.jpg'\r\n for xi, yi, label in zip(x, y, labels):\r\n plt.plot(xi, yi, label=label)\r\n plt.legend(prop={'size': 6})\r\n plt.grid()\r\n if xlim is not None:\r\n plt.xlim((0, 600))\r\n\r\n if fig_path is not None:\r\n DirectoryOperator.FoldOperator(directory=fig_path).make_fold()\r\n plt.savefig(fig_path)\r\n if show:\r\n plt.show()\r\n plt.close()\r\n print('{} lines plotted in {} seconds.'.format(len(x), time.time() - st))\r"
}
] | import numpy as np
import pandas as pd
import torch
from matplotlib import pyplot as plt
from sklearn.manifold import TSNE
from _MainLauncher import Queue
from _Utils.Calculator import get_nearest_k
from _Utils.Scatter import visualize_scatter, visualize2
from _Utils.Visualize import plot_heat_map, visualize_plot
| 5,037 | # plt.text(j, i, z[i, j], ha="center", va="center")
plt.imshow(z, interpolation='nearest', aspect='auto')
# plt.colorbar()
plt.axis('off')
# plt.subplots_adjust(top=0.957, bottom=0.081, left=0.108, right=0.977, hspace=0.2, wspace=0.2)
# plt.show()
plt.tight_layout()
plt.savefig(
figp,
transparent=True)
print()
if use_mx:
break
# def scatter():
# mx = False
# if mx:
# np_pth = 'D:/VirtualMachine/CheckPoints/MultiClustering/Repro/SURE/NoisyMNISTBoth.npz'
# data_f = np.load(np_pth, allow_pickle=False)
# feature_vec = data_f['feature_vec']
# type_vec = data_f['type_vec']
# group_vec = np.concatenate((np.zeros(30000), np.ones(30000)))
# else:
# np_pth = 'D:/Pengxin/Workplace/Codes/MultimodalityClustering/Np099.npz'
# data_f = np.load(np_pth, allow_pickle=False)
# feature_vec = data_f['feature_vec']
# type_vec = data_f['type_vec']
# group_vec = data_f['group_vec']
#
# sub_sample = 3000
# if len(feature_vec) > sub_sample * 2:
# ind = np.arange(int(len(feature_vec) // 2))
# np.random.shuffle(ind)
# ind = ind[:sub_sample]
# ind = np.concatenate((ind, ind + 30000))
# feature_vec = feature_vec[ind]
# group_vec = group_vec[ind]
# type_vec = type_vec[ind]
# # fv = feature_vec.reshape((len(feature_vec), -1))
# prefix = 'D:/Pengxin/Temp/mxF/'
# visualize2(feature_vec, type_vec, group_vec, None, prefix, )
#
# # for perplexity in [15,30]:
# # prefix = 'D:/Pengxin/Temp/mxF/P{}N{}'.format(perplexity, sub_sample)
# # vis_fea_multi = TSNE(perplexity=perplexity).fit_transform(
# # np.concatenate((fv[group_vec == 0], fv[group_vec == 1]), axis=1)
# # )
# # visualize_scatter(vis_fea_multi,
# # fig_path='{}Multi.{}'.format(prefix, post),
# # label_color=type_vec[group_vec == 0],
# # )
# #
# # vis_fea = TSNE(perplexity=perplexity).fit_transform(fv)
# #
# # visualize_scatter(vis_fea,
# # fig_path='{}Type.{}'.format(prefix, post),
# # label_color=type_vec,
# #
# # )
# # # visualize_scatter(vis_fea,
# # # fig_path='{}Cluster.svg'.format(prefix),
# # # label_color=pred_vec,
# # # label_shape=type_vec,
# # #
# # # )
# # visualize_scatter(vis_fea,
# # fig_path='{}Group.{}'.format(prefix, post),
# # label_color=group_vec,
# #
# # )
def plot_nrmse():
rs = pd.read_csv('D:/VirtualMachine/CheckPoints/MultiClustering/Repro/SMAIL/Recover/_Table_NRMSE_AVG_FuEp.csv')
# rs = pd.read_csv('/xlearning/pengxin/Codes/230318/IMvC_RunSet0405/_Table_NRMSE_AVG_FuEp.csv')
xx = []
yy = []
labels = []
spls = []
colors = []
for i, rec in enumerate(np.unique(rs.loc[:, 'Reconstruction'])):
if rec > 1:
continue
label = '$\mathcal{L}_{IsIL}@\lambda_{IsIL}$' + '={}'.format(rec)
rd = rs.loc[rs.loc[:, 'Reconstruction'] == rec]
x = rd.loc[:, 'Epoch']
y = rd.loc[:, 'loss_reconstruction_epoch']
xx.append(x)
yy.append(y)
labels.append(label)
spls.append('-')
colors.append('C{}'.format(i))
for i, rec in enumerate(np.unique(rs.loc[:, 'Reconstruction'])):
if rec > 1:
continue
label = '$NRMSE@\lambda_{IsIL}$' + '={}'.format(rec)
rd = rs.loc[rs.loc[:, 'Reconstruction'] == rec]
ind = pd.isna(rd.loc[:, 'rnmse0']).values != True
rd2 = rd.loc[ind]
ind = rd2.loc[:, 'Epoch'].values > 0
rd2 = rd2.loc[ind]
x = rd2.loc[:, 'Epoch']
y = rd2.loc[:, 'rnmse0']
xx.append(x)
yy.append(y)
labels.append(label)
spls.append('--')
colors.append('C{}'.format(i))
# np_pth = 'D:/VirtualMachine/CheckPoints/MultiClustering/Repro/SURE/nrmse_list.npz'
# data_f = np.load(np_pth, allow_pickle=True)
# data0 = np.asarray([it.cpu().numpy() for it in data_f['data0']])
# data1 = np.asarray([it.cpu().numpy() for it in data_f['data1']])
# y = (data1 + data0)/2
# x = np.arange(len(y))
# xx.append(x)
# yy.append(y)
# labels.append('SURE')
# spls.append('-.')
# colors.append('C{}'.format(6))
|
def confusion_matrix():
np_pth = 'D:/VirtualMachine/CheckPoints/MultiClustering/Repro/SMAIL/RaAlign/Base.npz'
data_mask_ispair = np.load(np_pth, allow_pickle=False)
mask = data_mask_ispair['mask']
use_mx = False
for ep in [59, 89, 119, 149]:
# for ep in [59, 89, 119]:
for tem in [50] if use_mx else [0.5]:
if not use_mx:
# 50A
# np_pth = 'D:/VirtualMachine/CheckPoints/MultiClustering/Repro/SMAIL/RaAlign/Np{:03d}.npz'.format(ep)
# is_pair = data_mask_ispair['is_pair_all']
# to_realign = np.logical_and(is_pair == 0, np.logical_and(mask[:, 1], mask[:, 0]))
# 100A
np_pth = 'D:/VirtualMachine/CheckPoints/MultiClustering/1014/RunSet1025_BenchSotaCI22/ --QuickConfig A100 --Rev 1 --dataset NoisyMNIST30000 --normalize_type sample_wise --seed 9116/NpPoints/Np{:03d}.npz'.format(
ep)
to_realign = np.ones_like(data_mask_ispair['is_pair_all'], dtype=bool)
data_f = np.load(np_pth, allow_pickle=False)
feature_vec = data_f['feature_vec']
type_vec = data_f['type_vec']
group_vec = data_f['group_vec']
pred_vec = data_f['pred_vec']
epoch = data_f['epoch']
type_vec = type_vec[group_vec == 0]
h1 = feature_vec[group_vec == 1]
h0 = feature_vec[group_vec == 0]
ha0 = h0[to_realign]
ha1 = h1[to_realign]
gt = type_vec[to_realign]
# ind = np.argsort(gt)
# ha0 = ha0[ind]
# ha1 = ha1[ind]
# gt = gt[ind]
# ha0[np.sum(ha0 ** 2, axis=1) == 0] = 1e-8
# ha1[np.sum(ha1 ** 2, axis=1) == 0] = 1e-8
# ha0 = ha0 / np.sqrt(np.sum(ha0 ** 2, axis=1, keepdims=True))
# ha1 = ha1 / np.sqrt(np.sum(ha1 ** 2, axis=1, keepdims=True))
sim = torch.from_numpy(ha0 @ ha1.T)
# ind_score = np.zeros(len(ha0))
# for y in np.unique(gt):
# cy = conf[y == gt][:, y == gt]
# cy = (cy + cy.T) / 2
# ind_score[y == gt] = np.mean(cy, axis=1) - 10 * y
# ind = np.argsort(ind_score)[::-1]
# tem = 0.2
else:
# np_pth = 'D:/VirtualMachine/CheckPoints/MultiClustering/Repro/SURE/NoisyMNIST50align.npz'
# data_f = np.load(np_pth, allow_pickle=False)
np_pth = 'D:/VirtualMachine/CheckPoints/MultiClustering/Repro/SURE/NoisyMNIST50align.npz'
data_f = np.load(np_pth, allow_pickle=False)
feature_vec = data_f['feature_vec']
is_pair = data_f['is_pair']
h0 = feature_vec[:30000]
h1 = feature_vec[30000:]
type_vec0 = data_f['type_vec'][:30000]
type_vec1 = data_f['type_vec'][30000:]
to_realign = is_pair == 0
epoch = 0
ha0 = h0[to_realign]
ha1 = h1[to_realign]
gt0 = type_vec0[to_realign]
gt1 = type_vec1[to_realign]
ind = np.argsort(gt0)
ha0 = ha0[ind]
gt0 = gt0[ind]
ind = np.argsort(gt1)
ha1 = ha1[ind]
gt1 = gt1[ind]
assert np.sum(gt0 != gt1) == 0
gt = gt1
sim = -torch.cdist(torch.from_numpy(ha0), torch.from_numpy(ha1))
# tem = 1
figp = 'D:/Pengxin/Temp/AlignConfusionE{:03d}T{:05.01f}{:}.svg'.format(epoch, tem, 'M' if use_mx else '')
print(figp)
# feature_vec = torch.from_numpy(feature_vec)
# # fv = torch.cat([fv[:30000], fv[30000:]], dim=1)
# prefix = 'D:/Pengxin/Temp/mxv/'
# group_vec = np.concatenate([np.zeros(30000, dtype=int), np.ones(30000, dtype=int)])
# type_vec = data_f['type_vec']
# sub_sample = 3000
# if len(feature_vec) > sub_sample * 2:
# ind = np.arange(int(len(feature_vec) // 2))
# np.random.shuffle(ind)
# ind = ind[:sub_sample]
# ind += 30000
# # ind = np.concatenate((ind, ind + int(len(feature_vec) // 2)))
# feature_vec = feature_vec[ind]
# group_vec = group_vec[ind]
# type_vec = type_vec[ind]
# visualize2(feature_vec, type_vec,
# group_vec,
# None,
# prefix,
# )
sas01 = torch.softmax(sim / tem, dim=1)
sas10 = torch.softmax(sim / tem, dim=0)
sas = ((sas01 + sas10) / 2).numpy()
realign_y = gt[torch.argmax(sas01, dim=1)]
realign_gt = gt
acc = np.round(np.sum(realign_y == realign_gt) / len(realign_y) * 100, decimals=1)
print(acc)
continue
# break
# ind_score = np.zeros(len(ha0))
# for y in np.unique(gt):
# cy = sas[y == gt][:, y == gt]
# ind_score[y == gt] = np.mean(cy, axis=1) - 10 * y
# ind = np.argsort(ind_score)[::-1]
# conf = sas[ind][:, ind]
ind_score = np.zeros(len(ha0))
for y in np.unique(gt):
cy = sas[y == gt][:, y == gt]
ind_score[y == gt] = np.mean(cy, axis=1) - 10 * y
ind = np.argsort(ind_score)[::-1]
sas = sas[ind]
ind_score = np.zeros(len(ha0))
for y in np.unique(gt):
cy = sas[y == gt][:, y == gt]
ind_score[y == gt] = np.mean(cy, axis=0) - 10 * y
ind = np.argsort(ind_score)[::-1]
conf = sas[:, ind]
# conf = np.round(conf, decimals=2)
z = conf
# plot_heat_map(conf, show=True,
# fig_path='/D:/VirtualMachine/CheckPoints/MultiClustering/1014/FigNP/AlignConfusion.svg')
plt.figure()
# for i in range(z.shape[0]):
# for j in range(z.shape[1]):
# # plt.text(j, i, accs[i, j].round(2), ha="center", va="center", color="b", fontsize=12,
# # fontname='Times New Roman')
# plt.text(j, i, z[i, j], ha="center", va="center")
plt.imshow(z, interpolation='nearest', aspect='auto')
# plt.colorbar()
plt.axis('off')
# plt.subplots_adjust(top=0.957, bottom=0.081, left=0.108, right=0.977, hspace=0.2, wspace=0.2)
# plt.show()
plt.tight_layout()
plt.savefig(
figp,
transparent=True)
print()
if use_mx:
break
# def scatter():
# mx = False
# if mx:
# np_pth = 'D:/VirtualMachine/CheckPoints/MultiClustering/Repro/SURE/NoisyMNISTBoth.npz'
# data_f = np.load(np_pth, allow_pickle=False)
# feature_vec = data_f['feature_vec']
# type_vec = data_f['type_vec']
# group_vec = np.concatenate((np.zeros(30000), np.ones(30000)))
# else:
# np_pth = 'D:/Pengxin/Workplace/Codes/MultimodalityClustering/Np099.npz'
# data_f = np.load(np_pth, allow_pickle=False)
# feature_vec = data_f['feature_vec']
# type_vec = data_f['type_vec']
# group_vec = data_f['group_vec']
#
# sub_sample = 3000
# if len(feature_vec) > sub_sample * 2:
# ind = np.arange(int(len(feature_vec) // 2))
# np.random.shuffle(ind)
# ind = ind[:sub_sample]
# ind = np.concatenate((ind, ind + 30000))
# feature_vec = feature_vec[ind]
# group_vec = group_vec[ind]
# type_vec = type_vec[ind]
# # fv = feature_vec.reshape((len(feature_vec), -1))
# prefix = 'D:/Pengxin/Temp/mxF/'
# visualize2(feature_vec, type_vec, group_vec, None, prefix, )
#
# # for perplexity in [15,30]:
# # prefix = 'D:/Pengxin/Temp/mxF/P{}N{}'.format(perplexity, sub_sample)
# # vis_fea_multi = TSNE(perplexity=perplexity).fit_transform(
# # np.concatenate((fv[group_vec == 0], fv[group_vec == 1]), axis=1)
# # )
# # visualize_scatter(vis_fea_multi,
# # fig_path='{}Multi.{}'.format(prefix, post),
# # label_color=type_vec[group_vec == 0],
# # )
# #
# # vis_fea = TSNE(perplexity=perplexity).fit_transform(fv)
# #
# # visualize_scatter(vis_fea,
# # fig_path='{}Type.{}'.format(prefix, post),
# # label_color=type_vec,
# #
# # )
# # # visualize_scatter(vis_fea,
# # # fig_path='{}Cluster.svg'.format(prefix),
# # # label_color=pred_vec,
# # # label_shape=type_vec,
# # #
# # # )
# # visualize_scatter(vis_fea,
# # fig_path='{}Group.{}'.format(prefix, post),
# # label_color=group_vec,
# #
# # )
def plot_nrmse():
rs = pd.read_csv('D:/VirtualMachine/CheckPoints/MultiClustering/Repro/SMAIL/Recover/_Table_NRMSE_AVG_FuEp.csv')
# rs = pd.read_csv('/xlearning/pengxin/Codes/230318/IMvC_RunSet0405/_Table_NRMSE_AVG_FuEp.csv')
xx = []
yy = []
labels = []
spls = []
colors = []
for i, rec in enumerate(np.unique(rs.loc[:, 'Reconstruction'])):
if rec > 1:
continue
label = '$\mathcal{L}_{IsIL}@\lambda_{IsIL}$' + '={}'.format(rec)
rd = rs.loc[rs.loc[:, 'Reconstruction'] == rec]
x = rd.loc[:, 'Epoch']
y = rd.loc[:, 'loss_reconstruction_epoch']
xx.append(x)
yy.append(y)
labels.append(label)
spls.append('-')
colors.append('C{}'.format(i))
for i, rec in enumerate(np.unique(rs.loc[:, 'Reconstruction'])):
if rec > 1:
continue
label = '$NRMSE@\lambda_{IsIL}$' + '={}'.format(rec)
rd = rs.loc[rs.loc[:, 'Reconstruction'] == rec]
ind = pd.isna(rd.loc[:, 'rnmse0']).values != True
rd2 = rd.loc[ind]
ind = rd2.loc[:, 'Epoch'].values > 0
rd2 = rd2.loc[ind]
x = rd2.loc[:, 'Epoch']
y = rd2.loc[:, 'rnmse0']
xx.append(x)
yy.append(y)
labels.append(label)
spls.append('--')
colors.append('C{}'.format(i))
# np_pth = 'D:/VirtualMachine/CheckPoints/MultiClustering/Repro/SURE/nrmse_list.npz'
# data_f = np.load(np_pth, allow_pickle=True)
# data0 = np.asarray([it.cpu().numpy() for it in data_f['data0']])
# data1 = np.asarray([it.cpu().numpy() for it in data_f['data1']])
# y = (data1 + data0)/2
# x = np.arange(len(y))
# xx.append(x)
# yy.append(y)
# labels.append('SURE')
# spls.append('-.')
# colors.append('C{}'.format(6))
| visualize_plot(xx, yy, labels=labels, show=True,
| 5 | 2023-12-21 08:50:36+00:00 | 8k |
botcs/wolfson-scheduler | tests/test_solver.py | [
{
"identifier": "unravel_indices",
"path": "solver.py",
"snippet": "def unravel_indices(indices, shape):\n coord = []\n\n for dim in reversed(shape):\n coord.append(indices % dim)\n indices = indices // dim\n\n coord = torch.stack(coord[::-1], dim=-1)\n\n return coord"
},
{
"identifier": "generalized_outer_addition",
"path": "solver.py",
"snippet": "def generalized_outer_addition(vectors, output=None):\n \"\"\"\n Corrected function to compute the outer addition of N K-dimensional vectors using broadcasting.\n This function is equivalent to the following code:\n ```\n result = torch.zeros((K1, K2, ..., KN))\n for idx1 in range(K1):\n for idx2 in range(K2):\n ...\n result[idx1, idx2, ..., idxn] = vectors[idx1] + vectors[idx2] + ... + vectors[idxn]\n ```\n However, it is much faster because it uses pre-computed sums and sums of squares.\n\n :param vectors: List of N vectors of shape (K1, K2, ..., KN)\n :param output: Optional output tensor\n if provided, must be of shape (K1, K2, ..., KN)\n :return: Tensor of shape (K1, K2, ..., KN)\n \"\"\"\n\n # Assert all vectors are on the same device\n device = vectors[0].device\n assert all(\n v.device == device for v in vectors\n ), \"All vectors must be on the same device\"\n\n # Number of vectors (N) and dimensions (K)\n # N, K = vectors.shape\n N = len(vectors)\n Ks = [len(v) for v in vectors]\n if output is None:\n output = torch.zeros(Ks, dtype=vectors[0].dtype, device=vectors[0].device)\n else:\n assert output.shape == tuple(Ks), \"Output tensor has incorrect shape\"\n output.zero_()\n\n # Reshape each vector to have a unique non-singleton dimension\n for i in range(N):\n expanded_shape = [1] * N\n expanded_shape[i] = Ks[i]\n reshaped_vector = vectors[i].view(*expanded_shape)\n output += reshaped_vector\n\n return output"
},
{
"identifier": "compute_variances",
"path": "solver.py",
"snippet": "def compute_variances(X, Y):\n \"\"\"\n Compute variances between all combinations of vectors in X and Y.\n This function is equivalent to the following code:\n ```\n variances = torch.zeros((X.size(0), Y.size(0)))\n for i in range(X.size(0)):\n for j in range(Y.size(0)):\n concatenated = torch.cat((X[i], Y[j]))\n variances[i, j] = torch.var(concatenated, unbiased=False)\n ```\n However, it is much faster because it uses pre-computed sums and sums of squares.\n\n\n :param X: Tensor of shape (N, K)\n :param Y: Tensor of shape (M, L)\n \"\"\"\n\n # Compute sums and sums of squares for X\n sum_X = torch.sum(X, dim=1)\n sum_sq_X = torch.sum(X**2, dim=1)\n\n # Compute sums and sums of squares for Y\n sum_Y = torch.sum(Y, dim=1)\n sum_sq_Y = torch.sum(Y**2, dim=1)\n\n # Lengths of vectors in X and Y\n len_X = X.shape[1]\n len_Y = Y.shape[1]\n\n # Broadcasting sums and sum of squares for all combinations\n total_sum = sum_X.unsqueeze(1) + sum_Y.unsqueeze(0)\n total_sum_sq = sum_sq_X.unsqueeze(1) + sum_sq_Y.unsqueeze(0)\n total_len = len_X + len_Y\n\n # Compute variances\n mean = total_sum / total_len\n variances = total_sum_sq / total_len - mean**2\n\n return variances"
},
{
"identifier": "get_max_numel",
"path": "solver.py",
"snippet": "def get_max_numel(dtype, memory_capacity=None, device=\"cpu\"):\n \"\"\"\n Compute the maximum number of elements that fit in specified memory.\n\n :param dtype: Data type of the tensor (e.g., torch.float32)\n :param memory_capacity: Memory capacity in bytes\n :param device: 'cpu' or 'cuda'\n :return: maximum number of elements that fit\n \"\"\"\n\n # Get memory capacity\n if memory_capacity is None:\n memory_capacity = get_free_memory(device)\n\n # Calculate maximum number of elements that fit\n element_size = torch.tensor(\n [], dtype=dtype\n ).element_size() # Size in bytes of one element\n max_numel = memory_capacity // element_size\n\n return max_numel"
},
{
"identifier": "check_matrix_fit_and_num_chunks",
"path": "solver.py",
"snippet": "def check_matrix_fit_and_num_chunks(\n dimensions, dtype, memory_capacity=None, device=\"cpu\"\n):\n \"\"\"\n Check if a tensor of given dimensions and data type fits in specified memory.\n If not, return chunk sizes that maximize the capacity, slicing only along the first dimension.\n\n :param dimensions: Tuple of dimensions for the tensor\n :param dtype: Data type of the tensor (e.g., torch.float32)\n :param memory_capacity: Memory capacity in bytes\n :param device: 'cpu' or 'cuda'\n :return: number of chunks along the first dimension\n \"\"\"\n\n # Get memory capacity\n if memory_capacity is None:\n memory_capacity = get_memory_capacity(device)\n\n # Calculate total number of elements\n total_elements = 1\n for dim in dimensions:\n total_elements *= dim\n\n element_size = torch.tensor(\n [], dtype=dtype\n ).element_size() # Size in bytes of one element\n total_size = total_elements * element_size # Total memory required for the tensor\n\n if total_size <= memory_capacity:\n return 1\n\n # If doesn't fit, calculate chunk size for the first dimension\n other_dims_product = 1\n for dim in dimensions[1:]:\n other_dims_product *= dim\n\n max_first_dim_size = memory_capacity // (other_dims_product * element_size)\n if max_first_dim_size == 0:\n raise ValueError(\"Tensor does not fit in memory.\")\n\n num_chunks = math.ceil(dimensions[0] / max_first_dim_size)\n\n return num_chunks"
},
{
"identifier": "convert_property_to_categorical",
"path": "solver.py",
"snippet": "def convert_property_to_categorical(property):\n \"\"\"\n Convert the properties to a categorical variable.\n\n :param property: List of properties for each rower.\n Shape: (num_rowers)\n dtype: Any\n\n :return: Tensor of categorical properties.\n Shape: (num_rowers)\n dtype: torch.long\n \"\"\"\n\n unique_properties = set()\n for p in property:\n unique_properties.add(p)\n unique_properties = sorted(list(unique_properties))\n property = [unique_properties.index(p) for p in property]\n property = torch.tensor(property)\n return property"
},
{
"identifier": "extract_best_assignment",
"path": "solver.py",
"snippet": "def extract_best_assignment(assignments_per_week, total_score):\n \"\"\"\n Extract the best assignment for each outing.\n\n :param assignments_per_week: Tensor of assignments per week.\n shape: (num_outings, num_combinations, num_rowers)\n :param total_score: Tensor of total score for each assignment.\n shape: (num_combinations, num_combinations, ..., num_combinations) x num_outings\n\n :return: Tensor of best assignment per outing.\n shape: (num_outings, 1, num_rowers)\n\n \"\"\"\n\n num_outings, num_combinations, num_rowers = assignments_per_week.shape\n\n # Find the top assignments\n # top_inds = torch.argsort(total_score.flatten(), descending=True)[0]\n top_idx = torch.argmax(total_score.flatten())\n\n top_idx = unravel_indices(top_idx, total_score.shape)\n\n # top_inds tells us for each outing the index of the top assignment\n top_assignment = torch.zeros(\n num_outings,\n 1,\n num_rowers,\n dtype=torch.uint8,\n device=assignments_per_week.device,\n )\n for outing_idx, comb_idx in enumerate(top_idx):\n top_assignment[outing_idx] = assignments_per_week[outing_idx, comb_idx]\n\n return top_assignment"
},
{
"identifier": "get_no_overlap_inds",
"path": "solver.py",
"snippet": "@torch.no_grad()\ndef get_no_overlap_inds(A, B):\n \"\"\"\n Perform matrix multiplication of A and B in chunks.\n Return the indices of rows in A and columns in B that have no overlap.\n Overlap is defined as a non-zero value in the product of A and B.\n\n :param A: First matrix\n shape: (num_combinations_A, num_rowers)\n :param B: Second matrix\n shape: (num_combinations_B, num_rowers)\n :param chunk_sizes: Chunk sizes for the first dimension of A\n :return: indices of rows in A and columns in B that have no overlap\n \"\"\"\n\n # check if the product of the two matrices fits in memory\n # if not, chunk the matrices and check for overlap in chunks\n num_chunks = check_matrix_fit_and_num_chunks(\n (A.shape[0], A.shape[1], B.shape[0]), dtype=A.dtype, device=A.device\n )\n\n # num_chunks = 1\n def multiply_and_find(A_chunk, B):\n # counts the number of double-assignments for each rower between the two boats\n assignment_count = torch.matmul(A_chunk, B.T)\n no_overlap_inds = torch.nonzero(assignment_count == 0)\n return no_overlap_inds\n\n # if the product fits in memory, check for overlap in one go\n if num_chunks == 1:\n return multiply_and_find(A, B)\n\n A_chunks = torch.chunk(A, num_chunks)\n\n # otherwise, chunk the matrices and check for overlap in chunks\n no_overlap_inds = []\n offset_idx = 0\n for A_chunk in tqdm.tqdm(A_chunks):\n # no_overlap_inds.append(multiply_and_find(A_chunk, B).tolist())\n chunk_inds = multiply_and_find(A_chunk, B)\n\n # add the chunk size to offset the indices\n chunk_inds[:, 0] += offset_idx\n offset_idx += A_chunk.shape[0]\n no_overlap_inds.append(chunk_inds)\n\n return torch.cat(no_overlap_inds)"
},
{
"identifier": "generate_binary_matrices",
"path": "solver.py",
"snippet": "@torch.no_grad()\ndef generate_binary_matrices(\n num_rowers,\n boat_sizes,\n device=\"cpu\",\n max_num_combinations=NUM_MAX_COMBINATION_PER_BOAT,\n):\n \"\"\"\n Generate binary matrices for each combination of rowers in boats.\n\n :param num_rowers: Total number of rowers\n :param boat_sizes: List of boat sizes\n \"\"\"\n per_boat_binary_matrices = []\n for boat_size in boat_sizes:\n # Precompute indices for combinations\n row_indices = []\n col_indices = []\n\n num_combinations = math.comb(num_rowers, boat_size)\n if num_combinations > max_num_combinations:\n M = torch.zeros((max_num_combinations, num_rowers), dtype=torch.bool)\n\n keep_indices = sample(\n torch.arange(num_combinations), k=max_num_combinations\n )\n keep_indices = keep_indices.sort().values\n i = 0\n for row, combination in enumerate(\n itertools.combinations(range(num_rowers), boat_size)\n ):\n if keep_indices[i] != row:\n continue\n for col in combination:\n row_indices.append(i)\n col_indices.append(col)\n i += 1\n if i == max_num_combinations:\n break\n\n else:\n M = torch.zeros((num_combinations, num_rowers), dtype=torch.bool)\n for row, combination in enumerate(\n itertools.combinations(range(num_rowers), boat_size)\n ):\n for col in combination:\n row_indices.append(row)\n col_indices.append(col)\n\n # Use advanced indexing to fill the matrix\n M[row_indices, col_indices] = 1\n per_boat_binary_matrices.append(M)\n return per_boat_binary_matrices"
},
{
"identifier": "eliminate_invalid_boats",
"path": "solver.py",
"snippet": "@torch.no_grad()\ndef eliminate_invalid_boats(\n binary_matrix, rower_sides, num_max_combinations=NUM_MAX_COMBINATION_PER_BOAT\n):\n \"\"\"\n Eliminate invalid boats from a binary matrix.\n\n Currently we consider a boat invalid if there are more rowers on one side than the other.\n We represent stroke as 1 and bow as -1 and 0 for no preference.\n\n :param binary_matrix: Binary matrix of rower combinations\n shape: (num_combinations, num_rowers)\n :return: Binary matrix with invalid boats eliminated\n \"\"\"\n\n # gather the rower sides for each rower in each boat for each combination\n num_assigned_rowers = binary_matrix[0].sum()\n # assert each row has the same number of assigned rowers\n assert (binary_matrix.sum(dim=1) == num_assigned_rowers).all()\n assert len(rower_sides) == binary_matrix.shape[1]\n idx = binary_matrix.nonzero()[:, 1].view(len(binary_matrix), num_assigned_rowers)\n outings = rower_sides[idx]\n\n # Compute the offset between the number of stroke and bow seats\n offset = torch.sum(outings, dim=1).abs()\n # Determine the number of rowers that are both stroke and bow seat\n count_where_both = torch.sum(outings == 0, dim=1)\n\n # Eliminate invalid boats\n is_valid = count_where_both >= offset\n binary_matrix = binary_matrix[is_valid]\n\n if len(binary_matrix) > num_max_combinations:\n binary_matrix = sample(binary_matrix, k=num_max_combinations)\n\n return binary_matrix"
},
{
"identifier": "generate_valid_assignments",
"path": "solver.py",
"snippet": "@torch.no_grad()\ndef generate_valid_assignments(\n single_boat_bin_matrices, num_max_combinations=NUM_MAX_PAIRWISE_COMBINATION\n):\n \"\"\"\n Generate valid combinations of rowers across multiple boats on a single outing\n\n :param matrices: List of binary matrices, each representing combinations for a boat.\n shape: List[\n Tensor(num_combinations_1, num_rowers),\n Tensor(num_combinations_2, num_rowers),\n ...\n Tensor(num_combinations_n, num_rowers),\n ]\n :return: Tensor of valid combinations across all boats.\n \"\"\"\n assert len(single_boat_bin_matrices) > 0, \"Must have at least one boat\"\n assert all(\n m.shape[1] == single_boat_bin_matrices[0].shape[1]\n for m in single_boat_bin_matrices\n ), \"All matrices must have the same number of rowers\"\n\n assignments = single_boat_bin_matrices[0]\n for boat_ind, boat_B in enumerate(single_boat_bin_matrices[1:], start=2):\n no_overlap_inds = get_no_overlap_inds(assignments, boat_B)\n\n if len(no_overlap_inds) > num_max_combinations:\n no_overlap_inds = sample(no_overlap_inds, k=num_max_combinations)\n\n A_inds, B_inds = no_overlap_inds.T\n\n # update boat_A to be the combination of boat_A and boat_B with no overlap\n assignments = assignments[A_inds] + boat_B[B_inds] * boat_ind\n return assignments"
},
{
"identifier": "evaluate_skill_variance",
"path": "solver.py",
"snippet": "def evaluate_skill_variance(assignments_per_week, skill_levels, dtype=torch.float16):\n \"\"\"\n This relies on the notion that the skill levels entered are not categorical\n but integer values (or can be mapped to ordered categories, e.g. M1 > M2 > M3 ... )\n\n :param assignments_per_week: Tensor of assignments per week.\n shape: (num_outings, num_combinations, num_rowers)\n\n :param skill_levels: Tensor of skill levels for each rower.\n shape: (num_rowers,)\n\n :return: Tensor of variance for each combination in each outing.\n shape: (num_combinations, num_combinations, ..., num_combinations) x num_outings\n \"\"\"\n\n # assert that the number of assigned rowers is the same for each outing\n for outing_idx in range(len(assignments_per_week)):\n num_assigned_rowers = assignments_per_week[outing_idx][0].sum()\n assert (\n assignments_per_week[outing_idx].sum(dim=1) == num_assigned_rowers\n ).all()\n\n num_outings, num_combinations, num_rowers = assignments_per_week.shape\n max_num_boats = assignments_per_week.max().item()\n outing_variance = torch.zeros(\n num_outings, num_combinations, device=assignments_per_week.device, dtype=dtype\n )\n for boat_idx in range(max_num_boats):\n boat_assignment = assignments_per_week == boat_idx + 1\n # we use binary masking\n X = skill_levels * boat_assignment\n\n # but we need to make sure that we don't include the rowers that are not assigned\n X_sum = X.sum(dim=2)\n X_len = boat_assignment.sum(dim=2)\n X_mean = X_sum / X_len\n\n boat_variance = ((X - X_mean.unsqueeze_(2)) * boat_assignment) ** 2\n boat_variance = boat_variance.sum(dim=2)\n\n # we use the unbiased variance since the sample size is small\n boat_variance /= torch.clamp(X_len - 1, min=1)\n\n outing_variance += boat_variance\n\n # now we need to compute the variance between the outings across the week\n week_variance = generalized_outer_addition(outing_variance)\n return week_variance"
},
{
"identifier": "evaluate_num_preferred_outings",
"path": "solver.py",
"snippet": "def evaluate_num_preferred_outings(\n assignments_per_week, num_preferred_outings, dtype=torch.long\n):\n # assert that the number of assigned rowers is the same for each outing\n for outing_idx in range(len(assignments_per_week)):\n num_assigned_rowers = assignments_per_week[outing_idx, 0].sum()\n assert (\n assignments_per_week[outing_idx].sum(dim=1) == num_assigned_rowers\n ).all()\n\n assignments_per_week = assignments_per_week > 0\n\n num_outings, num_combinations, num_rowers = assignments_per_week.shape\n\n # just to pin memory and reuse the output tensor\n num_assignment_per_rower = torch.zeros(\n [num_combinations] * num_outings,\n device=assignments_per_week.device,\n dtype=dtype,\n )\n\n week_over_assignment = torch.zeros(\n [num_combinations] * num_outings,\n device=assignments_per_week.device,\n dtype=dtype,\n )\n\n for rower_idx in range(num_rowers):\n num_assignment_per_rower = generalized_outer_addition(\n assignments_per_week[:, :, rower_idx], output=num_assignment_per_rower\n )\n num_preferred_outings_per_rower = num_preferred_outings[rower_idx]\n assignment_diff = num_assignment_per_rower - num_preferred_outings_per_rower\n over_assignment = assignment_diff.clamp_(min=0)\n week_over_assignment += over_assignment\n\n return week_over_assignment"
},
{
"identifier": "evaluate_assignments_per_week",
"path": "solver.py",
"snippet": "def evaluate_assignments_per_week(\n assignments_per_week, properties, weights, return_stats=False\n):\n \"\"\"\n Evaluate the assignments per week.\n\n :param assignments_per_week: Tensor of num_outings different assignments for the week.\n Shape: (num_outings, num_combinations, num_rowers)\n dtype: torch.uint8\n :param properties: dict of Tensors of properties.\n Shape: {property_name: Tensor(num_rowers)}\n dtype: torch.long\n :param weights: dict of weights for each property.\n Shape: {property_name: float}\n :param return_stats: Whether to return the stats for each property.\n\n :return: Total score for the week.\n Shape: (num_combinations, num_combinations, ..., num_combinations) x num_outings\n :return: Stats for each weight category.\n \"\"\"\n\n # Compute variance of skill levels\n week_variance = evaluate_skill_variance(\n assignments_per_week, properties[\"skill_level\"]\n )\n\n # Compute number of preferred outings\n week_num_preferred_outings = evaluate_num_preferred_outings(\n assignments_per_week, properties[\"num_preferred_outings\"]\n )\n\n # Compute total score\n total_score = (\n weights[\"skill variance\"] * week_variance\n + weights[\"over assignment\"] * week_num_preferred_outings\n )\n\n if return_stats:\n stats = {\n \"values\": {\n \"skill variance\": week_variance,\n \"over assignment\": week_num_preferred_outings,\n },\n \"weights\": weights,\n \"total\": total_score,\n }\n return total_score, stats\n\n return total_score"
},
{
"identifier": "permute_top_assignments",
"path": "solver.py",
"snippet": "def permute_top_assignments(\n valid_assignments,\n assignments_per_week,\n total_scores,\n num_permutations=10,\n randomize_permutations=True,\n):\n \"\"\"\n Permute the top assignments for the week.\n \"\"\"\n num_outings, num_combinations, num_rowers = assignments_per_week.shape\n\n assert len(valid_assignments) == num_outings, \"Must have the same number of outings\"\n assert (\n len(assignments_per_week) == num_outings\n ), \"Must have the same number of outings\"\n if any(m.ndim != 2 for m in valid_assignments):\n raise ValueError(\"All outing assignments have to be 2D for every outing\")\n if any(m.shape[1] != num_rowers for m in valid_assignments):\n raise ValueError(\n \"All outing assignments have to have the same number of rowers\"\n )\n if any((m.sum(dim=1) != m[0].sum()).any() for m in valid_assignments):\n raise ValueError(\n f\"In each combination of every outing,\\\n the number of rowers assigned must be the same.\"\n )\n\n # assert all(\n # m.ndim == 2\n # for m in valid_assignments\n # ), f\"All matrices must have the same number of dim: {[m.shape for m in valid_assignments]}\"\n # assert all(\n # m.shape[1] == num_rowers\n # for m in valid_assignments\n # ), \"All matrices must have the same number of rowers\"\n # for outing_idx in range(len(valid_assignments)):\n # assert (valid_assignments[outing_idx].sum() == valid_assignments[outing_idx][0].sum()).all(),\\\n # \"Combinations must have the same number of rowers assigned in an outing\"\n\n # assert that the number of assigned rowers is the same for each outing\n for outing_idx in range(len(assignments_per_week)):\n num_assigned_rowers = assignments_per_week[outing_idx, 0].sum()\n assert (\n assignments_per_week[outing_idx].sum(dim=1) == num_assigned_rowers\n ).all()\n\n best_assignment = extract_best_assignment(assignments_per_week, total_scores)\n\n # in the permutations we fix all outings except the outing we are permuting\n permuted_assignment = best_assignment.repeat(1, num_permutations + 1, 1)\n for outing_idx in range(len(assignments_per_week)):\n # just copy the best assignment num_permutations times\n if randomize_permutations:\n # we need to make sure that the best assignment is included\n permuted_assignment[outing_idx, 1:] = sample(\n valid_assignments[outing_idx], k=num_permutations\n )\n else:\n permuted_assignment[outing_idx, 1:] = valid_assignments[outing_idx][\n :num_permutations\n ]\n return permuted_assignment"
}
] | import torch
import unittest
import math
from unittest.mock import patch
from solver import (
unravel_indices,
generalized_outer_addition,
compute_variances,
get_max_numel,
check_matrix_fit_and_num_chunks,
convert_property_to_categorical,
extract_best_assignment,
get_no_overlap_inds,
generate_binary_matrices,
eliminate_invalid_boats,
generate_valid_assignments,
evaluate_skill_variance,
evaluate_num_preferred_outings,
evaluate_assignments_per_week,
permute_top_assignments,
) | 5,894 |
class TestUnravelIndices(unittest.TestCase):
def test_simple_case(self):
indices = torch.tensor([0, 1, 2, 3, 4, 5])
shape = (2, 3)
expected_result = torch.tensor([[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2]])
|
class TestUnravelIndices(unittest.TestCase):
def test_simple_case(self):
indices = torch.tensor([0, 1, 2, 3, 4, 5])
shape = (2, 3)
expected_result = torch.tensor([[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2]]) | result = unravel_indices(indices, shape) | 0 | 2023-12-18 05:12:36+00:00 | 8k |
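A hedged sketch of how the cropped test above plausibly continues, based on this record's next_line field (`result = unravel_indices(indices, shape)`); the final assertion is an assumption and is not shown in the source:

        result = unravel_indices(indices, shape)
        self.assertTrue(torch.equal(result, expected_result))  # hypothetical check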
Azure-Samples/functions-python-web-crawler | .venv/Lib/site-packages/azure/functions/decorators/function_app.py | [
{
"identifier": "GenericInputBinding",
"path": ".venv/Lib/site-packages/azure/functions/decorators/generic.py",
"snippet": "class GenericInputBinding(InputBinding):\n\n @staticmethod\n def get_binding_name():\n pass\n\n def __init__(self,\n name: str,\n type: str,\n data_type: Optional[DataType] = None,\n **kwargs):\n super().__init__(name=name, data_type=data_type, type=type)"
},
{
"identifier": "GenericTrigger",
"path": ".venv/Lib/site-packages/azure/functions/decorators/generic.py",
"snippet": "class GenericTrigger(Trigger):\n\n @staticmethod\n def get_binding_name():\n pass\n\n def __init__(self,\n name: str,\n type: str,\n data_type: Optional[DataType] = None,\n **kwargs):\n super().__init__(name=name, data_type=data_type, type=type)"
},
{
"identifier": "GenericOutputBinding",
"path": ".venv/Lib/site-packages/azure/functions/decorators/generic.py",
"snippet": "class GenericOutputBinding(OutputBinding):\n\n @staticmethod\n def get_binding_name():\n pass\n\n def __init__(self,\n name: str,\n type: str,\n data_type: Optional[DataType] = None,\n **kwargs):\n super().__init__(name=name, data_type=data_type, type=type)"
},
{
"identifier": "WarmUpTrigger",
"path": ".venv/Lib/site-packages/azure/functions/decorators/warmup.py",
"snippet": "class WarmUpTrigger(Trigger):\n @staticmethod\n def get_binding_name() -> str:\n return WARMUP_TRIGGER\n\n def __init__(self,\n name: str,\n data_type: Optional[DataType] = None,\n **kwargs) -> None:\n super().__init__(name=name, data_type=data_type)"
},
{
"identifier": "AsgiMiddleware",
"path": ".venv/Lib/site-packages/azure/functions/_http_asgi.py",
"snippet": "class AsgiMiddleware:\n \"\"\"This middleware is to adapt an ASGI supported Python server\n framework into Azure Functions. It can be used by either calling the\n .handle() function or exposing the .main property in a HttpTrigger.\n \"\"\"\n _logger = logging.getLogger('azure.functions.AsgiMiddleware')\n _usage_reported = False\n\n def __init__(self, app):\n \"\"\"Instantiate an ASGI middleware to convert Azure Functions HTTP\n request into ASGI Python object. Example on handling ASGI app in a HTTP\n trigger by overwriting the .main() method:\n\n import azure.functions as func\n\n from FastapiApp import app\n\n main = func.AsgiMiddleware(app).main\n \"\"\"\n if not self._usage_reported:\n self._logger.debug(\"Starting Azure Functions ASGI middleware.\")\n self._usage_reported = True\n\n self._app = app\n self.main = self._handle\n\n def handle(self, req: HttpRequest, context: Optional[Context] = None):\n \"\"\"Deprecated. Please use handle_async instead:\n\n import azure.functions as func\n\n from FastapiApp import app\n\n async def main(req, context):\n return await func.AsgiMiddleware(app).handle_async(req, context)\n \"\"\"\n warn(\"handle() is deprecated. Please await .handle_async() instead.\",\n DeprecationWarning, stacklevel=2)\n self._logger.warning(\n \"handle() is deprecated. Please `await .handle_async()` instead.\")\n return self._handle(req, context)\n\n def _handle(self, req, context):\n asgi_request = AsgiRequest(req, context)\n scope = asgi_request.to_asgi_http_scope()\n asgi_response = asyncio.run(\n AsgiResponse.from_app(self._app, scope, req.get_body())\n )\n\n return asgi_response.to_func_response()\n\n async def handle_async(self,\n req: HttpRequest,\n context: Optional[Context] = None):\n \"\"\"Method to convert an Azure Functions HTTP request into a ASGI\n Python object. Example on handling ASGI app in a HTTP trigger by\n calling .handle_async() in .main() method:\n\n import azure.functions as func\n\n from FastapiApp import app\n\n async def main(req, context):\n return await func.AsgiMiddleware(app).handle_async(req,\n context)\n \"\"\"\n return await self._handle_async(req, context)\n\n async def _handle_async(self, req, context):\n asgi_request = AsgiRequest(req, context)\n scope = asgi_request.to_asgi_http_scope()\n asgi_response = await AsgiResponse.from_app(self._app,\n scope,\n req.get_body())\n return asgi_response.to_func_response()"
},
{
"identifier": "WsgiMiddleware",
"path": ".venv/Lib/site-packages/azure/functions/_http_wsgi.py",
"snippet": "class WsgiRequest:\nclass WsgiResponse:\nclass WsgiMiddleware:\n def __init__(self,\n func_req: HttpRequest,\n func_ctx: Optional[Context] = None):\n def to_environ(self, errors_buffer: StringIO) -> Dict[str, Any]:\n def _get_port(self, parsed_url, lowercased_headers: Dict[str, str]) -> int:\n def _get_http_headers(self,\n func_headers: Dict[str, str]) -> Dict[str, str]:\n def __init__(self):\n def from_app(cls, app, environ) -> 'WsgiResponse':\n def to_func_response(self) -> HttpResponse:\n def _start_response(self, status: str, response_headers: List[Any]):\n def __init__(self, app):\n def handle(self, req: HttpRequest, context: Optional[Context] = None):\n def _handle(self, req, context):\n def _handle_errors(self, wsgi_response):"
}
] | import abc
import json
import logging
from abc import ABC
from datetime import time
from typing import Any, Callable, Dict, List, Optional, Union, \
Iterable
from azure.functions.decorators.blob import BlobTrigger, BlobInput, BlobOutput
from azure.functions.decorators.core import Binding, Trigger, DataType, \
AuthLevel, SCRIPT_FILE_NAME, Cardinality, AccessRights
from azure.functions.decorators.cosmosdb import CosmosDBTrigger, \
CosmosDBOutput, CosmosDBInput, CosmosDBTriggerV3, CosmosDBInputV3, \
CosmosDBOutputV3
from azure.functions.decorators.eventgrid import EventGridTrigger, \
EventGridOutput
from azure.functions.decorators.eventhub import EventHubTrigger, EventHubOutput
from azure.functions.decorators.http import HttpTrigger, HttpOutput, \
HttpMethod
from azure.functions.decorators.queue import QueueTrigger, QueueOutput
from azure.functions.decorators.servicebus import ServiceBusQueueTrigger, \
ServiceBusQueueOutput, ServiceBusTopicTrigger, \
ServiceBusTopicOutput
from azure.functions.decorators.table import TableInput, TableOutput
from azure.functions.decorators.timer import TimerTrigger
from azure.functions.decorators.utils import parse_singular_param_to_enum, \
parse_iterable_param_to_enums, StringifyEnumJsonEncoder
from azure.functions.http import HttpRequest
from .generic import GenericInputBinding, GenericTrigger, GenericOutputBinding
from .warmup import WarmUpTrigger
from .._http_asgi import AsgiMiddleware
from .._http_wsgi import WsgiMiddleware, Context | 3,850 | The generic_output_binding decorator adds :class:`GenericOutputBinding`
to the :class:`FunctionBuilder` object for building :class:`Function`
object used in worker function indexing model.
This is equivalent to defining a generic output binding in the
        function.json which enables the function to write data to a
        custom-defined output destination.
All optional fields will be given default value by function host when
they are parsed by function host.
Ref: https://aka.ms/azure-function-binding-custom
:param arg_name: The name of output parameter in the function code.
:param type: The type of binding.
:param data_type: Defines how Functions runtime should treat the
parameter value.
:param kwargs: Keyword arguments for specifying additional binding
fields to include in the binding json.
:return: Decorator function.
"""
@self._configure_function_builder
def wrap(fb):
def decorator():
fb.add_binding(
binding=GenericOutputBinding(
name=arg_name,
type=type,
data_type=parse_singular_param_to_enum(data_type,
DataType),
**kwargs))
return fb
return decorator()
return wrap
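# A hedged usage sketch for the generic_output_binding decorator documented
# above (assumes a v2-model function_app.py with `app = func.FunctionApp()`;
# the "queue" type and its extra fields depend on the installed extension):
#
#     @app.route(route="enqueue")
#     @app.generic_output_binding(arg_name="outmsg", type="queue",
#                                 queueName="outqueue",
#                                 connection="AzureWebJobsStorage")
#     def enqueue(req: func.HttpRequest, outmsg: func.Out[str]) -> str:
#         outmsg.set(req.get_body().decode("utf-8"))
#         return "queued"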
class FunctionRegister(DecoratorApi, HttpFunctionsAuthLevelMixin, ABC):
def __init__(self, auth_level: Union[AuthLevel, str], *args, **kwargs):
"""Interface for declaring top level function app class which will
be directly indexed by Python Function runtime.
:param auth_level: Determines what keys, if any, need to be present
on the request in order to invoke the function.
:param args: Variable length argument list.
:param kwargs: Arbitrary keyword arguments.
"""
DecoratorApi.__init__(self, *args, **kwargs)
HttpFunctionsAuthLevelMixin.__init__(self, auth_level, *args, **kwargs)
self._require_auth_level: Optional[bool] = None
def get_functions(self) -> List[Function]:
"""Get the function objects in the function app.
:return: List of functions in the function app.
"""
functions = [function_builder.build(self.auth_level)
for function_builder in self._function_builders]
if not self._require_auth_level:
self._require_auth_level = any(
function.is_http_function() for function in functions)
if not self._require_auth_level:
logging.warning(
'Auth level is not applied to non http '
'function app. Ref: '
'https://docs.microsoft.com/azure/azure-functions/functions'
'-bindings-http-webhook-trigger?tabs=in-process'
'%2Cfunctionsv2&pivots=programming-language-python#http-auth')
return functions
def register_functions(self, function_container: DecoratorApi) -> None:
"""Register a list of functions in the function app.
:param function_container: Instance extending :class:`DecoratorApi`
which contains a list of functions.
"""
if isinstance(function_container, FunctionRegister):
raise TypeError('functions can not be type of FunctionRegister!')
self._function_builders.extend(function_container._function_builders)
register_blueprint = register_functions
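# A minimal sketch of the blueprint registration flow described above (file
# and symbol names are illustrative assumptions):
#
#     # http_blueprint.py
#     import azure.functions as func
#     bp = func.Blueprint()
#
#     @bp.route(route="ping")
#     def ping(req: func.HttpRequest) -> func.HttpResponse:
#         return func.HttpResponse("pong")
#
#     # function_app.py
#     from http_blueprint import bp
#     app = func.FunctionApp()
#     app.register_functions(bp)  # register_blueprint(bp) is an alias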
class FunctionApp(FunctionRegister, TriggerApi, BindingApi):
"""FunctionApp object used by worker function indexing model captures
user defined functions and metadata.
Ref: https://aka.ms/azure-function-ref
"""
def __init__(self,
http_auth_level: Union[AuthLevel, str] = AuthLevel.FUNCTION):
"""Constructor of :class:`FunctionApp` object.
:param http_auth_level: Determines what keys, if any, need to be
present
on the request in order to invoke the function.
"""
super().__init__(auth_level=http_auth_level)
class Blueprint(TriggerApi, BindingApi):
"""Functions container class where all the functions
loaded in it can be registered in :class:`FunctionRegister` subclasses
but itself can not be indexed directly. The class contains all existing
supported trigger and binding decorator functions.
"""
pass
class ExternalHttpFunctionApp(FunctionRegister, TriggerApi, ABC):
"""Interface to extend for building third party http function apps."""
@abc.abstractmethod
def _add_http_app(self,
http_middleware: Union[
| # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
class Function(object):
"""The function object represents a function in Function App. It
encapsulates function metadata and callable and used in the worker
function indexing model. Ref: https://aka.ms/azure-function-ref
"""
def __init__(self, func: Callable[..., Any], script_file: str):
"""Constructor of :class:`FunctionBuilder` object.
:param func: User defined python function instance.
:param script_file: File name indexed by worker to find function.
"""
self._name: str = func.__name__
self._func = func
self._trigger: Optional[Trigger] = None
self._bindings: List[Binding] = []
self.function_script_file = script_file
self.http_type = 'function'
self._is_http_function = False
def add_binding(self, binding: Binding) -> None:
"""Add a binding instance to the function.
:param binding: The binding object to add.
"""
self._bindings.append(binding)
def add_trigger(self, trigger: Trigger) -> None:
"""Add a trigger instance to the function.
:param trigger: The trigger object to add.
:raises ValueError: Raises trigger already exists error if a trigger is
being added to a function which has trigger attached.
"""
if self._trigger:
raise ValueError("A trigger was already registered to this "
"function. Adding another trigger is not the "
"correct behavior as a function can only have one"
" trigger. Existing registered trigger "
f"is {self._trigger.get_dict_repr()} and New "
f"trigger "
f"being added is {trigger.get_dict_repr()}")
self._trigger = trigger
# We still add the trigger info to the bindings to ensure that
# function.json is complete
self._bindings.append(trigger)
def set_function_name(self, function_name: Optional[str] = None) -> None:
"""Set or update the name for the function if :param:`function_name`
is not None. If not set, function name will default to python
function name.
:param function_name: Name the function set to.
"""
if function_name:
self._name = function_name
def set_http_type(self, http_type: str) -> None:
"""Set or update the http type for the function if :param:`http_type`
.
:param http_type: Http function type.
"""
self.http_type = http_type
def is_http_function(self) -> bool:
return self._is_http_function
def get_trigger(self) -> Optional[Trigger]:
"""Get attached trigger instance of the function.
:return: Trigger instance or None.
"""
return self._trigger
def get_bindings(self) -> List[Binding]:
"""Get all the bindings attached to the function.
:return: Bindings attached to the function.
"""
return self._bindings
def get_raw_bindings(self) -> List[str]:
return [json.dumps(b.get_dict_repr(), cls=StringifyEnumJsonEncoder)
for b in self._bindings]
def get_bindings_dict(self) -> Dict:
"""Get dictionary representation of the bindings of the function.
:return: Dictionary representation of the bindings.
"""
return {"bindings": [b.get_dict_repr() for b in self._bindings]}
def get_dict_repr(self) -> Dict:
"""Get the dictionary representation of the function.
:return: The dictionary representation of the function.
"""
stub_f_json = {
"scriptFile": self.function_script_file
}
stub_f_json.update(self.get_bindings_dict()) # NoQA
return stub_f_json
def get_user_function(self) -> Callable[..., Any]:
"""Get the python function customer defined.
:return: The python function customer defined.
"""
return self._func
def get_function_name(self) -> str:
"""Get the function name.
:return: Function name.
"""
return self._name
def get_function_json(self) -> str:
"""Get the json stringified form of function.
:return: The json stringified form of function.
"""
return json.dumps(self.get_dict_repr(), cls=StringifyEnumJsonEncoder)
def __str__(self):
return self.get_function_json()
class FunctionBuilder(object):
def __init__(self, func, function_script_file):
self._function = Function(func, function_script_file)
def __call__(self, *args, **kwargs):
pass
def configure_function_name(self, function_name: str) -> 'FunctionBuilder':
self._function.set_function_name(function_name)
return self
def configure_http_type(self, http_type: str) -> 'FunctionBuilder':
self._function.set_http_type(http_type)
return self
def add_trigger(self, trigger: Trigger) -> 'FunctionBuilder':
self._function.add_trigger(trigger=trigger)
return self
def add_binding(self, binding: Binding) -> 'FunctionBuilder':
self._function.add_binding(binding=binding)
return self
def _validate_function(self,
auth_level: Optional[AuthLevel] = None) -> None:
"""
Validates the function information before building the function.
:param auth_level: Http auth level that will be set if http
trigger function auth level is None.
"""
function_name = self._function.get_function_name()
trigger = self._function.get_trigger()
if trigger is None:
raise ValueError(
f"Function {function_name} does not have a trigger. A valid "
f"function must have one and only one trigger registered.")
bindings = self._function.get_bindings()
if trigger not in bindings:
raise ValueError(
f"Function {function_name} trigger {trigger} not present"
f" in bindings {bindings}")
# Set route to function name if unspecified in the http trigger
# Set auth level to function app auth level if unspecified in the
# http trigger
if Trigger.is_supported_trigger_type(trigger, HttpTrigger):
if getattr(trigger, 'route', None) is None:
getattr(trigger, 'init_params').append('route')
setattr(trigger, 'route', function_name)
if getattr(trigger, 'auth_level',
None) is None and auth_level is not None:
getattr(trigger, 'init_params').append('auth_level')
setattr(trigger, 'auth_level',
parse_singular_param_to_enum(auth_level, AuthLevel))
self._function._is_http_function = True
def build(self, auth_level: Optional[AuthLevel] = None) -> Function:
"""
Validates and builds the function object.
:param auth_level: Http auth level that will be set if http
trigger function auth level is None.
"""
self._validate_function(auth_level)
return self._function
class DecoratorApi(ABC):
"""Interface which contains essential decorator function building blocks
to extend for creating new function app or blueprint classes.
"""
def __init__(self, *args, **kwargs):
self._function_builders: List[FunctionBuilder] = []
self._app_script_file: str = SCRIPT_FILE_NAME
@property
def app_script_file(self) -> str:
"""Name of function app script file in which all the functions
are defined. \n
Script file defined here is for placeholder purpose, please refer to
worker defined script file path as the single point of truth.
:return: Script file name.
"""
return self._app_script_file
def _validate_type(self,
func: Union[Callable[..., Any], FunctionBuilder]) \
-> FunctionBuilder:
"""Validate the type of the function object and return the created
:class:`FunctionBuilder` object.
:param func: Function object passed to
:meth:`_configure_function_builder`
:raises ValueError: Raise error when func param is neither
:class:`Callable` nor :class:`FunctionBuilder`.
:return: :class:`FunctionBuilder` object.
"""
if isinstance(func, FunctionBuilder):
fb = self._function_builders.pop()
elif callable(func):
fb = FunctionBuilder(func, self._app_script_file)
else:
raise ValueError(
"Unsupported type for function app decorator found.")
return fb
def _configure_function_builder(self, wrap) -> Callable[..., Any]:
"""Decorator function on user defined function to create and return
:class:`FunctionBuilder` object from :class:`Callable` func.
"""
def decorator(func):
fb = self._validate_type(func)
self._function_builders.append(fb)
return wrap(fb)
return decorator
def function_name(self, name: str) -> Callable[..., Any]:
"""Set name of the :class:`Function` object.
:param name: Name of the function.
:return: Decorator function.
"""
@self._configure_function_builder
def wrap(fb):
def decorator():
fb.configure_function_name(name)
return fb
return decorator()
return wrap
def http_type(self, http_type: str) -> Callable[..., Any]:
"""Set http type of the :class:`Function` object.
:param http_type: Http type of the function.
:return: Decorator function.
"""
@self._configure_function_builder
def wrap(fb):
def decorator():
fb.configure_http_type(http_type)
return fb
return decorator()
return wrap
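# A brief sketch of the DecoratorApi helpers above (assumes a module-level
# `app = func.FunctionApp()`); function_name overrides the indexed name,
# which otherwise defaults to the Python function name:
#
#     @app.function_name(name="HttpExample")
#     @app.route(route="items")
#     def main(req: func.HttpRequest) -> func.HttpResponse:
#         return func.HttpResponse("ok")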
class HttpFunctionsAuthLevelMixin(ABC):
"""Interface to extend for enabling function app level http
authorization level setting"""
def __init__(self, auth_level: Union[AuthLevel, str], *args, **kwargs):
self._auth_level = AuthLevel[auth_level] \
if isinstance(auth_level, str) else auth_level
@property
def auth_level(self) -> AuthLevel:
"""Authorization level of the function app. Will be applied to the http
trigger functions which do not have authorization level specified.
:return: Authorization level of the function app.
"""
return self._auth_level
class TriggerApi(DecoratorApi, ABC):
"""Interface to extend for using existing trigger decorator functions."""
def route(self,
route: Optional[str] = None,
trigger_arg_name: str = 'req',
binding_arg_name: str = '$return',
methods: Optional[
Union[Iterable[str], Iterable[HttpMethod]]] = None,
auth_level: Optional[Union[AuthLevel, str]] = None,
trigger_extra_fields: Dict[str, Any] = {},
binding_extra_fields: Dict[str, Any] = {}
) -> Callable[..., Any]:
"""The route decorator adds :class:`HttpTrigger` and
:class:`HttpOutput` binding to the :class:`FunctionBuilder` object
for building :class:`Function` object used in worker function
indexing model. This is equivalent to defining HttpTrigger
and HttpOutput binding in the function.json which enables your
        function to be triggered when HTTP requests hit the specified route.
All optional fields will be given default value by function host when
they are parsed by function host.
Ref: https://aka.ms/azure-function-binding-http
:param route: Route for the http endpoint, if None, it will be set
to function name if present or user defined python function name.
:param trigger_arg_name: Argument name for :class:`HttpRequest`,
defaults to 'req'.
:param binding_arg_name: Argument name for :class:`HttpResponse`,
defaults to '$return'.
:param methods: A tuple of the HTTP methods to which the function
responds.
:param auth_level: Determines what keys, if any, need to be present
on the request in order to invoke the function.
:return: Decorator function.
:param trigger_extra_fields: Additional fields to include in trigger
json. For example,
>>> data_type='STRING' # 'dataType': 'STRING' in trigger json
:param binding_extra_fields: Additional fields to include in binding
json. For example,
>>> data_type='STRING' # 'dataType': 'STRING' in binding json
"""
@self._configure_function_builder
def wrap(fb):
def decorator():
fb.add_trigger(trigger=HttpTrigger(
name=trigger_arg_name,
methods=parse_iterable_param_to_enums(methods, HttpMethod),
auth_level=parse_singular_param_to_enum(auth_level,
AuthLevel),
route=route, **trigger_extra_fields))
fb.add_binding(binding=HttpOutput(
name=binding_arg_name, **binding_extra_fields))
return fb
return decorator()
return wrap
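    # A minimal usage sketch for the route decorator above (an assumed
    # v2-model function_app.py; names like "hello" are illustrative):
    #
    #     import logging
    #     import azure.functions as func
    #
    #     app = func.FunctionApp(http_auth_level=func.AuthLevel.ANONYMOUS)
    #
    #     @app.route(route="hello", methods=["GET"])
    #     def hello(req: func.HttpRequest) -> func.HttpResponse:
    #         name = req.params.get("name", "world")
    #         return func.HttpResponse(f"Hello, {name}!")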
def timer_trigger(self,
arg_name: str,
schedule: str,
run_on_startup: Optional[bool] = None,
use_monitor: Optional[bool] = None,
data_type: Optional[Union[DataType, str]] = None,
**kwargs: Any) -> Callable[..., Any]:
"""The schedule or timer decorator adds :class:`TimerTrigger` to the
:class:`FunctionBuilder` object
for building :class:`Function` object used in worker function
indexing model. This is equivalent to defining TimerTrigger
        in the function.json which enables your function to be triggered on the
specified schedule.
All optional fields will be given default value by function host when
they are parsed by function host.
Ref: https://aka.ms/azure-function-binding-timer
:param arg_name: The name of the variable that represents the
:class:`TimerRequest` object in function code.
:param schedule: A string representing a CRON expression that will
be used to schedule a function to run.
:param run_on_startup: If true, the function is invoked when the
runtime starts.
:param use_monitor: Set to true or false to indicate whether the
schedule should be monitored.
:param data_type: Defines how Functions runtime should treat the
parameter value.
:return: Decorator function.
"""
@self._configure_function_builder
def wrap(fb):
def decorator():
fb.add_trigger(
trigger=TimerTrigger(
name=arg_name,
schedule=schedule,
run_on_startup=run_on_startup,
use_monitor=use_monitor,
data_type=parse_singular_param_to_enum(data_type,
DataType),
**kwargs))
return fb
return decorator()
return wrap
schedule = timer_trigger
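    # Hedged timer sketch (assumes the same app/func/logging setup as the
    # route sketch above; the NCRONTAB expression fires every five minutes):
    #
    #     @app.timer_trigger(arg_name="mytimer", schedule="0 */5 * * * *")
    #     def heartbeat(mytimer: func.TimerRequest) -> None:
    #         if mytimer.past_due:
    #             logging.warning("Timer is running late")
    #         logging.info("Heartbeat fired")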
def warm_up_trigger(self,
arg_name: str,
data_type: Optional[Union[DataType, str]] = None,
**kwargs) -> Callable:
"""The warm up decorator adds :class:`WarmUpTrigger` to the
:class:`FunctionBuilder` object
for building :class:`Function` object used in worker function
indexing model. This is equivalent to defining WarmUpTrigger
        in the function.json which enables your function to be triggered when a
        new instance is added to the running function app.
All optional fields will be given default value by function host when
they are parsed by function host.
Ref: https://aka.ms/azure-function-binding-warmup
:param arg_name: The name of the variable that represents the
:class:`TimerRequest` object in function code.
:param data_type: Defines how Functions runtime should treat the
parameter value.
:return: Decorator function.
"""
@self._configure_function_builder
def wrap(fb):
def decorator():
fb.add_trigger(
trigger=WarmUpTrigger(
name=arg_name,
data_type=parse_singular_param_to_enum(data_type,
DataType),
**kwargs))
return fb
return decorator()
return wrap
def service_bus_queue_trigger(
self,
arg_name: str,
connection: str,
queue_name: str,
data_type: Optional[Union[DataType, str]] = None,
access_rights: Optional[Union[AccessRights, str]] = None,
is_sessions_enabled: Optional[bool] = None,
cardinality: Optional[Union[Cardinality, str]] = None,
**kwargs: Any) -> Callable[..., Any]:
"""The on_service_bus_queue_change decorator adds
:class:`ServiceBusQueueTrigger` to the :class:`FunctionBuilder` object
for building :class:`Function` object used in worker function
indexing model. This is equivalent to defining ServiceBusQueueTrigger
        in the function.json which enables your function to be triggered when
new message(s) are sent to the service bus queue.
All optional fields will be given default value by function host when
they are parsed by function host.
Ref: https://aka.ms/azure-function-binding-service-bus
:param arg_name: The name of the variable that represents the
:class:`ServiceBusMessage` object in function code.
:param connection: The name of an app setting or setting collection
that specifies how to connect to Service Bus.
:param queue_name: Name of the queue to monitor.
:param data_type: Defines how Functions runtime should treat the
parameter value.
:param access_rights: Access rights for the connection string.
:param is_sessions_enabled: True if connecting to a session-aware
queue or subscription.
:param cardinality: Set to many in order to enable batching.
:return: Decorator function.
"""
@self._configure_function_builder
def wrap(fb):
def decorator():
fb.add_trigger(
trigger=ServiceBusQueueTrigger(
name=arg_name,
connection=connection,
queue_name=queue_name,
data_type=parse_singular_param_to_enum(data_type,
DataType),
access_rights=parse_singular_param_to_enum(
access_rights,
AccessRights),
is_sessions_enabled=is_sessions_enabled,
cardinality=parse_singular_param_to_enum(cardinality,
Cardinality),
**kwargs))
return fb
return decorator()
return wrap
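    # Hedged Service Bus queue sketch (queue and connection-setting names are
    # assumptions; assumes the module-level app/func/logging setup):
    #
    #     @app.service_bus_queue_trigger(arg_name="msg", queue_name="orders",
    #                                    connection="ServiceBusConnection")
    #     def on_order(msg: func.ServiceBusMessage) -> None:
    #         logging.info("Order body: %s", msg.get_body().decode("utf-8"))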
def service_bus_topic_trigger(
self,
arg_name: str,
connection: str,
topic_name: str,
subscription_name: str,
data_type: Optional[Union[DataType, str]] = None,
access_rights: Optional[Union[AccessRights, str]] = None,
is_sessions_enabled: Optional[bool] = None,
cardinality: Optional[Union[Cardinality, str]] = None,
**kwargs: Any) -> Callable[..., Any]:
"""The on_service_bus_topic_change decorator adds
:class:`ServiceBusTopicTrigger` to the :class:`FunctionBuilder` object
for building :class:`Function` object used in worker function
indexing model. This is equivalent to defining ServiceBusTopicTrigger
        in the function.json which enables the function to be triggered when new
message(s) are sent to the service bus topic.
All optional fields will be given default value by function host when
they are parsed by function host.
Ref: https://aka.ms/azure-function-binding-service-bus
:param arg_name: The name of the variable that represents the
:class:`ServiceBusMessage` object in function code.
:param connection: The name of an app setting or setting collection
that specifies how to connect to Service Bus.
:param topic_name: Name of the topic to monitor.
:param subscription_name: Name of the subscription to monitor.
:param data_type: Defines how Functions runtime should treat the
parameter value.
:param access_rights: Access rights for the connection string.
:param is_sessions_enabled: True if connecting to a session-aware
queue or subscription.
:param cardinality: Set to many in order to enable batching.
:return: Decorator function.
"""
@self._configure_function_builder
def wrap(fb):
def decorator():
fb.add_trigger(
trigger=ServiceBusTopicTrigger(
name=arg_name,
connection=connection,
topic_name=topic_name,
subscription_name=subscription_name,
data_type=parse_singular_param_to_enum(data_type,
DataType),
access_rights=parse_singular_param_to_enum(
access_rights,
AccessRights),
is_sessions_enabled=is_sessions_enabled,
cardinality=parse_singular_param_to_enum(cardinality,
Cardinality),
**kwargs))
return fb
return decorator()
return wrap
def queue_trigger(self,
arg_name: str,
queue_name: str,
connection: str,
data_type: Optional[DataType] = None,
**kwargs) -> Callable[..., Any]:
"""The queue_trigger decorator adds :class:`QueueTrigger` to the
:class:`FunctionBuilder` object
for building :class:`Function` object used in worker function
indexing model. This is equivalent to defining QueueTrigger
        in the function.json which enables the function to be triggered when new
message(s) are sent to the storage queue.
All optional fields will be given default value by function host when
they are parsed by function host.
Ref: https://aka.ms/azure-function-binding-queue
:param arg_name: The name of the variable that represents the
:class:`QueueMessage` object in function code.
:param queue_name: The name of the queue to poll.
:param connection: The name of an app setting or setting collection
that specifies how to connect to Azure Queues.
:param data_type: Defines how Functions runtime should treat the
parameter value.
:param kwargs: Keyword arguments for specifying additional binding
fields to include in the binding json.
:return: Decorator function.
"""
@self._configure_function_builder
def wrap(fb):
def decorator():
fb.add_trigger(
trigger=QueueTrigger(
name=arg_name,
queue_name=queue_name,
connection=connection,
data_type=parse_singular_param_to_enum(data_type,
DataType),
**kwargs))
return fb
return decorator()
return wrap
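    # Minimal storage-queue sketch (assumes the default "AzureWebJobsStorage"
    # connection setting and an existing queue named "items"):
    #
    #     @app.queue_trigger(arg_name="msg", queue_name="items",
    #                        connection="AzureWebJobsStorage")
    #     def on_item(msg: func.QueueMessage) -> None:
    #         logging.info("Queue message: %s", msg.get_body().decode("utf-8"))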
def event_hub_message_trigger(self,
arg_name: str,
connection: str,
event_hub_name: str,
data_type: Optional[
Union[DataType, str]] = None,
cardinality: Optional[
Union[Cardinality, str]] = None,
consumer_group: Optional[
str] = None,
**kwargs: Any) -> Callable[..., Any]:
"""The event_hub_message_trigger decorator adds
:class:`EventHubTrigger`
to the :class:`FunctionBuilder` object
for building :class:`Function` object used in worker function
indexing model. This is equivalent to defining EventHubTrigger
        in the function.json which enables the function to be triggered when new
message(s) are sent to the event hub.
All optional fields will be given default value by function host when
they are parsed by function host.
Ref: https://aka.ms/azure-function-binding-event-hubs
:param arg_name: The name of the variable that represents
:class:`EventHubEvent` object in function code.
:param connection: The name of an app setting or setting collection
that specifies how to connect to Event Hubs.
:param event_hub_name: The name of the event hub.
:param data_type: Defines how Functions runtime should treat the
parameter value.
:param cardinality: Set to many in order to enable batching.
:param consumer_group: An optional property that sets the consumer
group used to subscribe to events in the hub.
:param kwargs: Keyword arguments for specifying additional binding
fields to include in the binding json.
:return: Decorator function.
"""
@self._configure_function_builder
def wrap(fb):
def decorator():
fb.add_trigger(
trigger=EventHubTrigger(
name=arg_name,
connection=connection,
event_hub_name=event_hub_name,
data_type=parse_singular_param_to_enum(data_type,
DataType),
cardinality=parse_singular_param_to_enum(cardinality,
Cardinality),
consumer_group=consumer_group,
**kwargs))
return fb
return decorator()
return wrap
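    # Hedged Event Hubs sketch (hub name and connection-setting name are
    # assumptions; single-event cardinality shown for simplicity):
    #
    #     @app.event_hub_message_trigger(arg_name="event",
    #                                    event_hub_name="myhub",
    #                                    connection="EventHubConnection")
    #     def on_event(event: func.EventHubEvent) -> None:
    #         logging.info("Event hub payload: %s",
    #                      event.get_body().decode("utf-8"))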
def cosmos_db_trigger_v3(self,
arg_name: str,
database_name: str,
collection_name: str,
connection_string_setting: str,
lease_collection_name: Optional[str] = None,
lease_connection_string_setting: Optional[
str] = None,
lease_database_name: Optional[str] = None,
create_lease_collection_if_not_exists: Optional[
bool] = None,
leases_collection_throughput: Optional[int] =
None,
lease_collection_prefix: Optional[str] = None,
checkpoint_interval: Optional[int] = None,
checkpoint_document_count: Optional[int] = None,
feed_poll_delay: Optional[int] = None,
lease_renew_interval: Optional[int] = None,
lease_acquire_interval: Optional[int] = None,
lease_expiration_interval: Optional[int] = None,
max_items_per_invocation: Optional[int] = None,
start_from_beginning: Optional[bool] = None,
preferred_locations: Optional[str] = None,
data_type: Optional[
Union[DataType, str]] = None,
**kwargs: Any) -> \
Callable[..., Any]:
"""The cosmos_db_trigger_v3 decorator adds :class:`CosmosDBTrigger`
to the :class:`FunctionBuilder` object
for building :class:`Function` object used in worker function
indexing model. This decorator will work only with extension bundle 2.x
or 3.x. For additional details, please refer
https://aka.ms/cosmosdb-v4-update.
This is equivalent to defining CosmosDBTrigger in the function.json
        which enables the function to be triggered when CosmosDB data is changed.
All optional fields will be given default value by function host when
they are parsed by function host.
Ref: https://aka.ms/azure-function-binding-cosmosdb-v2
:param arg_name: The name of the variable that represents
:class:`DocumentList` object in function code.
:param database_name: The name of the Azure Cosmos DB database with
the collection being monitored.
:param collection_name: The name of the collection being monitored.
:param connection_string_setting: The name of an app setting or
setting collection that specifies how to connect to the Azure Cosmos
DB account being monitored.
:param lease_collection_name: The name of the collection used to
store leases.
:param lease_connection_string_setting: The name of an app setting
or setting collection that specifies how to connect to the Azure
Cosmos DB account that holds the lease collection.
:param lease_database_name: The name of the database that holds the
collection used to store leases.
:param create_lease_collection_if_not_exists: When set to true,
the leases collection is automatically created when it doesn't
already exist.
:param leases_collection_throughput: Defines the number of Request
Units to assign when the leases collection is created.
:param lease_collection_prefix: When set, the value is added as a
prefix to the leases created in the Lease collection for this
Function.
:param checkpoint_interval: When set, it defines, in milliseconds,
the interval between lease checkpoints. Default is always after a
Function call.
:param checkpoint_document_count: Customizes the amount of documents
between lease checkpoints. Default is always after a Function call.
:param feed_poll_delay: The time (in milliseconds) for the delay
between polling a partition for new changes on the feed, after all
current changes are drained.
:param lease_renew_interval: When set, it defines, in milliseconds,
the renew interval for all leases for partitions currently held by
an instance.
:param lease_acquire_interval: When set, it defines,
in milliseconds, the interval to kick off a task to compute if
partitions are distributed evenly among known host instances.
:param lease_expiration_interval: When set, it defines,
in milliseconds, the interval for which the lease is taken on a
lease representing a partition.
:param max_items_per_invocation: When set, this property sets the
maximum number of items received per Function call.
:param start_from_beginning: This option tells the Trigger to read
changes from the beginning of the collection's change history
instead of starting at the current time.
:param preferred_locations: Defines preferred locations (regions)
for geo-replicated database accounts in the Azure Cosmos DB service.
:param data_type: Defines how Functions runtime should treat the
parameter value.
:param kwargs: Keyword arguments for specifying additional binding
fields to include in the binding json.
:return: Decorator function.
"""
trigger = CosmosDBTriggerV3(
name=arg_name,
database_name=database_name,
collection_name=collection_name,
connection_string_setting=connection_string_setting,
lease_collection_name=lease_collection_name,
lease_connection_string_setting=lease_connection_string_setting,
lease_database_name=lease_database_name,
create_lease_collection_if_not_exists=create_lease_collection_if_not_exists, # NoQA
leases_collection_throughput=leases_collection_throughput,
lease_collection_prefix=lease_collection_prefix,
checkpoint_interval=checkpoint_interval,
checkpoint_document_count=checkpoint_document_count,
feed_poll_delay=feed_poll_delay,
lease_renew_interval=lease_renew_interval,
lease_acquire_interval=lease_acquire_interval,
lease_expiration_interval=lease_expiration_interval,
max_items_per_invocation=max_items_per_invocation,
start_from_beginning=start_from_beginning,
preferred_locations=preferred_locations,
data_type=parse_singular_param_to_enum(data_type, DataType),
**kwargs)
@self._configure_function_builder
def wrap(fb):
def decorator():
fb.add_trigger(trigger=trigger)
return fb
return decorator()
return wrap
def cosmos_db_trigger(self,
arg_name: str,
connection: str,
database_name: str,
container_name: str,
lease_connection: Optional[str] = None,
lease_database_name: Optional[str] = None,
lease_container_name: Optional[str] = None,
create_lease_container_if_not_exists: Optional[
bool] = None,
leases_container_throughput: Optional[int] = None,
lease_container_prefix: Optional[str] = None,
feed_poll_delay: Optional[int] = None,
lease_acquire_interval: Optional[int] = None,
lease_expiration_interval: Optional[int] = None,
lease_renew_interval: Optional[int] = None,
max_items_per_invocation: Optional[int] = None,
start_from_beginning: Optional[time] = None,
start_from_time: Optional[time] = None,
preferred_locations: Optional[str] = None,
data_type: Optional[
Union[DataType, str]] = None,
**kwargs: Any) -> \
Callable[..., Any]:
"""The cosmos_db_trigger decorator adds :class:`CosmosDBTrigger`
to the :class:`FunctionBuilder` object
for building :class:`Function` object used in worker function
indexing model. This decorator will work only with extension bundle 4.x
and above. For additional details, please refer
https://aka.ms/cosmosdb-v4-update.
This is equivalent to defining CosmosDBTrigger in the function.json
        which enables the function to be triggered when CosmosDB data is changed.
All optional fields will be given default value by function host when
they are parsed by function host.
Ref: https://aka.ms/azure-function-binding-cosmosdb-v4
:param arg_name: The name of the variable that represents
:class:`DocumentList` object in function code
:param connection: The name of an app setting or setting collection
that specifies how to connect to the Azure Cosmos DB account being
monitored.
:param database_name: The name of the Azure Cosmos DB database with
the collection being monitored
:param container_name: The name of the container being monitored
:param lease_connection: (Optional) The name of an app setting or
setting container that specifies how to connect to the Azure Cosmos
DB account that holds the lease container
:param lease_database_name: The name of the database that holds the
collection used to store leases
:param lease_container_name: (Optional) The name of the container used
to store leases. When not set, the value leases is used
:param create_lease_container_if_not_exists: (Optional) When set to
true, the leases container is automatically created when it doesn't
already exist. The default value is false. When using Azure AD
identities if you set the value to true, creating containers is not an
allowed operation and your Function won't be able to start
:param leases_container_throughput: (Optional) Defines the number of
Request Units to assign when the leases container is created. This
setting is only used when createLeaseContainerIfNotExists is set to
true. This parameter is automatically set when the binding is created
using the portal
:param lease_container_prefix: (Optional) When set, the value is added
as a prefix to the leases created in the Lease container for this
function. Using a prefix allows two separate Azure Functions to share
the same Lease container by using different prefixes
:param feed_poll_delay: The time (in milliseconds) for the delay
between polling a partition for new changes on the feed, after all
current changes are drained
:param lease_acquire_interval: When set, it defines,
in milliseconds, the interval to kick off a task to compute if
partitions are distributed evenly among known host instances
:param lease_expiration_interval: When set, it defines,
in milliseconds, the interval for which the lease is taken on a
lease representing a partition
:param lease_renew_interval: When set, it defines, in milliseconds,
the renew interval for all leases for partitions currently held by
an instance
:param max_items_per_invocation: When set, this property sets the
maximum number of items received per Function call
:param start_from_beginning: This option tells the Trigger to read
changes from the beginning of the collection's change history
instead of starting at the current time
:param start_from_time: (Optional) Gets or sets the date and time from
which to initialize the change feed read operation. The recommended
format is ISO 8601 with the UTC designator, such as
2021-02-16T14:19:29Z. This is only used to set the initial trigger
state. After the trigger has a lease state, changing this value has
no effect
:param preferred_locations: Defines preferred locations (regions)
for geo-replicated database accounts in the Azure Cosmos DB service
:param data_type: Defines how Functions runtime should treat the
parameter value
:param kwargs: Keyword arguments for specifying additional binding
fields to include in the binding json
:return: Decorator function.
"""
trigger = CosmosDBTrigger(
name=arg_name,
connection=connection,
database_name=database_name,
container_name=container_name,
lease_connection=lease_connection,
lease_database_name=lease_database_name,
lease_container_name=lease_container_name,
create_lease_container_if_not_exists=create_lease_container_if_not_exists, # NoQA
leases_container_throughput=leases_container_throughput,
lease_container_prefix=lease_container_prefix,
feed_poll_delay=feed_poll_delay,
lease_acquire_interval=lease_acquire_interval,
lease_expiration_interval=lease_expiration_interval,
lease_renew_interval=lease_renew_interval,
max_items_per_invocation=max_items_per_invocation,
start_from_beginning=start_from_beginning,
start_from_time=start_from_time,
preferred_locations=preferred_locations,
data_type=parse_singular_param_to_enum(data_type, DataType),
**kwargs)
@self._configure_function_builder
def wrap(fb):
def decorator():
fb.add_trigger(trigger=trigger)
return fb
return decorator()
return wrap
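    # Hedged Cosmos DB trigger sketch for extension bundle 4.x (database,
    # container, and connection-setting names are assumptions):
    #
    #     @app.cosmos_db_trigger(arg_name="documents", connection="CosmosDB",
    #                            database_name="mydb", container_name="items",
    #                            create_lease_container_if_not_exists=True)
    #     def on_change(documents: func.DocumentList) -> None:
    #         logging.info("Changed documents: %d", len(documents))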
def blob_trigger(self,
arg_name: str,
path: str,
connection: str,
data_type: Optional[DataType] = None,
**kwargs) -> Callable[..., Any]:
"""
        The blob_trigger decorator adds :class:`BlobTrigger` to the
:class:`FunctionBuilder` object
for building :class:`Function` object used in worker function
indexing model. This is equivalent to defining BlobTrigger
        in the function.json which enables the function to be triggered when a
        new or updated blob is detected at the monitored storage path.
All optional fields will be given default value by function host when
they are parsed by function host.
Ref: https://aka.ms/azure-function-binding-storage-blob
:param arg_name: The name of the variable that represents the
:class:`InputStream` object in function code.
:param path: The path to the blob.
:param connection: The name of an app setting or setting collection
that specifies how to connect to Azure Blobs.
:param data_type: Defines how Functions runtime should treat the
parameter value.
:param kwargs: Keyword arguments for specifying additional binding
fields to include in the binding json.
:return: Decorator function.
"""
@self._configure_function_builder
def wrap(fb):
def decorator():
fb.add_trigger(
trigger=BlobTrigger(
name=arg_name,
path=path,
connection=connection,
data_type=parse_singular_param_to_enum(data_type,
DataType),
**kwargs))
return fb
return decorator()
return wrap
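    # Minimal blob trigger sketch (the "samples/{name}" path pattern and the
    # "AzureWebJobsStorage" connection setting are assumptions):
    #
    #     @app.blob_trigger(arg_name="blob", path="samples/{name}",
    #                       connection="AzureWebJobsStorage")
    #     def on_blob(blob: func.InputStream) -> None:
    #         logging.info("Blob %s, %s bytes", blob.name, blob.length)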
def event_grid_trigger(self,
arg_name: str,
data_type: Optional[
Union[DataType, str]] = None,
**kwargs) -> Callable[..., Any]:
"""
The event_grid_trigger decorator adds
:class:`EventGridTrigger`
to the :class:`FunctionBuilder` object
for building :class:`Function` object used in worker function
indexing model. This is equivalent to defining event grid trigger
        in the function.json which enables the function to be triggered in
        response to an event sent to an event grid topic.
All optional fields will be given default value by function host when
they are parsed by function host.
Ref: https://aka.ms/eventgridtrigger
:param arg_name: the variable name used in function code for the
parameter that receives the event data.
:param data_type: Defines how Functions runtime should treat the
parameter value.
:return: Decorator function.
"""
@self._configure_function_builder
def wrap(fb):
def decorator():
fb.add_trigger(
trigger=EventGridTrigger(
name=arg_name,
data_type=parse_singular_param_to_enum(data_type,
DataType),
**kwargs))
return fb
return decorator()
return wrap
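    # Editor's note: minimal sketch of the event_grid_trigger decorator above (editor
    # addition, not original source); the function and argument names are assumptions.
    #
    #   @app.event_grid_trigger(arg_name="event")
    #   def on_grid_event(event: func.EventGridEvent) -> None:
    #       logging.info("event %s of type %s", event.id, event.event_type)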
def generic_trigger(self,
arg_name: str,
type: str,
data_type: Optional[Union[DataType, str]] = None,
**kwargs
) -> Callable[..., Any]:
"""
The generic_trigger decorator adds :class:`GenericTrigger`
to the :class:`FunctionBuilder` object for building :class:`Function`
object used in worker function indexing model.
This is equivalent to defining a generic trigger in the
function.json which triggers function to execute when generic trigger
events are received by host.
All optional fields will be given default value by function host when
they are parsed by function host.
Ref: https://aka.ms/azure-function-binding-custom
:param arg_name: The name of trigger parameter in the function code.
:param type: The type of binding.
:param data_type: Defines how Functions runtime should treat the
parameter value.
:param kwargs: Keyword arguments for specifying additional binding
fields to include in the binding json.
:return: Decorator function.
"""
@self._configure_function_builder
def wrap(fb):
def decorator():
fb.add_trigger(
trigger=GenericTrigger(
name=arg_name,
type=type,
data_type=parse_singular_param_to_enum(data_type,
DataType),
**kwargs))
return fb
return decorator()
return wrap
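    # Editor's note: one possible use of generic_trigger above is to declare a binding
    # type that has no dedicated decorator by passing its function.json fields as
    # kwargs. This sketch is an editor addition; the timer binding type, schedule value
    # and field names are assumptions, forwarded verbatim to the host.
    #
    #   @app.generic_trigger(arg_name="timer", type="timerTrigger",
    #                        schedule="0 */5 * * * *")
    #   def heartbeat(timer) -> None:
    #       logging.info("timer fired")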
class BindingApi(DecoratorApi, ABC):
"""Interface to extend for using existing binding decorator functions."""
def service_bus_queue_output(self,
arg_name: str,
connection: str,
queue_name: str,
data_type: Optional[
Union[DataType, str]] = None,
access_rights: Optional[Union[
AccessRights, str]] = None,
**kwargs) -> \
Callable[..., Any]:
"""The service_bus_queue_output decorator adds
:class:`ServiceBusQueueOutput` to the :class:`FunctionBuilder` object
for building :class:`Function` object used in worker function
indexing model. This is equivalent to defining ServiceBusQueueOutput
in the function.json which enables function to write message(s) to
the service bus queue.
All optional fields will be given default value by function host when
they are parsed by function host.
Ref: https://aka.ms/azure-function-binding-service-bus
:param arg_name: The name of the variable that represents service
bus queue output object in function code.
:param connection: The name of an app setting or setting collection
that specifies how to connect to Service Bus.
        :param queue_name: Name of the queue to which the message is sent.
:param data_type: Defines how Functions runtime should treat the
parameter value.
:param access_rights: Access rights for the connection string.
:return: Decorator function.
"""
@self._configure_function_builder
def wrap(fb):
def decorator():
fb.add_binding(
binding=ServiceBusQueueOutput(
name=arg_name,
connection=connection,
queue_name=queue_name,
data_type=parse_singular_param_to_enum(data_type,
DataType),
access_rights=parse_singular_param_to_enum(
access_rights, AccessRights),
**kwargs))
return fb
return decorator()
return wrap
def service_bus_topic_output(self,
arg_name: str,
connection: str,
topic_name: str,
subscription_name: Optional[str] = None,
data_type: Optional[
Union[DataType, str]] = None,
access_rights: Optional[Union[
AccessRights, str]] = None,
**kwargs) -> \
Callable[..., Any]:
"""The service_bus_topic_output decorator adds
:class:`ServiceBusTopicOutput` to the :class:`FunctionBuilder` object
for building :class:`Function` object used in worker function
indexing model. This is equivalent to defining ServiceBusTopicOutput
in the function.json which enables function to write message(s) to
the service bus topic.
All optional fields will be given default value by function host when
they are parsed by function host.
Ref: https://aka.ms/azure-function-binding-service-bus
:param arg_name: The name of the variable that represents service
bus topic output object in function code.
:param connection: The name of an app setting or setting collection
that specifies how to connect to Service Bus.
        :param topic_name: Name of the topic to which the message is sent.
:param subscription_name: Name of the subscription to monitor.
:param data_type: Defines how Functions runtime should treat the
parameter value, defaults to DataType.UNDEFINED.
:param access_rights: Access rights for the connection string.
:return: Decorator function.
"""
@self._configure_function_builder
def wrap(fb):
def decorator():
fb.add_binding(
binding=ServiceBusTopicOutput(
name=arg_name,
connection=connection,
topic_name=topic_name,
subscription_name=subscription_name,
data_type=parse_singular_param_to_enum(data_type,
DataType),
access_rights=parse_singular_param_to_enum(
access_rights,
AccessRights),
**kwargs))
return fb
return decorator()
return wrap
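    # Editor's note: illustrative sketch (not original source) pairing the Service Bus
    # output decorators above with an HTTP trigger. The route, connection setting name
    # and queue name are assumptions; func.Out is used to set the outgoing message.
    #
    #   @app.route(route="orders")
    #   @app.service_bus_queue_output(arg_name="msg",
    #                                 connection="ServiceBusConnection",
    #                                 queue_name="orders")
    #   def enqueue_order(req: func.HttpRequest, msg: func.Out[str]) -> func.HttpResponse:
    #       msg.set(req.get_body().decode("utf-8"))
    #       return func.HttpResponse("queued", status_code=202)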
def queue_output(self,
arg_name: str,
queue_name: str,
connection: str,
data_type: Optional[DataType] = None,
**kwargs) -> Callable[..., Any]:
"""The queue_output decorator adds :class:`QueueOutput` to the
:class:`FunctionBuilder` object
for building :class:`Function` object used in worker function
indexing model. This is equivalent to defining QueueOutput
in the function.json which enables function to write message(s) to
the storage queue.
All optional fields will be given default value by function host when
they are parsed by function host.
Ref: https://aka.ms/azure-function-binding-queue
:param arg_name: The name of the variable that represents storage
queue output object in function code.
        :param queue_name: The name of the queue to which the message is written.
:param connection: The name of an app setting or setting collection
that specifies how to connect to Azure Queues.
:param data_type: Defines how Functions runtime should treat the
parameter value.
:param kwargs: Keyword arguments for specifying additional binding
fields to include in the binding json.
:return: Decorator function.
"""
@self._configure_function_builder
def wrap(fb):
def decorator():
fb.add_binding(
binding=QueueOutput(name=arg_name,
queue_name=queue_name,
connection=connection,
data_type=parse_singular_param_to_enum(
data_type, DataType),
**kwargs))
return fb
return decorator()
return wrap
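    # Editor's note: hypothetical pairing of queue_output above with an HTTP trigger
    # (editor addition, not original source). Queue and connection names are
    # placeholders.
    #
    #   @app.route(route="enqueue")
    #   @app.queue_output(arg_name="outmsg",
    #                     queue_name="tasks",
    #                     connection="AzureWebJobsStorage")
    #   def add_task(req: func.HttpRequest, outmsg: func.Out[str]) -> str:
    #       outmsg.set(req.params.get("task", "noop"))
    #       return "accepted"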
def event_hub_output(self,
arg_name: str,
connection: str,
event_hub_name: str,
data_type: Optional[
Union[DataType, str]] = None,
**kwargs) -> \
Callable[..., Any]:
"""The event_hub_output decorator adds
:class:`EventHubOutput` to the :class:`FunctionBuilder` object
for building :class:`Function` object used in worker function
indexing model. This is equivalent to defining EventHubOutput
in the function.json which enables function to write message(s) to
the event hub.
All optional fields will be given default value by function host when
they are parsed by function host.
Ref: https://aka.ms/azure-function-binding-event-hubs
:param arg_name: The name of the variable that represents event hub
output object in function code.
:param connection: The name of an app setting or setting collection
that specifies how to connect to Event Hub.
:param event_hub_name: The name of the event hub.
:param data_type: Defines how Functions runtime should treat the
parameter value.
:param kwargs: Keyword arguments for specifying additional binding
fields to include in the binding json.
:return: Decorator function.
"""
@self._configure_function_builder
def wrap(fb):
def decorator():
fb.add_binding(
binding=EventHubOutput(
name=arg_name,
connection=connection,
event_hub_name=event_hub_name,
data_type=parse_singular_param_to_enum(data_type,
DataType),
**kwargs))
return fb
return decorator()
return wrap
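    # Editor's note: sketch of event_hub_output above (editor addition, not part of the
    # original source); the event hub and connection setting names are assumptions.
    #
    #   @app.route(route="emit")
    #   @app.event_hub_output(arg_name="event",
    #                         connection="EventHubConnection",
    #                         event_hub_name="telemetry")
    #   def emit(req: func.HttpRequest, event: func.Out[str]) -> str:
    #       event.set(req.get_body().decode("utf-8"))
    #       return "sent"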
def cosmos_db_output_v3(self,
arg_name: str,
database_name: str,
collection_name: str,
connection_string_setting: str,
create_if_not_exists: Optional[bool] = None,
partition_key: Optional[str] = None,
collection_throughput: Optional[int] = None,
use_multiple_write_locations: Optional[
bool] = None,
preferred_locations: Optional[str] = None,
data_type: Optional[
Union[DataType, str]] = None,
**kwargs) \
-> Callable[..., Any]:
"""The cosmos_db_output_v3 decorator adds
:class:`CosmosDBOutput` to the :class:`FunctionBuilder` object
for building :class:`Function` object used in worker function
indexing model. This decorator will work only with extension bundle 2.x
or 3.x. For additional details, please refer
https://aka.ms/cosmosdb-v4-update.
This is equivalent to defining CosmosDBOutput
in the function.json which enables function to write to the CosmosDB.
All optional fields will be given default value by function host when
they are parsed by function host.
Ref: https://aka.ms/azure-function-binding-cosmosdb-v2
:param arg_name: The name of the variable that represents CosmosDB
output object in function code.
:param database_name: The name of the Azure Cosmos DB database with
the collection being monitored.
:param collection_name: The name of the collection being monitored.
:param connection_string_setting: The name of an app setting or
setting collection that specifies how to connect to the Azure Cosmos
DB account being monitored.
:param create_if_not_exists: A boolean value to indicate whether the
collection is created when it doesn't exist.
:param partition_key: When CreateIfNotExists is true, it defines the
partition key path for the created collection.
:param collection_throughput: When CreateIfNotExists is true,
it defines the throughput of the created collection.
:param use_multiple_write_locations: When set to true along with
PreferredLocations, it can leverage multi-region writes in the Azure
Cosmos DB service.
:param preferred_locations: Defines preferred locations (regions)
for geo-replicated database accounts in the Azure Cosmos DB service.
:param data_type: Defines how Functions runtime should treat the
parameter value.
:param kwargs: Keyword arguments for specifying additional binding
fields to include in the binding json.
:return: Decorator function.
"""
@self._configure_function_builder
def wrap(fb):
def decorator():
fb.add_binding(
binding=CosmosDBOutputV3(
name=arg_name,
database_name=database_name,
collection_name=collection_name,
connection_string_setting=connection_string_setting,
create_if_not_exists=create_if_not_exists,
partition_key=partition_key,
collection_throughput=collection_throughput,
use_multiple_write_locations=use_multiple_write_locations, # NoQA
preferred_locations=preferred_locations,
data_type=parse_singular_param_to_enum(data_type,
DataType),
**kwargs))
return fb
return decorator()
return wrap
def cosmos_db_output(self,
arg_name: str,
connection: str,
database_name: str,
container_name: str,
create_if_not_exists: Optional[bool] = None,
partition_key: Optional[str] = None,
container_throughput: Optional[int] = None,
preferred_locations: Optional[str] = None,
data_type: Optional[
Union[DataType, str]] = None,
**kwargs) \
-> Callable[..., Any]:
"""The cosmos_db_output decorator adds
:class:`CosmosDBOutput` to the :class:`FunctionBuilder` object
for building :class:`Function` object used in worker function
indexing model. This decorator will work only with extension bundle 4.x
and above. For additional details, please refer
https://aka.ms/cosmosdb-v4-update.
This is equivalent to defining CosmosDBOutput
in the function.json which enables function to write to the CosmosDB.
All optional fields will be given default value by function host when
they are parsed by function host.
Ref: https://aka.ms/azure-function-binding-cosmosdb-v4
:param arg_name: The name of the variable that represents CosmosDB
output object in function code.
:param connection: The name of an app setting or
setting collection that specifies how to connect to the Azure Cosmos
DB account being monitored
:param database_name: The name of the Azure Cosmos DB database with
the collection being monitored
:param container_name: The name of the container being monitored
:param create_if_not_exists: A boolean value to indicate whether the
collection is created when it doesn't exist
:param partition_key: When CreateIfNotExists is true, it defines the
partition key path for the created collection
        :param container_throughput: When createIfNotExists is true, it defines
        the throughput of the created container.
:param preferred_locations: Defines preferred locations (regions)
for geo-replicated database accounts in the Azure Cosmos DB service
:param data_type: Defines how Functions runtime should treat the
parameter value
:param kwargs: Keyword arguments for specifying additional binding
fields to include in the binding json
:return: Decorator function.
"""
@self._configure_function_builder
def wrap(fb):
def decorator():
fb.add_binding(
binding=CosmosDBOutput(
name=arg_name,
connection=connection,
database_name=database_name,
container_name=container_name,
create_if_not_exists=create_if_not_exists,
partition_key=partition_key,
container_throughput=container_throughput,
preferred_locations=preferred_locations,
data_type=parse_singular_param_to_enum(data_type,
DataType),
**kwargs))
return fb
return decorator()
return wrap
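    # Editor's note: illustrative example of the v4 cosmos_db_output decorator above
    # (editor addition, not original source). Database, container and connection names
    # are placeholders; func.Document.from_dict builds the outgoing document.
    #
    #   @app.route(route="save")
    #   @app.cosmos_db_output(arg_name="doc",
    #                         connection="CosmosDbConnection",
    #                         database_name="mydb",
    #                         container_name="items")
    #   def save_item(req: func.HttpRequest, doc: func.Out[func.Document]) -> str:
    #       doc.set(func.Document.from_dict({"id": req.params.get("id", "1")}))
    #       return "saved"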
def cosmos_db_input_v3(self,
arg_name: str,
database_name: str,
collection_name: str,
connection_string_setting: str,
id: Optional[str] = None,
sql_query: Optional[str] = None,
partition_key: Optional[str] = None,
data_type: Optional[
Union[DataType, str]] = None,
**kwargs) \
-> Callable[..., Any]:
"""The cosmos_db_input_v3 decorator adds
:class:`CosmosDBInput` to the :class:`FunctionBuilder` object
for building :class:`Function` object used in worker function
indexing model. This decorator will work only with extension bundle 2.x
or 3.x. For additional details, please refer
https://aka.ms/cosmosdb-v4-update.
This is equivalent to defining CosmosDBInput
in the function.json which enables function to read from CosmosDB.
All optional fields will be given default value by function host when
they are parsed by function host.
Ref: https://aka.ms/azure-function-binding-cosmosdb-v2
:param arg_name: The name of the variable that represents
:class:`DocumentList` input object in function code.
:param database_name: The database containing the document.
:param collection_name: The name of the collection that contains the
document.
:param connection_string_setting: The name of the app setting
containing your Azure Cosmos DB connection string.
:param id: The ID of the document to retrieve.
:param sql_query: An Azure Cosmos DB SQL query used for retrieving
multiple documents.
:param partition_key: Specifies the partition key value for the
lookup.
:param data_type: Defines how Functions runtime should treat the
parameter value.
:param kwargs: Keyword arguments for specifying additional binding
fields to include in the binding json.
:return: Decorator function.
"""
@self._configure_function_builder
def wrap(fb):
def decorator():
fb.add_binding(
binding=CosmosDBInputV3(
name=arg_name,
database_name=database_name,
collection_name=collection_name,
connection_string_setting=connection_string_setting,
id=id,
sql_query=sql_query,
partition_key=partition_key,
data_type=parse_singular_param_to_enum(data_type,
DataType),
**kwargs))
return fb
return decorator()
return wrap
def cosmos_db_input(self,
arg_name: str,
connection: str,
database_name: str,
container_name: str,
partition_key: Optional[str] = None,
id: Optional[str] = None,
sql_query: Optional[str] = None,
preferred_locations: Optional[str] = None,
data_type: Optional[
Union[DataType, str]] = None,
**kwargs) \
-> Callable[..., Any]:
"""The cosmos_db_input decorator adds
:class:`CosmosDBInput` to the :class:`FunctionBuilder` object
for building :class:`Function` object used in worker function
indexing model. This decorator will work only with extension bundle 4.x
and above. For additional details, please refer
https://aka.ms/cosmosdb-v4-update.
This is equivalent to defining CosmosDBInput in the function.json which
enables function to read from CosmosDB.
All optional fields will be given default value by function host when
they are parsed by function host.
Ref: https://aka.ms/azure-function-binding-cosmosdb-v4
:param arg_name: The name of the variable that represents
:class:`DocumentList` input object in function code
        :param connection: The name of an app setting or setting collection
        that specifies how to connect to the Azure Cosmos DB account being
        monitored
:param database_name: The database containing the document
:param container_name: The name of the container that contains the
document
:param partition_key: Specifies the partition key value for the
lookup
:param id: The ID of the document to retrieve
:param sql_query: An Azure Cosmos DB SQL query used for retrieving
multiple documents
:param preferred_locations: (Optional) Defines preferred locations
(regions) for geo-replicated database accounts in the Azure Cosmos DB
service. Values should be comma-separated. For example, East US,South
Central US,North Europe
:param data_type: Defines how Functions runtime should treat the
parameter value
:param kwargs: Keyword arguments for specifying additional binding
fields to include in the binding json
:return: Decorator function.
"""
@self._configure_function_builder
def wrap(fb):
def decorator():
fb.add_binding(
binding=CosmosDBInput(
name=arg_name,
connection=connection,
database_name=database_name,
container_name=container_name,
partition_key=partition_key,
id=id,
sql_query=sql_query,
preferred_locations=preferred_locations,
data_type=parse_singular_param_to_enum(data_type,
DataType),
**kwargs))
return fb
return decorator()
return wrap
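    # Editor's note: sketch of the v4 cosmos_db_input decorator above (editor addition,
    # not original source). The SQL query and names are assumptions; documents arrive
    # as a func.DocumentList.
    #
    #   @app.route(route="items")
    #   @app.cosmos_db_input(arg_name="items",
    #                        connection="CosmosDbConnection",
    #                        database_name="mydb",
    #                        container_name="items",
    #                        sql_query="SELECT * FROM c")
    #   def list_items(req: func.HttpRequest, items: func.DocumentList) -> str:
    #       return f"{len(items)} documents"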
def blob_input(self,
arg_name: str,
path: str,
connection: str,
data_type: Optional[DataType] = None,
**kwargs) -> Callable[..., Any]:
"""
The blob_input decorator adds :class:`BlobInput` to the
:class:`FunctionBuilder` object
for building :class:`Function` object used in worker function
indexing model. This is equivalent to defining BlobInput
in the function.json which enables function to write message(s) to
the storage blobs.
All optional fields will be given default value by function host when
they are parsed by function host.
Ref: https://aka.ms/azure-function-binding-storage-blob
:param arg_name: The name of the variable that represents the blob in
function code.
:param path: The path to the blob.
:param connection: The name of an app setting or setting collection
that specifies how to connect to Azure Blobs.
:param data_type: Defines how Functions runtime should treat the
parameter value.
:param kwargs: Keyword arguments for specifying additional binding
fields to include in the binding json.
:return: Decorator function.
"""
@self._configure_function_builder
def wrap(fb):
def decorator():
fb.add_binding(
binding=BlobInput(
name=arg_name,
path=path,
connection=connection,
data_type=parse_singular_param_to_enum(data_type,
DataType),
**kwargs))
return fb
return decorator()
return wrap
def blob_output(self,
arg_name: str,
path: str,
connection: str,
data_type: Optional[DataType] = None,
**kwargs) -> Callable[..., Any]:
"""
The blob_output decorator adds :class:`BlobOutput` to the
:class:`FunctionBuilder` object
for building :class:`Function` object used in worker function
indexing model. This is equivalent to defining BlobOutput
in the function.json which enables function to write message(s) to
the storage blobs.
All optional fields will be given default value by function host when
they are parsed by function host.
Ref: https://aka.ms/azure-function-binding-storage-blob
:param arg_name: The name of the variable that represents the blob in
function code.
:param path: The path to the blob.
:param connection: The name of an app setting or setting collection
that specifies how to connect to Azure Blobs.
:param data_type: Defines how Functions runtime should treat the
parameter value.
:param kwargs: Keyword arguments for specifying additional binding
fields to include in the binding json.
:return: Decorator function.
"""
@self._configure_function_builder
def wrap(fb):
def decorator():
fb.add_binding(
binding=BlobOutput(
name=arg_name,
path=path,
connection=connection,
data_type=parse_singular_param_to_enum(data_type,
DataType),
**kwargs))
return fb
return decorator()
return wrap
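    # Editor's note: combined sketch of blob_input and blob_output above (editor
    # addition, not original source); blob paths and the storage setting name are
    # assumptions.
    #
    #   @app.route(route="copy")
    #   @app.blob_input(arg_name="source", path="samples/source.txt",
    #                   connection="AzureWebJobsStorage")
    #   @app.blob_output(arg_name="dest", path="samples/dest.txt",
    #                    connection="AzureWebJobsStorage")
    #   def copy_blob(req: func.HttpRequest, source: str, dest: func.Out[str]) -> str:
    #       dest.set(source)
    #       return "copied"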
def event_grid_output(self,
arg_name: str,
topic_endpoint_uri: str,
topic_key_setting: str,
data_type: Optional[
Union[DataType, str]] = None,
**kwargs) -> Callable[..., Any]:
"""
The event_grid_output decorator adds
:class:`EventGridOutput`
to the :class:`FunctionBuilder` object
for building :class:`Function` object used in worker function
indexing model. This is equivalent to defining output binding
in the function.json which enables function to
write events to a custom topic.
All optional fields will be given default value by function host when
they are parsed by function host.
Ref: https://aka.ms/eventgridtrigger
:param arg_name: The variable name used in function code that
represents the event.
:param data_type: Defines how Functions runtime should treat the
parameter value.
:param topic_endpoint_uri: The name of an app setting that
contains the URI for the custom topic.
:param topic_key_setting: The name of an app setting that
contains an access key for the custom topic.
:return: Decorator function.
"""
@self._configure_function_builder
def wrap(fb):
def decorator():
fb.add_binding(
binding=EventGridOutput(
name=arg_name,
topic_endpoint_uri=topic_endpoint_uri,
topic_key_setting=topic_key_setting,
data_type=parse_singular_param_to_enum(data_type,
DataType),
**kwargs))
return fb
return decorator()
return wrap
def table_input(self,
arg_name: str,
connection: str,
table_name: str,
row_key: Optional[str] = None,
partition_key: Optional[str] = None,
take: Optional[int] = None,
filter: Optional[str] = None,
data_type: Optional[
Union[DataType, str]] = None) -> Callable[..., Any]:
"""
The table_input decorator adds :class:`TableInput` to the
:class:`FunctionBuilder` object
for building :class:`Function` object used in worker function
indexing model. This is equivalent to defining TableInput
in the function.json which enables function to read a table in
an Azure Storage or Cosmos DB account
All optional fields will be given default value by function host when
they are parsed by function host.
Ref: https://aka.ms/tablesbindings
:param arg_name: The name of the variable that represents
the table or entity in function code.
:param connection: The name of an app setting or setting collection
that specifies how to connect to the table service.
        :param table_name: The name of the table.
:param row_key: The row key of the table entity to read.
:param partition_key: The partition key of the table entity to read.
:param take: The maximum number of entities to return
:param filter: An OData filter expression for the entities to return
from the table.
:param data_type: Defines how Functions runtime should treat the
parameter value.
:return: Decorator function.
"""
@self._configure_function_builder
def wrap(fb):
def decorator():
fb.add_binding(
binding=TableInput(
name=arg_name,
connection=connection,
table_name=table_name,
row_key=row_key,
partition_key=partition_key,
take=take,
filter=filter,
data_type=parse_singular_param_to_enum(data_type,
DataType)))
return fb
return decorator()
return wrap
def table_output(self,
arg_name: str,
connection: str,
table_name: str,
row_key: Optional[str] = None,
partition_key: Optional[str] = None,
data_type: Optional[
Union[DataType, str]] = None) -> Callable[..., Any]:
"""
The table_output decorator adds :class:`TableOutput` to the
:class:`FunctionBuilder` object
for building :class:`Function` object used in worker function
indexing model. This is equivalent to defining TableOutput
in the function.json which enables function to write entities
        to a table in an Azure Storage account.
All optional fields will be given default value by function host when
they are parsed by function host.
Ref: https://aka.ms/tablesbindings
:param arg_name: The name of the variable that represents
the table or entity in function code.
:param connection: The name of an app setting or setting collection
that specifies how to connect to the table service.
        :param table_name: The name of the table.
        :param row_key: The row key of the table entity to write.
        :param partition_key: The partition key of the table entity to write.
:param data_type: Defines how Functions runtime should treat the
parameter value.
:return: Decorator function.
"""
@self._configure_function_builder
def wrap(fb):
def decorator():
fb.add_binding(
binding=TableOutput(
name=arg_name,
connection=connection,
table_name=table_name,
row_key=row_key,
partition_key=partition_key,
data_type=parse_singular_param_to_enum(data_type,
DataType)))
return fb
return decorator()
return wrap
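    # Editor's note: hypothetical sketch for the table_input/table_output decorators
    # above (editor addition, not original source). Table and connection names are
    # placeholders; the runtime typically hands table rows to Python as a JSON string.
    #
    #   @app.route(route="rows")
    #   @app.table_input(arg_name="rows", connection="AzureWebJobsStorage",
    #                    table_name="Items", take=10)
    #   def read_rows(req: func.HttpRequest, rows: str) -> str:
    #       return rows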
def generic_input_binding(self,
arg_name: str,
type: str,
data_type: Optional[Union[DataType, str]] = None,
**kwargs
) -> Callable[..., Any]:
"""
The generic_input_binding decorator adds :class:`GenericInputBinding`
to the :class:`FunctionBuilder` object for building :class:`Function`
object used in worker function indexing model.
This is equivalent to defining a generic input binding in the
function.json which enables function to read data from a
custom defined input source.
All optional fields will be given default value by function host when
they are parsed by function host.
Ref: https://aka.ms/azure-function-binding-custom
:param arg_name: The name of input parameter in the function code.
:param type: The type of binding.
:param data_type: Defines how Functions runtime should treat the
parameter value.
:param kwargs: Keyword arguments for specifying additional binding
fields to include in the binding json.
:return: Decorator function.
"""
@self._configure_function_builder
def wrap(fb):
def decorator():
fb.add_binding(
binding=GenericInputBinding(
name=arg_name,
type=type,
data_type=parse_singular_param_to_enum(data_type,
DataType),
**kwargs))
return fb
return decorator()
return wrap
def generic_output_binding(self,
arg_name: str,
type: str,
data_type: Optional[
Union[DataType, str]] = None,
**kwargs
) -> Callable[..., Any]:
"""
The generic_output_binding decorator adds :class:`GenericOutputBinding`
to the :class:`FunctionBuilder` object for building :class:`Function`
object used in worker function indexing model.
This is equivalent to defining a generic output binding in the
function.json which enables function to write data from a
custom defined output source.
All optional fields will be given default value by function host when
they are parsed by function host.
Ref: https://aka.ms/azure-function-binding-custom
:param arg_name: The name of output parameter in the function code.
:param type: The type of binding.
:param data_type: Defines how Functions runtime should treat the
parameter value.
:param kwargs: Keyword arguments for specifying additional binding
fields to include in the binding json.
:return: Decorator function.
"""
@self._configure_function_builder
def wrap(fb):
def decorator():
fb.add_binding(
binding=GenericOutputBinding(
name=arg_name,
type=type,
data_type=parse_singular_param_to_enum(data_type,
DataType),
**kwargs))
return fb
return decorator()
return wrap
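    # Editor's note: generic_input_binding/generic_output_binding above can describe
    # binding types that lack dedicated decorators. This sketch is an editor addition;
    # the "rabbitMQ" type and its extra fields are assumptions, passed through as raw
    # function.json fields.
    #
    #   @app.route(route="publish")
    #   @app.generic_output_binding(arg_name="msg", type="rabbitMQ",
    #                               queueName="jobs",
    #                               connectionStringSetting="RabbitMqConnection")
    #   def publish(req: func.HttpRequest, msg: func.Out[str]) -> str:
    #       msg.set(req.get_body().decode("utf-8"))
    #       return "published"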
class FunctionRegister(DecoratorApi, HttpFunctionsAuthLevelMixin, ABC):
def __init__(self, auth_level: Union[AuthLevel, str], *args, **kwargs):
"""Interface for declaring top level function app class which will
be directly indexed by Python Function runtime.
:param auth_level: Determines what keys, if any, need to be present
on the request in order to invoke the function.
:param args: Variable length argument list.
:param kwargs: Arbitrary keyword arguments.
"""
DecoratorApi.__init__(self, *args, **kwargs)
HttpFunctionsAuthLevelMixin.__init__(self, auth_level, *args, **kwargs)
self._require_auth_level: Optional[bool] = None
def get_functions(self) -> List[Function]:
"""Get the function objects in the function app.
:return: List of functions in the function app.
"""
functions = [function_builder.build(self.auth_level)
for function_builder in self._function_builders]
if not self._require_auth_level:
self._require_auth_level = any(
function.is_http_function() for function in functions)
if not self._require_auth_level:
logging.warning(
'Auth level is not applied to non http '
'function app. Ref: '
'https://docs.microsoft.com/azure/azure-functions/functions'
'-bindings-http-webhook-trigger?tabs=in-process'
'%2Cfunctionsv2&pivots=programming-language-python#http-auth')
return functions
def register_functions(self, function_container: DecoratorApi) -> None:
"""Register a list of functions in the function app.
:param function_container: Instance extending :class:`DecoratorApi`
which contains a list of functions.
"""
if isinstance(function_container, FunctionRegister):
raise TypeError('functions can not be type of FunctionRegister!')
self._function_builders.extend(function_container._function_builders)
register_blueprint = register_functions
class FunctionApp(FunctionRegister, TriggerApi, BindingApi):
"""FunctionApp object used by worker function indexing model captures
user defined functions and metadata.
Ref: https://aka.ms/azure-function-ref
"""
def __init__(self,
http_auth_level: Union[AuthLevel, str] = AuthLevel.FUNCTION):
"""Constructor of :class:`FunctionApp` object.
:param http_auth_level: Determines what keys, if any, need to be
present
on the request in order to invoke the function.
"""
super().__init__(auth_level=http_auth_level)
class Blueprint(TriggerApi, BindingApi):
"""Functions container class where all the functions
loaded in it can be registered in :class:`FunctionRegister` subclasses
but itself can not be indexed directly. The class contains all existing
supported trigger and binding decorator functions.
"""
pass
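# Editor's note: illustrative sketch (editor addition, not part of the original source)
# of how the FunctionApp and Blueprint classes above are typically combined: functions
# are declared on a Blueprint and attached to the app via register_functions /
# register_blueprint. The module layout and names are assumptions.
#
#   bp = Blueprint()
#
#   @bp.route(route="ping")
#   def ping(req: func.HttpRequest) -> str:
#       return "pong"
#
#   app = FunctionApp(http_auth_level=AuthLevel.ANONYMOUS)
#   app.register_blueprint(bp)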
class ExternalHttpFunctionApp(FunctionRegister, TriggerApi, ABC):
"""Interface to extend for building third party http function apps."""
@abc.abstractmethod
def _add_http_app(self,
http_middleware: Union[ | AsgiMiddleware, WsgiMiddleware]) -> None: | 4 | 2023-12-16 04:12:01+00:00 | 8k |
ict-bigdatalab/RIGHT | data_preprocess.py | [
{
"identifier": "MySimCSE",
"path": "dense_retrieval.py",
"snippet": "class MySimCSE(SimCSE):\n\n def encode(self, sentence: Union[str, List[str]],\n device: str = None,\n return_numpy: bool = False,\n normalize_to_unit: bool = True,\n keepdim: bool = False,\n batch_size: int = 64,\n max_length: int = 128) -> Union[ndarray, Tensor]:\n\n target_device = self.device if device is None else device\n self.model = self.model.to(target_device)\n\n single_sentence = False\n if isinstance(sentence, str):\n sentence = [sentence]\n single_sentence = True\n\n embedding_list = []\n with torch.no_grad():\n total_batch = len(sentence) // batch_size + (1 if len(sentence) % batch_size > 0 else 0)\n for batch_id in range(total_batch):\n inputs = self.tokenizer(\n sentence[batch_id * batch_size:(batch_id + 1) * batch_size],\n padding=True,\n truncation=True,\n max_length=max_length,\n return_tensors=\"pt\"\n )\n inputs = {k: v.to(target_device) for k, v in inputs.items()}\n outputs = self.model(**inputs, return_dict=True)\n if self.pooler == \"cls\":\n embeddings = outputs.pooler_output\n elif self.pooler == \"cls_before_pooler\":\n embeddings = outputs.last_hidden_state[:, 0]\n elif self.pooler == 'all_token_pooling':\n embeddings = outputs.last_hidden_state[:, 1:].mean(1)\n else:\n raise NotImplementedError\n if normalize_to_unit:\n embeddings = embeddings / embeddings.norm(dim=1, keepdim=True)\n embedding_list.append(embeddings.cpu())\n embeddings = torch.cat(embedding_list, 0)\n\n if single_sentence and not keepdim:\n embeddings = embeddings[0]\n\n if return_numpy and not isinstance(embeddings, ndarray):\n return embeddings.numpy()\n return embeddings\n\n def search(self, queries: Union[str, List[str]],\n device: str = None,\n threshold: float = 0.6,\n top_k: int = 5) -> Union[List[Tuple[str, float]], List[List[Tuple[str, float]]]]:\n if not self.is_faiss_index:\n if isinstance(queries, list):\n combined_results = []\n for query in queries:\n results = self.search(query, device)\n combined_results.append(results)\n return combined_results\n\n similarities = self.similarity(queries, self.index[\"index\"]).tolist()\n id_and_score = []\n for i, s in enumerate(similarities):\n if s >= threshold:\n id_and_score.append((i, s))\n id_and_score = sorted(id_and_score, key=lambda x: x[1], reverse=True)[:top_k]\n results = [(idx, score) for idx, score in id_and_score]\n return results\n else:\n query_vecs = self.encode(queries, device=device, normalize_to_unit=True, keepdim=True, return_numpy=True)\n\n distance, idx = self.index[\"index\"].search(query_vecs.astype(np.float32), top_k)\n\n def pack_single_result(dist, idx):\n results = [(i, s) for i, s in zip(idx, dist) if s >= threshold]\n return results\n\n if isinstance(queries, list):\n combined_results = []\n for i in range(len(queries)):\n results = pack_single_result(distance[i], idx[i])\n combined_results.append(results)\n return combined_results\n else:\n return pack_single_result(distance[0], idx[0])"
},
{
"identifier": "get_transformed_io",
"path": "get_datasets.py",
"snippet": "def get_transformed_io(src_path, dst_path):\n src = read_line_examples_from_file(src_path)\n dst = read_line_examples_from_file(dst_path)\n assert len(src) == len(dst)\n print(f\"Total examples = {len(dst)}\")\n targets, hashtags = get_para_targets(dst)\n assert len(src) == len(targets)\n return src, targets, hashtags"
},
{
"identifier": "get_hashtag_list",
"path": "get_datasets.py",
"snippet": "def get_hashtag_list(dst):\n tags = dst.split('[SEP]')\n target = []\n for j in range(len(tags)):\n tags[j] = tags[j].strip()\n if tags[j] != '' and tags[j] not in target:\n target.append(tags[j])\n # if the dst is nothing\n if len(target) == 0:\n target.append('None')\n # statistic_hashtags(hashtags)\n return target"
},
{
"identifier": "f1",
"path": "eval_utils.py",
"snippet": "def f1(pre, rec):\n if pre == 0 and rec == 0:\n return 0.0\n return 2 * pre * rec / (pre + rec)"
},
{
"identifier": "random_augmentation",
"path": "data_augmentation.py",
"snippet": "def random_augmentation(words, n=1):\n for _ in range(20):\n random_percent = random.random()\n if random_percent <= 0.7:\n new_words = synonym_replacement(words, n)\n elif random_percent <= 0.8:\n new_words = random_deletion(words, n)\n elif random_percent <= 0.9:\n new_words = random_swap(words, n)\n elif random_percent <= 1:\n new_words = random_insertion(words, n)\n if new_words != words:\n return new_words\n return new_words + ' ' + stop_words[random.randint(0, 178)]"
}
] | import json
import csv
import jieba
import jieba.posseg as pseg
from tqdm import tqdm
from gensim import corpora
from gensim.summarization.bm25 import BM25
from nltk.corpus import stopwords
from transformers import BertModel
from transformers import BertTokenizer
from dense_retrieval import MySimCSE
from get_datasets import get_transformed_io, get_hashtag_list
from eval_utils import f1
from functools import cmp_to_key
from data_augmentation import random_augmentation | 4,067 | with open(val_out_path, 'w', encoding='utf-8') as f:
f.write(json_str)
print("Finish creating val queries result!")
# test query
test_query_result = []
print("Start to create test queries result...")
for i in tqdm(range(len(test_queries))):
query = test_queries[i]
results = model.search(query, device='cuda', threshold=-99, top_k=10)
test_query_item = dict()
test_query_item['index'] = [ind for ind, score in results]
test_query_item['score'] = [score for ind, score in results]
test_query_result.append(test_query_item)
json_str = json.dumps(test_query_result, indent=4)
with open(test_out_path, 'w', encoding='utf-8') as f:
f.write(json_str)
print("Finish creating test queries result!")
def clean_repetition_datasets(str_data_path, dst_data_path):
# completed
str_out_path = str_data_path + "_after_cleaning.txt"
dst_out_path = dst_data_path + "_after_cleaning.txt"
documents = []
dst = []
with open(str_data_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip("\n")
if not line:
continue
documents.append(line)
with open(dst_data_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip("\n")
if not line:
continue
dst.append(line)
print(len(documents))
print(len(dst))
new_doc = []
new_dst = []
for idx in range(len(documents)):
if documents[idx] not in new_doc or dst[idx] not in new_dst:
new_doc.append(documents[idx])
new_dst.append(dst[idx])
else:
print(idx)
print(documents[idx])
print(dst[idx])
print('=' * 30)
print(len(new_doc))
print(len(new_dst))
with open(str_out_path, 'w', encoding='utf-8') as f:
for doc in new_doc:
f.write(doc + '\n')
with open(dst_out_path, 'w', encoding='utf-8') as f:
for d in new_dst:
f.write(d + '\n')
def preprocess_wangyue_data(post_path, conv_path, tag_path, src_path, dst_path):
post_list = []
with open(post_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip("\n")
if not line:
continue
post_list.append(line.strip())
conv_list = []
with open(conv_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip("\n")
if not line:
continue
conv_list.append(line.strip())
tag_list = []
with open(tag_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip("\n")
if not line:
continue
tag_list.append(line.strip())
assert len(post_list) == len(conv_list) and len(conv_list) == len(tag_list)
src_list = []
dst_list = []
for i in range(len(post_list)):
src = post_list[i] + '. ' + conv_list[i]
dst = tag_list[i].replace(';', ' [SEP] ')
src_list.append(src)
dst_list.append(dst)
with open(src_path, 'w', encoding='utf-8') as f:
for src in src_list:
f.write(src + '\n')
with open(dst_path, 'w', encoding='utf-8') as f:
for dst in dst_list:
f.write(dst + '\n')
def compute_hashtag_coverage(labels, hashtags):
total_r = len(labels)
total_p = len(hashtags)
label_list = labels.copy()
hashtag_list = hashtags.copy()
true_num = 0
for lab in label_list:
for hashtag in hashtag_list:
if lab == hashtag:
true_num += 1
hashtag_list.remove(lab)
break
p = true_num / total_p
r = true_num / total_r
|
def generate_index_json_file(data_path):
out_path = data_path + "_index.json"
data = []
i = 0
with open(data_path, 'r', encoding='utf-8') as f:
for line in tqdm(f):
line = line.strip("\n")
if not line:
continue
item = {
"id": i,
"contents": line
}
data.append(item)
i += 1
print(i)
jsontext = json.dumps(data, indent=4)
with open(out_path, 'w') as json_file:
json_file.write(jsontext)
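# Editor's note: illustrative call of generate_index_json_file above (editor addition,
# not part of the original script). The path is a placeholder; the function writes
# "<path>_index.json" containing {"id", "contents"} records, a layout commonly used
# when building retrieval indexes.
#
#   generate_index_json_file("data/train.src")   # -> data/train.src_index.json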
def bm25_retrieval_results(train_src_data_path, val_src_data_path, test_src_data_path):
train_out_path = train_src_data_path + "_bm25_index.json"
val_out_path = val_src_data_path + "_bm25_index.json"
test_out_path = test_src_data_path + "_bm25_index.json"
# read training documents
documents = []
with open(train_src_data_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip("\n")
if not line:
continue
documents.append(line)
print("The number of training documents is: ", len(documents))
# read val queries
val_queries = []
with open(val_src_data_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip("\n")
if not line:
continue
val_queries.append(line)
print("The number of val queries is: ", len(val_queries))
# read test queries
test_queries = []
with open(test_src_data_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip("\n")
if not line:
continue
test_queries.append(line)
print("The number of test queries is: ", len(test_queries))
# build document index
# split word
texts = [doc.split() for doc in documents]
# remove stopwords
for i in range(len(texts)):
texts[i] = [word for word in texts[i] if word not in stopwords.words('english')]
dictionary = corpora.Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
bm25_obj = BM25(corpus)
# training query
train_query_result = []
print("Start to create training queries result...")
for i in tqdm(range(len(documents))):
query = texts[i]
query_doc = dictionary.doc2bow(query)
scores = bm25_obj.get_scores(query_doc)
best_docs = sorted(range(len(scores)), key=lambda j: scores[j])[-11:][::-1]
if i in best_docs:
best_docs.remove(i)
else:
best_docs = best_docs[:10]
print(documents[i])
train_query_result.append(best_docs)
json_str = json.dumps(train_query_result, indent=4)
with open(train_out_path, 'w', encoding='utf-8') as f:
f.write(json_str)
print("Finish creating training queries result!")
# val query
val_query_result = []
print("Start to create val queries result...")
val_texts = [vq.split() for vq in val_queries]
for i in tqdm(range(len(val_texts))):
query = [word for word in val_texts[i] if word not in stopwords.words('english')]
query_doc = dictionary.doc2bow(query)
scores = bm25_obj.get_scores(query_doc)
best_docs = sorted(range(len(scores)), key=lambda j: scores[j])[-10:][::-1]
val_query_result.append(best_docs)
json_str = json.dumps(val_query_result, indent=4)
with open(val_out_path, 'w', encoding='utf-8') as f:
f.write(json_str)
print("Finish creating val queries result!")
# test query
test_query_result = []
print("Start to create test queries result...")
test_texts = [tq.split() for tq in test_queries]
for i in tqdm(range(len(test_texts))):
query = [word for word in test_texts[i] if word not in stopwords.words('english')]
query_doc = dictionary.doc2bow(query)
scores = bm25_obj.get_scores(query_doc)
best_docs = sorted(range(len(scores)), key=lambda j: scores[j])[-10:][::-1]
test_query_result.append(best_docs)
json_str = json.dumps(test_query_result, indent=4)
with open(test_out_path, 'w', encoding='utf-8') as f:
f.write(json_str)
print("Finish creating test queries result!")
def bm25_retrieval_score_results(train_src_data_path, val_src_data_path, test_src_data_path):
train_out_path = train_src_data_path + "_bm25_score.json"
val_out_path = val_src_data_path + "_bm25_score.json"
test_out_path = test_src_data_path + "_bm25_score.json"
# read training documents
documents = []
with open(train_src_data_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip("\n")
if not line:
continue
documents.append(line)
print("The number of training documents is: ", len(documents))
# read val queries
val_queries = []
with open(val_src_data_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip("\n")
if not line:
continue
val_queries.append(line)
print("The number of val queries is: ", len(val_queries))
# read test queries
test_queries = []
with open(test_src_data_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip("\n")
if not line:
continue
test_queries.append(line)
print("The number of test queries is: ", len(test_queries))
# build document index
# split word
texts = [doc.split() for doc in documents]
# remove stopwords
for i in range(len(texts)):
texts[i] = [word for word in texts[i] if word not in stopwords.words('english')]
dictionary = corpora.Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
bm25_obj = BM25(corpus)
# training query
train_query_result = []
print("Start to create training queries result...")
for i in tqdm(range(len(documents))):
query = texts[i]
query_doc = dictionary.doc2bow(query)
scores = bm25_obj.get_scores(query_doc)
best_docs = sorted(range(len(scores)), key=lambda j: scores[j])[-11:][::-1]
if i in best_docs:
best_docs.remove(i)
else:
best_docs = best_docs[:10]
print(documents[i])
train_query_item = dict()
train_query_item['index'] = best_docs
train_query_item['score'] = [scores[doc] for doc in best_docs]
train_query_result.append(train_query_item)
json_str = json.dumps(train_query_result, indent=4)
with open(train_out_path, 'w', encoding='utf-8') as f:
f.write(json_str)
print("Finish creating training queries result!")
# val query
val_query_result = []
print("Start to create val queries result...")
val_texts = [vq.split() for vq in val_queries]
for i in tqdm(range(len(val_texts))):
query = [word for word in val_texts[i] if word not in stopwords.words('english')]
query_doc = dictionary.doc2bow(query)
scores = bm25_obj.get_scores(query_doc)
best_docs = sorted(range(len(scores)), key=lambda j: scores[j])[-10:][::-1]
val_query_item = dict()
val_query_item['index'] = best_docs
val_query_item['score'] = [scores[doc] for doc in best_docs]
val_query_result.append(val_query_item)
json_str = json.dumps(val_query_result, indent=4)
with open(val_out_path, 'w', encoding='utf-8') as f:
f.write(json_str)
print("Finish creating val queries result!")
# test query
test_query_result = []
print("Start to create test queries result...")
test_texts = [tq.split() for tq in test_queries]
for i in tqdm(range(len(test_texts))):
query = [word for word in test_texts[i] if word not in stopwords.words('english')]
query_doc = dictionary.doc2bow(query)
scores = bm25_obj.get_scores(query_doc)
best_docs = sorted(range(len(scores)), key=lambda j: scores[j])[-10:][::-1]
test_query_item = dict()
test_query_item['index'] = best_docs
test_query_item['score'] = [scores[doc] for doc in best_docs]
test_query_result.append(test_query_item)
json_str = json.dumps(test_query_result, indent=4)
with open(test_out_path, 'w', encoding='utf-8') as f:
f.write(json_str)
print("Finish creating test queries result!")
def dense_retrieval_results(train_src_data_path, val_src_data_path, test_src_data_path):
train_out_path = train_src_data_path + "_bert_original_score.json"
val_out_path = val_src_data_path + "_bert_original_score.json"
test_out_path = test_src_data_path + "_bert_original_score.json"
# loading model
# model = MySimCSE("princeton-nlp/sup-simcse-roberta-large", device='cpu')
# model = MySimCSE("/home/qiupeng/frz_project/SimCSE/result/retrieval_bert_chinese_base", device='cuda', pooler='cls')
model = MySimCSE("bert-base-chinese", device='cuda', pooler='cls')
# read training documents
documents = []
with open(train_src_data_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip("\n")
if not line:
continue
documents.append(line)
print("The number of training documents is: ", len(documents))
# read val queries
val_queries = []
with open(val_src_data_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip("\n")
if not line:
continue
val_queries.append(line)
print("The number of val queries is: ", len(val_queries))
# read test queries
test_queries = []
with open(test_src_data_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip("\n")
if not line:
continue
test_queries.append(line)
print("The number of test queries is: ", len(test_queries))
# create index
model.build_index(documents, device='cuda', batch_size=64)
# training query
train_query_result = []
print("Start to create training queries result...")
for i in tqdm(range(len(documents))):
results = model.search(documents[i], device='cuda', threshold=-99, top_k=11)
for k in range(len(results)):
if results[k][0] == i:
results.pop(k)
break
if len(results) > 10:
results = results[:10]
train_query_item = dict()
train_query_item['index'] = [ind for ind, score in results]
train_query_item['score'] = [score for ind, score in results]
train_query_result.append(train_query_item)
json_str = json.dumps(train_query_result, indent=4)
with open(train_out_path, 'w', encoding='utf-8') as f:
f.write(json_str)
print("Finish creating training queries result!")
# val query
val_query_result = []
print("Start to create val queries result...")
for i in tqdm(range(len(val_queries))):
query = val_queries[i]
results = model.search(query, device='cuda', threshold=-99, top_k=10)
val_query_item = dict()
val_query_item['index'] = [ind for ind, score in results]
val_query_item['score'] = [score for ind, score in results]
val_query_result.append(val_query_item)
json_str = json.dumps(val_query_result, indent=4)
with open(val_out_path, 'w', encoding='utf-8') as f:
f.write(json_str)
print("Finish creating val queries result!")
# test query
test_query_result = []
print("Start to create test queries result...")
for i in tqdm(range(len(test_queries))):
query = test_queries[i]
results = model.search(query, device='cuda', threshold=-99, top_k=10)
test_query_item = dict()
test_query_item['index'] = [ind for ind, score in results]
test_query_item['score'] = [score for ind, score in results]
test_query_result.append(test_query_item)
json_str = json.dumps(test_query_result, indent=4)
with open(test_out_path, 'w', encoding='utf-8') as f:
f.write(json_str)
print("Finish creating test queries result!")
def clean_repetition_datasets(str_data_path, dst_data_path):
# completed
str_out_path = str_data_path + "_after_cleaning.txt"
dst_out_path = dst_data_path + "_after_cleaning.txt"
documents = []
dst = []
with open(str_data_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip("\n")
if not line:
continue
documents.append(line)
with open(dst_data_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip("\n")
if not line:
continue
dst.append(line)
print(len(documents))
print(len(dst))
new_doc = []
new_dst = []
for idx in range(len(documents)):
if documents[idx] not in new_doc or dst[idx] not in new_dst:
new_doc.append(documents[idx])
new_dst.append(dst[idx])
else:
print(idx)
print(documents[idx])
print(dst[idx])
print('=' * 30)
print(len(new_doc))
print(len(new_dst))
with open(str_out_path, 'w', encoding='utf-8') as f:
for doc in new_doc:
f.write(doc + '\n')
with open(dst_out_path, 'w', encoding='utf-8') as f:
for d in new_dst:
f.write(d + '\n')
def preprocess_wangyue_data(post_path, conv_path, tag_path, src_path, dst_path):
post_list = []
with open(post_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip("\n")
if not line:
continue
post_list.append(line.strip())
conv_list = []
with open(conv_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip("\n")
if not line:
continue
conv_list.append(line.strip())
tag_list = []
with open(tag_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip("\n")
if not line:
continue
tag_list.append(line.strip())
assert len(post_list) == len(conv_list) and len(conv_list) == len(tag_list)
src_list = []
dst_list = []
for i in range(len(post_list)):
src = post_list[i] + '. ' + conv_list[i]
dst = tag_list[i].replace(';', ' [SEP] ')
src_list.append(src)
dst_list.append(dst)
with open(src_path, 'w', encoding='utf-8') as f:
for src in src_list:
f.write(src + '\n')
with open(dst_path, 'w', encoding='utf-8') as f:
for dst in dst_list:
f.write(dst + '\n')
def compute_hashtag_coverage(labels, hashtags):
total_r = len(labels)
total_p = len(hashtags)
label_list = labels.copy()
hashtag_list = hashtags.copy()
true_num = 0
for lab in label_list:
for hashtag in hashtag_list:
if lab == hashtag:
true_num += 1
hashtag_list.remove(lab)
break
p = true_num / total_p
r = true_num / total_r | f = f1(p, r) | 3 | 2023-12-16 06:00:53+00:00 | 8k |
ilyamiro/Stewart | GUI/console_gui.py | [
{
"identifier": "Core",
"path": "Core/Core.py",
"snippet": "class Core:\n def __init__(self):\n core_logger.debug(\"Core execution started\")\n\n # Initializing main supportive classes\n self.synthesizer = Synthesizer()\n self.recognition = Voice()\n self.data = Data()\n self.command_tree = CommandTree()\n\n # plugin system handling\n self.plugin_system = PluginOperation()\n self.__plugins_register__()\n\n # handling variables\n self.__handle_variables__()\n\n # plugins installation\n self.__plugins_load__()\n self.__install_plugins__()\n\n self.__registered_commands_update__()\n\n core_logger.debug(\"Core loading ended\")\n\n self.said_word = \"\"\n self.power = False\n\n def start(self):\n thread = threading.Thread(target=self.__speech_recognition__)\n thread.start()\n\n def __handle_variables__(self):\n # opening config file\n with open(f\"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Database/config.json\") as file:\n config = json.load(file)\n # setting config variables to use as attributes inside a class\n for key, data in config.items():\n self.__setattr__(key, data)\n # setting other required arguments, that are not supposed to be changed by user\n self.loaded_plugins = []\n self.log_dict: dict[str: str] = {}\n self.plugin_ref: dict[str: dict] = {}\n self.plugin_enable: dict[str: bool] = {}\n self.multi_handle_answers = [\"Конечно, сэр\", \"Выполняю, сэр\", \"Есть, сэр\"]\n self.default_answers = [\"Я тут, сэр\", \"Слушаю вас, сэр\", \"Что-то хотели, сэр?\", \"Всегда к вашим услугам, сэр\",\n \"Я весь внимание, сэр\", \"Добрый день, сэр!\"]\n core_logger.debug(\"Config parameters imported\")\n\n def __speech_recognition__(self):\n core_logger.debug(\"Speech recognition started\")\n while True:\n # iterating through speech recognition generator\n for data in self.recognition.listen():\n # setting recognized data as an attribute for access to the current data in outer field in class\n self.said_word = data\n # logging\n self.log(\"user\", data)\n # checking if the application is enabled\n if self.power:\n # removing trigger word from data for it no to bother the main command processin\n request = self.__remove_trigger_word__(data)\n if request != \"-\":\n self.__handle_input__(request)\n\n def __handle_input__(self, request):\n if not request:\n thread = threading.Thread(target=self.synthesizer.say, args=[random.choice(self.default_answers)])\n thread.start()\n else:\n total = self.__multihandle__(request)\n if len(total) == 1:\n result = self.command_tree.find_command(total[0])\n if result:\n if result[2]:\n thread = threading.Thread(target=self.synthesizer.say, args=[random.choice(result[2])])\n thread.start()\n result = list(result)\n result.extend([total[0], request])\n self.__synthesis_handler__(result)\n elif len(total) > 1:\n thread = threading.Thread(target=self.synthesizer.say, args=[random.choice(self.multi_handle_answers)])\n thread.start()\n said = False\n for command in total:\n result = self.command_tree.find_command(command)\n\n if result:\n result = list(result)\n result.extend([total[0], request])\n output = getattr(self, result[0])(parameters=result[1], command=result[3], request=result[4])\n if output and not said:\n said = True\n choiced = random.choice(self.multi_handle_answers)\n thread = threading.Thread(target=self.synthesizer.say,\n args=[choiced])\n thread.start()\n self.log(\"assistant\", choiced)\n\n def __synthesis_handler__(self, request):\n @self.__say_dec__(request[2])\n def __send_handler_request__():\n return getattr(self, request[0])(parameters=request[1], command=request[3], 
request=request[4])\n\n __send_handler_request__()\n\n def __say_dec__(self, synthesis):\n def decorator(func):\n def wrapper():\n if synthesis:\n self.log(\"assistant\", synthesis)\n to_say = func()\n if to_say is not None and not synthesis:\n choiced = to_say if not isinstance(to_say, list) else random.choice(to_say)\n self.log(\"assistant\", choiced)\n thread = threading.Thread(target=self.synthesizer.say,\n args=[choiced])\n thread.start()\n\n return wrapper\n\n return decorator\n\n def __install_plugins__(self):\n for plugin in self.loaded_plugins:\n plugin_instance = plugin()\n if plugin_instance.info.type == \"voice\":\n plugin_instance.__add_commands__()\n self.plugin_enable[plugin_instance.info.name] = True\n self.default_answers.extend(plugin_instance.default_answers)\n self.multi_handle_answers.extend(plugin_instance.multi_handle_answers)\n for handler in plugin_instance.handlers:\n self.__setattr__(handler, plugin_instance.__getattribute__(handler))\n core_logger.debug(f\"Voice plugin {plugin_instance.info.name} has been set\")\n elif plugin_instance.info.type == \"command\":\n plugin_instance.__add_commands__()\n self.plugin_enable[plugin_instance.info.name] = True\n self.command_tree.add_commands(plugin_instance.command_tree)\n self.plagin_commands(plugin_instance.info.name, plugin_instance.command_tree)\n self.__register_commands__(plugin_instance.command_tree)\n for handler in plugin_instance.handlers:\n self.__setattr__(handler, plugin_instance.__getattribute__(handler))\n for var in plugin_instance.variables.keys():\n self.__setattr__(var, plugin_instance.variables[var])\n core_logger.debug(f\"Command Plugin {plugin_instance.info.name} has been set\")\n self.set_plugins_status()\n\n def disable_plagin(self, name):\n if self.plugin_enable[name]:\n self.plugin_enable[name] = False\n with open(f\"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Database/commands.json\", \"r\") as file:\n data = json.load(file)\n data: dict\n for command in self.plugin_ref[name].keys():\n self.command_tree.delete_command(command)\n to_remove = \" \".join(command)\n if data[to_remove]:\n del data[to_remove]\n with open(f\"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Database/commands.json\", \"w\") as file:\n json.dump(data, file, ensure_ascii=False)\n self.set_plugins_status()\n\n def enable_plugin(self, name):\n if not self.plugin_enable[name]:\n self.plugin_enable[name] = True\n self.__register_commands__(self.plugin_ref[name])\n self.__registered_commands_update__()\n self.set_plugins_status()\n\n def set_plugins_status(self):\n with open(f\"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Database/config.json\", \"r\") as file:\n data = json.load(file)\n data[\"plugins_status\"] = self.plugin_enable\n with open(f\"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Database/config.json\", \"w\") as file:\n json.dump(data, file, ensure_ascii=False)\n\n @staticmethod\n def __register_commands__(commands: dict):\n updated_commands = {}\n for element in commands.keys():\n updated_commands[\" \".join(element)] = commands[element]\n with open(f\"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Database/commands.json\", \"r\") as file:\n data = json.load(file)\n data: dict\n data.update(updated_commands)\n with open(f\"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Database/commands.json\", \"w\") as file:\n json.dump(data, file, ensure_ascii=False)\n\n def __registered_commands_update__(self):\n with open(f\"{os.path.abspath(os.path.join(os.getcwd(), 
os.pardir))}/Database/commands.json\", \"r\") as file:\n data = json.load(file)\n data: dict\n updated_commands = {}\n for element in data.keys():\n updated_commands[tuple(element.split())] = data[element]\n self.command_tree.add_commands(updated_commands)\n\n def __remove_trigger_word__(self, request):\n for word in self.trigger_words:\n if word in request:\n request = \" \".join(request.split(word)[1:])[1:]\n return request\n return \"-\"\n\n def log(self, author, text):\n self.log_dict[author] = text\n\n def log_dump(self):\n with open(f\"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Database/log.txt\", \"a\") as file:\n file.write(self.log_dict.__str__())\n\n def plagin_commands(self, name, commands):\n self.plugin_ref[name] = commands\n\n def __multihandle__(self, request):\n list_of_commands = []\n current_command = []\n split_request = request.split()\n for word in split_request:\n if word in self.command_tree.first_words:\n if current_command:\n list_of_commands.append(current_command)\n if word in [\"найди\", \"поиск\", \"найти\", \"напиши\", \"запиши\"]:\n current_command = split_request[split_request.index(word):]\n list_of_commands.append(current_command)\n current_command = []\n break\n current_command = [word]\n else:\n if current_command and word != \"и\":\n current_command.append(word)\n elif not current_command and word != \"и\":\n current_command = [word]\n if current_command:\n list_of_commands.append(current_command)\n return list_of_commands\n\n def __plugins_load__(self):\n for plugin in self.plugins:\n self.plugin_system: PluginOperation\n self.loaded_plugins.append(self.plugin_system.__plugin_load__(plugin[\"name\"]))\n core_logger.info(f\"Plugin {plugin['name']} was successfully loaded\")\n\n def __plugins_register__(self):\n core_logger.debug(\"Started plugin registering\")\n plugin_path = f\"{os.path.abspath(os.path.join(os.getcwd(), os.pardir))}/Plugin\"\n for plugin in os.listdir(plugin_path):\n if os.path.isdir(f\"{plugin_path}/{plugin}\") and not plugin.startswith(\"__\"):\n self.plugin_system: PluginOperation\n spec = importlib.util.spec_from_file_location(\"main\", f\"{plugin_path}/{plugin}/main.py\")\n plugin_module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(plugin_module)\n plugin_instance = plugin_module.Main()\n with open(f\"{plugin_path}/{plugin}/config.json\", \"r\") as file:\n data = json.load(file)\n plugin_instance.info = PluginInfo(*data.values())\n self.plugin_system.register_plugin(plugin_instance)"
},
{
"identifier": "CommandTree",
"path": "Command_System/CommandTree.py",
"snippet": "class CommandTree:\n \"\"\"\n Represents a trie structure for storing and retrieving voice assistant commands.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initializes a CommandTree with a root CommandNode.\n \"\"\"\n self.root = CommandNode()\n self.synonym_map = {} # Synonym mapping for words with the same meaning\n self.first_words = set()\n\n def add_synonym(self, synonym, canonical):\n \"\"\"\n Adds a synonym to the synonym map.\n\n Parameters:\n - synonym: The synonym word.\n - canonical: The canonical form of the word.\n \"\"\"\n self.synonym_map[synonym] = canonical\n\n def expand_synonyms(self, words):\n \"\"\"\n Expands synonyms in a list of words based on the synonym map.\n\n Parameters:\n - words: A list of words.\n\n Returns:\n - A list of expanded words.\n \"\"\"\n\n expanded_words = [self.synonym_map.get(word, word) for word in words]\n\n return expanded_words\n\n def delete_command(self, command):\n \"\"\"\n Deletes a command from the CommandTree.\n\n Parameters:\n - command: The command to be deleted (as a list of parts).\n \"\"\"\n expanded_command = self.expand_synonyms(command)\n self._delete_command_recursive(self.root, expanded_command)\n\n def _delete_command_recursive(self, node, command):\n \"\"\"\n Recursive method to delete a command from the CommandTree.\n\n Parameters:\n - node: The current node in the trie.\n - command: The remaining parts of the command (as a list).\n\n Returns:\n - True if the command was deleted, False otherwise.\n \"\"\"\n if not command:\n # If the command is empty, we've reached the end of the command\n # and can remove the handler and related information.\n node.handler = None\n node.parameters = None\n node.synthesize = None\n return True\n\n part = command[0]\n if part in node.children:\n # Recursively traverse the tree until the end of the command is reached.\n if self._delete_command_recursive(node.children[part], command[1:]):\n # If the child node returns True, it means the command was deleted,\n # so we can remove the child node if it's not needed.\n if not node.children[part].handler and not node.children[part].children:\n del node.children[part]\n return True\n\n return False\n\n def add_commands(self, commands):\n \"\"\"\n Adds multiple commands to the CommandTree.\n\n Parameters:\n - commands: A dictionary where keys are command parts (as tuples) and values are command details.\n \"\"\"\n for command, details in commands.items():\n self.first_words.add(command[0])\n synonyms = details.get(\"synonyms\")\n if synonyms:\n for synonim in synonyms:\n self.add_synonym(synonim, synonyms[synonim])\n expanded_command = self.expand_synonyms(command)\n self._add_command_recursive(self.root, tuple(expanded_command), details.get(\"handler\"),\n details.get(\"parameters\"), details.get(\"synthesize\"))\n\n def _add_command_recursive(self, node, command, handler, parameters=None, synthesize=None):\n \"\"\"\n Recursive method to add a command to the CommandTree.\n\n Parameters:\n - node: The current node in the trie.\n - command: The remaining parts of the command (as a tuple).\n - handler: The action associated with the command.\n - parameters: Additional parameters associated with the command.\n - synthesize: Speech synthesis information.\n\n Returns:\n - The current node after adding the command.\n \"\"\"\n if not command:\n node.handler = handler\n node.parameters = parameters\n node.synthesize = synthesize\n node.command = command # Assign the original command here\n return node # Return the current node\n\n part = 
command[0]\n if part not in node.children:\n node.children[part] = CommandNode()\n\n return self._add_command_recursive(node.children[part], command[1:], handler, parameters, synthesize)\n\n def find_command(self, command):\n \"\"\"\n Finds a command in the CommandTree.\n\n Parameters:\n - command: The command to search for (as a list of parts).\n\n Returns:\n - A tuple containing the handler, parameters, synthesize information, and the full command string.\n \"\"\"\n expanded_command = self.expand_synonyms(command)\n node = self.root\n found_one = 0\n result_handler, result_parameters, result_synthesize = None, None, None\n for part in expanded_command:\n if part in node.children:\n found_one += 1\n node = node.children[part]\n if node.synthesize:\n result_synthesize = node.synthesize\n else:\n if found_one >= 1 and node.handler:\n return node.handler, node.parameters, result_synthesize\n return None # Command not found\n\n return node.handler, node.parameters, result_synthesize\n\n def get_child_commands(self, command):\n \"\"\"\n Returns all child commands of the given command.\n\n Parameters:\n - command: The command to get child commands for (as a list of parts).\n\n Returns:\n - A list of tuples, each containing a child command and its details (handler, parameters, synthesize).\n \"\"\"\n expanded_command = self.expand_synonyms(command)\n node = self.root\n\n for part in expanded_command:\n if part in node.children:\n node = node.children[part]\n else:\n return [] # Command not found\n\n return self._get_all_child_commands(node)\n\n def _get_all_child_commands(self, node):\n \"\"\"\n Recursive method to get all child commands of a given node.\n\n Parameters:\n - node: The current node in the trie.\n\n Returns:\n - A list of tuples, each containing a child command and its details (handler, parameters, synthesize).\n \"\"\"\n child_commands = []\n\n for part, child_node in node.children.items():\n child_command = [part] + self._get_command_suffix(child_node)\n child_handler = child_node.handler\n child_parameters = child_node.parameters\n child_synthesize = child_node.synthesize\n child_commands.append((child_command, child_handler, child_parameters, child_synthesize))\n\n # Recursively get child commands for each child node\n child_commands.extend(self._get_all_child_commands(child_node))\n\n return child_commands\n\n def _get_command_suffix(self, node):\n \"\"\"\n Helper method to get the command suffix for a given node.\n\n Parameters:\n - node: The current node in the trie.\n\n Returns:\n - A list of command parts representing the suffix of the command.\n \"\"\"\n if not node.children:\n return []\n else:\n child_part = next(iter(node.children.keys()))\n return [child_part] + self._get_command_suffix(node.children[child_part])"
}
] | import random
import sys
import threading
import subprocess
import os
import time
import psutil
from rich import print as rprint
from rich.table import Table
from rich.progress import track
from rich.console import Console
from rich.markdown import Markdown
from rich.prompt import Prompt, Confirm
from rich.panel import Panel, Style
from rich.tree import Tree
from rich.layout import Layout
from Core.Core import Core
from Command_System.CommandTree import CommandTree | 4,581 |
sys.path.append(os.path.abspath(os.path.join(os.getcwd(), os.pardir)))
sys.path.append(os.path.abspath(os.getcwd()))
if sys.platform == "linux":
# required for proper pyautogui and audio workflow
subprocess.Popen("jack_control start", stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
subprocess.Popen("xhost +local:$USER", stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
class GUI:
def __init__(self):
self.std = Console()
self.input = Prompt.ask
self.print = rprint
self.plugin_nums = {}
self.back = """
Press [strong]Enter[/] to go back [red]<--[/]
"""
|
sys.path.append(os.path.abspath(os.path.join(os.getcwd(), os.pardir)))
sys.path.append(os.path.abspath(os.getcwd()))
if sys.platform == "linux":
# required for proper pyautogui and audio workflow
subprocess.Popen("jack_control start", stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
subprocess.Popen("xhost +local:$USER", stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
class GUI:
def __init__(self):
self.std = Console()
self.input = Prompt.ask
self.print = rprint
self.plugin_nums = {}
self.back = """
Press [strong]Enter[/] to go back [red]<--[/]
""" | self.core = Core() | 0 | 2023-12-16 12:24:15+00:00 | 8k |
LLM-Evaluation-s-Always-Fatiguing/leaf-playground-hub | who_is_the_spy/who_is_the_spy/scene.py | [
{
"identifier": "Moderator",
"path": "who_is_the_spy/who_is_the_spy/agents/moderator.py",
"snippet": "class Moderator(\n SceneStaticAgent,\n role_definition=ROLE_DEFINITION,\n cls_description=\"An agent who moderate the game: Who is the Spy\"\n):\n config_cls = ModeratorConfig\n config: config_cls\n\n game_rule_with_blank = (\n \"You are playing a game of Who is the spy. Here are the game rules:\\n\\n\"\n \"## Information and roles\\n\\n\"\n \"There are three roles in \\\"Who is the Spy?\\\": Spy, Civilian, and Blank Slate.\\n\"\n \"- Civilians are shown the correct key.\\n\"\n \"- Spies see a key similar to the correct one but incorrect.\\n\"\n \"- Blank Slates receive a blank clue.\\n\"\n \"Your role is unknown to you, so careful listening and inference are crucial to identify the spy.\\n\\n\"\n \"## Objectives\\n\\n\"\n \"Your objectives vary based on your role:\\n\"\n \"- As a Civilian, your aim is to identify and vote out the Spy and the Blank Slate, without revealing the \"\n \"correct key. Focus first on finding the Blank Slate.\\n\"\n \"- If you're the Spy, your goal is to blend in, avoid detection, and survive the voting. Winning occurs if \"\n \"at least one Spy remains at the end.\\n\"\n \"- As a Blank Slate, try to uncover and vote out the Spy without revealing your own role. You can guess and \"\n \"describe what you think is the correct key.\\n\\n\"\n \"## Stages\\n\\n\"\n \"The game has two main stages and one special scenario:\\n\"\n \"1. Giving Clues Stage: Each player gives clues about their key. Blank Slates can describe anything \"\n \"they choose.\\n\"\n \"2. Accusation Stage: Here, Civilians vote for who they suspect is the Spy or Blank Slate. Spies vote \"\n \"for a likely Civilian or Blank Slate. Blank Slates vote for their suspected Spy.\\n\"\n \"3. Tiebreaker Scenario: In the event of a tie, those with the most votes will re-describe their key, \"\n \"and a new vote takes place among them.\\n\\n\"\n \"## Code of Conduct\\n\\n\"\n \"Here are five rules of behavior you need to follow:\\n\"\n \"- Your clues should be brief and not include the key.\\n\"\n \"- Your clues can't duplicate the previous one.\\n\"\n \"- Do not pretend you are other players or the moderator.\\n\"\n \"- You cannot vote for yourself.\\n\"\n \"- Always end your response with <EOS>.\"\n )\n game_rule_without_blank = (\n \"You are playing a game of Who is the spy. Here are the game rules:\\n\\n\"\n \"## Information and roles\\n\\n\"\n \"There are two roles in \\\"Who is the Spy?\\\": Spy and Civilian.\\n\"\n \"- Civilians are shown the correct key.\\n\"\n \"- Spies see a key similar to the correct one but incorrect.\\n\"\n \"Your role is unknown to you, so careful listening and inference are crucial to identify the spy.\\n\\n\"\n \"## Objectives\\n\\n\"\n \"Your objectives vary based on your role:\\n\"\n \"- As a Civilian, your aim is to identify and vote out the Spy, without revealing the correct key.\\n\"\n \"- If you're the Spy, your goal is to blend in, avoid detection, and survive the voting. Winning occurs \"\n \"if at least one Spy remains at the end.\\n\\n\"\n \"## Stages\\n\\n\"\n \"The game has two main stages and one special scenario:\\n\"\n \"1. Giving Clues Stage: Each player gives clues about their key.\\n\"\n \"2. Accusation Stage: Here, Civilians vote for who they suspect is the Spy or Blank Slate.Spies vote for \"\n \"a likely Civilian or Blank Slate.\\n\"\n \"3. 
Tiebreaker Scenario: In the event of a tie, those with the most votes will re-describe their key, and \"\n \"a new vote takes place among them.\\n\\n\"\n \"## Code of Conduct\\n\\n\"\n \"Here are five rules of behavior you need to follow:\\n\"\n \"- Your clues should be brief and not include the key.\\n\"\n \"- Your clues can't duplicate the previous one.\\n\"\n \"- Do not pretend you are other players or the moderator.\\n\"\n \"- You cannot vote for yourself.\\n\"\n \"- Always end your response with <EOS>.\"\n )\n\n def __init__(self, config: config_cls):\n super().__init__(config=config)\n\n self.id2role: Dict[str, PlayerRoles] = {}\n self.role2players: Dict[PlayerRoles, List[Profile]] = {\n PlayerRoles.CIVILIAN: [],\n PlayerRoles.SPY: [],\n PlayerRoles.BLANK: []\n }\n self.id2player: Dict[str, Profile] = {}\n self.id2status: Dict[str, PlayerStatus] = {}\n self.civilian_key: KeyTypes = None\n self.spy_key: KeyTypes = None\n\n async def registry_players(self, players: List[Profile]) -> None:\n for player in players:\n self.id2player[player.id] = player\n self.id2status[player.id] = PlayerStatus.ALIVE\n\n async def init_game(self) -> ModeratorInitGameSummary:\n num_players = len(self.id2player)\n has_blank = self.env_var[\"has_blank\"].current_value\n key_modality = self.env_var[\"key_modality\"].current_value\n roles_assignment_strategy = {\n 4: {\n PlayerRoles.CIVILIAN: 3,\n PlayerRoles.SPY: 1,\n PlayerRoles.BLANK: 0\n },\n 5: {\n PlayerRoles.CIVILIAN: 3 if has_blank else 4,\n PlayerRoles.SPY: 1,\n PlayerRoles.BLANK: 1 if has_blank else 0\n },\n 6: {\n PlayerRoles.CIVILIAN: 4 if has_blank else 5,\n PlayerRoles.SPY: 1,\n PlayerRoles.BLANK: 1 if has_blank else 0\n },\n 7: {\n PlayerRoles.CIVILIAN: 4 if has_blank else 5,\n PlayerRoles.SPY: 2,\n PlayerRoles.BLANK: 1 if has_blank else 0\n },\n 8: {\n PlayerRoles.CIVILIAN: 5 if has_blank else 6,\n PlayerRoles.SPY: 2,\n PlayerRoles.BLANK: 1 if has_blank else 0\n },\n 9: {\n PlayerRoles.CIVILIAN: 6 if has_blank else 7,\n PlayerRoles.SPY: 2,\n PlayerRoles.BLANK: 1 if has_blank else 0\n }\n }\n roles_agent_num = roles_assignment_strategy[num_players]\n\n roles = list(chain(*[[role] * agent_num for role, agent_num in roles_agent_num.items()]))\n random.shuffle(roles) # shuffle to randomize the role assignment\n for player_id, role in zip(list(self.id2player.keys()), roles):\n self.role2players[role].append(self.id2player[player_id])\n self.id2role[player_id] = role\n\n if key_modality == KeyModalities.TEXT:\n keys = random.choice(load_textual_key())\n self.civilian_key, self.spy_key = Text(text=keys[\"Civilian\"]), Text(text=keys[\"Spy\"])\n elif key_modality == KeyModalities.IMAGE:\n keys = random.choice(load_image_key())\n self.civilian_key, self.spy_key = Image(url=keys[\"Civilian\"]), Image(url=keys[\"Spy\"])\n else:\n raise NotImplementedError(f\"[{key_modality.value}] modal not supported yet.\")\n\n role_assign_summary = \"\\n\".join(\n [f\"- {role.value} :: {[f'{player.name}({player.id})' for player in players]}\"\n for role, players in self.role2players.items()]\n )\n if key_modality == KeyModalities.TEXT:\n key_assign_summary = f\"- civilian :: {self.civilian_key.text}\\n- spy :: {self.spy_key.text}\"\n elif key_modality == KeyModalities.IMAGE:\n key_assign_summary = (\n f\"\"\"\n <div style=\"width:100%;display:flex;flex-direction:row;justify-content:flex-start;align-items:flex-start;\">\n <figure>\n <figcaption>Civilian</figcaption>\n <img src=\"{self.civilian_key.url}\" alt=\"{PlayerRoles.CIVILIAN.value}\" width=\"50%\" />\n </figure>\n 
<figure>\n <figcaption>Spy</figcaption>\n <img src=\"{self.spy_key.url}\" alt=\"{PlayerRoles.CIVILIAN.value}\" width=\"50%\" />\n </figure>\n </div>\n \"\"\".strip()\n )\n else:\n raise NotImplementedError(f\"[{key_modality.value}] modal not supported yet.\")\n msg = (\n f\"## Roles and Keys assignment Results\\n\\n\"\n f\"### Roles\\n{role_assign_summary}\\n\\n### Keys\\n{key_assign_summary}\"\n )\n return ModeratorInitGameSummary(\n sender=self.profile,\n receivers=[self.profile],\n content=Text(text=msg, display_text=msg),\n role2players={role.value: [p.name for p in players] for role, players in self.role2players.items()},\n keys={\"civilian\": self.civilian_key.display_text, \"spy\": self.spy_key.display_text}\n )\n\n async def introduce_game_rule(self) -> ModeratorSummary:\n has_blank = self.env_var[\"has_blank\"].current_value\n msg = self.game_rule_with_blank if has_blank else self.game_rule_without_blank\n return ModeratorSummary(\n sender=self.profile,\n receivers=list(self.id2player.values()),\n content=Text(text=msg, display_text=msg)\n )\n\n async def announce_game_start(self) -> ModeratorSummary:\n num_players = len(self.id2player)\n role2word = {\n PlayerRoles.CIVILIAN: \"civilians\",\n PlayerRoles.SPY: \"spies\",\n PlayerRoles.BLANK: \"blanks\"\n }\n roles_num_description = \", \".join(\n [f\"{len(role_players)} {role2word[role]}\" for role, role_players in self.role2players.items()]\n )\n msg = (\n f\"Now the game begins! There are {num_players} players in this game, including \"\n f\"{roles_num_description}.\"\n )\n return ModeratorSummary(\n sender=self.profile,\n receivers=list(self.id2player.values()),\n content=Text(text=msg, display_text=msg)\n )\n\n async def assign_keys(self, player: Profile) -> ModeratorKeyAssignment:\n role = self.id2role[player.id]\n if role == PlayerRoles.CIVILIAN:\n return ModeratorKeyAssignment.create_with_key(\n key=self.civilian_key, sender=self.profile, receiver=player\n )\n elif role == PlayerRoles.SPY:\n return ModeratorKeyAssignment.create_with_key(\n key=self.spy_key, sender=self.profile, receiver=player\n )\n else:\n return ModeratorKeyAssignment.create_without_key(\n sender=self.profile, receiver=player\n )\n\n async def ask_for_key_description(self) -> ModeratorAskForDescription:\n return ModeratorAskForDescription.create(\n sender=self.profile,\n receivers=[\n player for player in self.id2player.values() if self.id2status[player.id] == PlayerStatus.ALIVE\n ]\n )\n\n async def valid_player_description(self, description: PlayerDescription) -> ModeratorWarning:\n player_id = description.sender_id\n player_role = self.id2role[player_id]\n if player_role != PlayerRoles.BLANK and self.env_var[\"key_modality\"].current_value == KeyModalities.TEXT:\n warn_msg = (\n \"Your description contains your key, which is not allowed, please redo the description. 
Reply \"\n \"only description of your key, without any additional response.\"\n )\n if (player_role == PlayerRoles.CIVILIAN and self.civilian_key.text.lower() in description.content.text.lower()) or \\\n (player_role == PlayerRoles.SPY and self.spy_key.text.lower() in description.content.text.lower()):\n return ModeratorWarning(\n sender=self.profile,\n receivers=[description.sender],\n content=Text(text=warn_msg, display_text=warn_msg),\n has_warn=True\n )\n return ModeratorWarning(\n sender=self.profile,\n receivers=[description.sender],\n content=Text(text=\"\", display_text=\"\"),\n has_warn=False\n )\n\n async def ask_for_role_prediction(self) -> ModeratorAskForRolePrediction:\n has_blank = self.env_var[\"has_blank\"].current_value\n return ModeratorAskForRolePrediction.create(\n sender=self.profile,\n receivers=[\n player for player in self.id2player.values() if self.id2status[player.id] == PlayerStatus.ALIVE\n ],\n player_names=[\n player.name for player in self.id2player.values() if self.id2status[player.id] == PlayerStatus.ALIVE\n ],\n has_blank_slate=has_blank\n )\n\n async def summarize_players_prediction(self, predictions: List[PlayerPrediction]) -> ModeratorPredictionSummary:\n has_blank = self.env_var[\"has_blank\"].current_value\n summaries = []\n extracted_predictions = {}\n for prediction in predictions:\n preds = prediction.get_prediction(\n player_names=[player.name for player in self.id2player.values()],\n has_blank_slate=has_blank\n )\n extracted_predictions[prediction.sender_name] = {role.value: list(names) for role, names in preds.items()}\n summary = (\n f\"### {prediction.sender_name}({prediction.sender_id})'s prediction\\n\"\n f\"- {PlayerRoles.SPY.value} :: {list(preds[PlayerRoles.SPY])}\"\n )\n if has_blank:\n summary += f\"\\n- {PlayerRoles.BLANK.value} :: {list(preds[PlayerRoles.BLANK])}\"\n summaries.append(summary)\n\n alive_spies = [\n player.name for player in self.role2players[PlayerRoles.SPY]\n if self.id2status[player.id] == PlayerStatus.ALIVE\n ]\n label = (\n f\"### Correct Answer\\n- {PlayerRoles.SPY.value} :: {alive_spies}\"\n )\n ground_truth = {PlayerRoles.SPY.value: alive_spies}\n if has_blank:\n alive_blanks = [\n player.name for player in self.role2players[PlayerRoles.BLANK]\n if self.id2status[player.id] == PlayerStatus.ALIVE\n ]\n label += f\"\\n- {PlayerRoles.BLANK.value} :: {alive_blanks}\"\n ground_truth[PlayerRoles.BLANK.value] = alive_blanks\n msg = \"\\n\\n\".join(summaries) + f\"\\n\\n{label}\"\n return ModeratorPredictionSummary(\n sender=self.profile,\n receivers=[self.profile],\n content=Text(text=msg, display_text=msg),\n predictions=extracted_predictions,\n ground_truth=ground_truth\n )\n\n async def ask_for_vote(self) -> ModeratorAskForVote:\n has_blank = self.env_var[\"has_blank\"].current_value\n return ModeratorAskForVote.create(\n sender=self.profile,\n receivers=[\n player for player in self.id2player.values() if self.id2status[player.id] == PlayerStatus.ALIVE\n ],\n has_blank_slate=has_blank\n )\n\n async def summarize_player_votes(\n self,\n votes: List[PlayerVote],\n focused_players: Optional[List[Profile]]\n ) -> ModeratorVoteSummary:\n def get_most_voted_players() -> List[Profile]:\n eliminated_names = [\n player_name for player_name, num_be_voted in player2num_be_voted.items() if\n num_be_voted == max(player2num_be_voted.values())\n ]\n return [player for player in self.id2player.values() if player.name in eliminated_names]\n\n player2num_be_voted = {player.name: 0 for player in self.id2player.values()}\n player2votes 
= {}\n for vote in votes:\n vote_to = vote.get_vote([player.name for player in self.id2player.values()])\n if not vote_to:\n continue\n player2votes[vote.sender_name] = vote_to\n player2num_be_voted[vote_to] += 1\n if focused_players:\n focused_names = [p.name for p in focused_players]\n for player_name in player2num_be_voted:\n if player_name not in focused_names:\n player2num_be_voted[player_name] = 0\n\n voting_detail = \"\\n\".join([f\"{voter} votes to {voted}\" for voter, voted in player2votes.items()]) + \"\\n\"\n if focused_players:\n voting_detail += (\n f\"This is a re-voting turn, we will only focus on the votes {[p.name for p in focused_players]} got.\\n\"\n )\n most_voted_players = get_most_voted_players()\n if len(most_voted_players) > 1: # tied\n msg = (\n f\"{voting_detail}{[p.name for p in most_voted_players]} are having the same \"\n f\"votes, for those players, please re-describe the key you received again.\"\n )\n return ModeratorVoteSummary(\n sender=self.profile,\n receivers=[player for player in self.id2player.values()],\n content=Text(text=msg, display_text=msg),\n tied_players=most_voted_players,\n player_received_votes=player2num_be_voted,\n players_voted_to=player2votes\n )\n else: # eliminate\n for player in most_voted_players:\n self.id2status[player.id] = PlayerStatus.ELIMINATED\n msg = f\"{voting_detail}{most_voted_players[0].name} has the most votes and is eliminated.\"\n return ModeratorVoteSummary(\n sender=self.profile,\n receivers=[player for player in self.id2player.values()],\n content=Text(text=msg, display_text=msg),\n player_received_votes=player2num_be_voted,\n players_voted_to=player2votes\n )\n\n async def check_if_game_over(self) -> ModeratorCheckGameOverSummary:\n def return_game_over(role: PlayerRoles):\n winners = [\n player.name for player in self.role2players[role]\n if self.id2status[player.id] == PlayerStatus.ALIVE\n ]\n msg = f\"Game Over! 
{role.value} win, winners are: {winners}.\"\n return ModeratorCheckGameOverSummary(\n sender=self.profile,\n receivers=[player for player in self.id2player.values()],\n content=Text(text=msg, display_text=msg),\n is_game_over=True,\n winners=winners\n )\n\n has_blank = self.env_var[\"has_blank\"].current_value\n num_players = len(self.id2player)\n num_alive_players = len(\n [player for player, status in self.id2status.items() if status == PlayerStatus.ALIVE]\n )\n num_alive_civilians = len(\n [\n player for player in self.role2players[PlayerRoles.CIVILIAN]\n if self.id2status[player.id] == PlayerStatus.ALIVE\n ]\n )\n num_alive_spies = len(\n [\n player for player in self.role2players[PlayerRoles.SPY]\n if self.id2status[player.id] == PlayerStatus.ALIVE\n ]\n )\n if num_alive_civilians == num_alive_players: # civilians win\n return return_game_over(PlayerRoles.CIVILIAN)\n if (\n (num_players > 6 and num_alive_players <= 3 and num_alive_spies > 0) or\n (num_players <= 6 and num_alive_players <= 2 and num_alive_spies > 0)\n ): # spies win\n return return_game_over(PlayerRoles.SPY)\n if has_blank and num_alive_spies == 0 and num_alive_civilians != num_alive_players: # blank wins\n return return_game_over(PlayerRoles.BLANK)\n\n msg = f\"Not any side wins, game continues.\"\n return ModeratorCheckGameOverSummary(\n sender=self.profile,\n receivers=[player for player in self.id2player.values()],\n content=Text(text=msg, display_text=msg),\n is_game_over=False,\n winners=None\n )\n\n async def reset_inner_status(self):\n self.id2role: Dict[str, PlayerRoles] = {}\n self.role2players: Dict[PlayerRoles, List[Profile]] = {\n PlayerRoles.CIVILIAN: [],\n PlayerRoles.SPY: [],\n PlayerRoles.BLANK: []\n }\n self.id2player: Dict[str, Profile] = {}\n self.id2status: Dict[str, PlayerStatus] = {}\n self.civilian_key: Union[Audio, Image, Text] = None\n self.spy_key: Union[Audio, Image, Text] = None"
},
{
"identifier": "BaseAIPlayer",
"path": "who_is_the_spy/who_is_the_spy/agents/player.py",
"snippet": "class BaseAIPlayer(\n SceneAIAgent,\n ABC,\n role_definition=ROLE_DEFINITION,\n cls_description=\"An AI agent who participants in the game Who is the Spy as a player\"\n):\n config_cls = BaseAIPlayerConfig\n config: config_cls\n\n def __init__(self, config: config_cls):\n super().__init__(config=config)\n\n @abstractmethod\n async def receive_key(self, key_assignment: ModeratorKeyAssignment) -> None:\n pass\n\n @abstractmethod\n async def describe_key(self, history: List[MessageTypes], receivers: List[Profile]) -> PlayerDescription:\n pass\n\n @abstractmethod\n async def predict_role(self, history: List[MessageTypes], moderator: Profile) -> PlayerPrediction:\n pass\n\n @abstractmethod\n async def vote(self, history: List[MessageTypes], moderator: Profile) -> PlayerVote:\n pass\n\n @abstractmethod\n async def reset_inner_status(self):\n pass"
},
{
"identifier": "HumanPlayer",
"path": "who_is_the_spy/who_is_the_spy/agents/human_player.py",
"snippet": "class HumanPlayer(\n SceneHumanAgent,\n role_definition=ROLE_DEFINITION,\n cls_description=\"A human who participants in the game Who is the Spy as a player\"\n):\n config_cls = HumanPlayerConfig\n config: config_cls\n\n def __init__(self, config: config_cls):\n super().__init__(config=config)\n\n async def receive_key(self, key_assignment: ModeratorKeyAssignment) -> None:\n pass\n\n async def describe_key(self, history: List[MessageTypes], receivers: List[Profile]) -> PlayerDescription:\n description = (await self.wait_human_text_input()) or \"\"\n return PlayerDescription(\n sender=self.profile,\n receivers=receivers,\n content=Text(text=description, display_text=description)\n )\n\n async def predict_role(self, history: List[MessageTypes], moderator: Profile) -> PlayerPrediction:\n prediction = (await self.wait_human_text_input()) or \"\"\n return PlayerPrediction(\n sender=self.profile,\n receivers=[moderator, self.profile],\n content=Text(text=prediction, display_text=prediction)\n )\n\n async def vote(self, history: List[MessageTypes], moderator: Profile) -> PlayerVote:\n vote = (await self.wait_human_text_input()) or \"\"\n return PlayerVote(\n sender=self.profile,\n receivers=[moderator, self.profile],\n content=Text(text=vote, display_text=vote)\n )\n\n async def reset_inner_status(self):\n pass"
}
] | import asyncio
import random
from typing import List, Optional, Type, Union
from pydantic import Field
from leaf_playground.core.workers import Logger
from leaf_playground.core.scene import Scene
from leaf_playground.core.scene_definition import SceneConfig
from leaf_playground.data.log_body import ActionLogBody
from leaf_playground.data.media import Text
from .agents.moderator import Moderator
from .agents.player import BaseAIPlayer
from .agents.human_player import HumanPlayer
from .scene_definition import * | 5,933 |
Player = Union[BaseAIPlayer, HumanPlayer]
class WhoIsTheSpyLogBody(ActionLogBody):
references: Optional[List[MessageTypes]] = Field(default=None)
response: MessageTypes = Field(default=...)
game_id: int = Field(default=...)
round_id: int = Field(default=...)
WhoIsTheSpySceneConfig = SceneConfig.create_config_model(
SCENE_DEFINITION,
additional_config_fields={
"debug_mode": (bool, Field(default=False, exclude=True))
}
)
class WhoIsTheSpyScene(Scene, scene_definition=SCENE_DEFINITION, log_body_class=WhoIsTheSpyLogBody):
config_cls = WhoIsTheSpySceneConfig
config: config_cls
log_body_class: Type[WhoIsTheSpyLogBody]
def __init__(self, config: config_cls, logger: Logger):
super().__init__(config=config, logger=logger)
|
Player = Union[BaseAIPlayer, HumanPlayer]
class WhoIsTheSpyLogBody(ActionLogBody):
references: Optional[List[MessageTypes]] = Field(default=None)
response: MessageTypes = Field(default=...)
game_id: int = Field(default=...)
round_id: int = Field(default=...)
WhoIsTheSpySceneConfig = SceneConfig.create_config_model(
SCENE_DEFINITION,
additional_config_fields={
"debug_mode": (bool, Field(default=False, exclude=True))
}
)
class WhoIsTheSpyScene(Scene, scene_definition=SCENE_DEFINITION, log_body_class=WhoIsTheSpyLogBody):
config_cls = WhoIsTheSpySceneConfig
config: config_cls
log_body_class: Type[WhoIsTheSpyLogBody]
def __init__(self, config: config_cls, logger: Logger):
super().__init__(config=config, logger=logger)
| self.moderator: Moderator = self.static_agents["moderator"][0] | 0 | 2023-12-21 03:09:08+00:00 | 8k |
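The leaf-playground record above stops inside WhoIsTheSpyScene.__init__, and its Moderator snippet assigns roles in init_game by expanding a per-player-count table of role counts into a flat list, shuffling it, and zipping it onto the registered players. The sketch below reproduces only that shuffle-and-zip step under simplified, invented names (assign_roles and the plain role strings are illustrative); the real method also draws the civilian and spy keys and emits a summary message.

import random
from itertools import chain

# Illustrative sketch of the role-assignment step from Moderator.init_game above:
# expand {role: count} into a flat list, shuffle it, then pair it with players in order.
def assign_roles(player_ids, role_counts):
    roles = list(chain(*[[role] * count for role, count in role_counts.items()]))
    random.shuffle(roles)
    return dict(zip(player_ids, roles))

# Example using the 5-player split with a blank slate from the snippet
# (3 civilians, 1 spy, 1 blank):
print(assign_roles(["p1", "p2", "p3", "p4", "p5"],
                   {"civilian": 3, "spy": 1, "blank": 1}))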
djkcyl/ABot-NT | utils/text2image.py | [
{
"identifier": "AdvertisementCategory",
"path": "models/ad.py",
"snippet": "class AdvertisementCategory(str, Enum):\n business = \"商业\"\n public_welfare = \"公益\"\n announcement = \"公告\"\n tips = \"提示\""
},
{
"identifier": "AiohttpClientService",
"path": "services/aiohttp.py",
"snippet": "class AiohttpClientService(Service):\n id = \"http.client/aiohttp\"\n session: ClientSession\n\n def __init__(self, session: ClientSession | None = None) -> None:\n self.session = cast(ClientSession, session)\n super().__init__()\n\n @property\n def stages(self) -> set[str]:\n return {\"preparing\", \"cleanup\"}\n\n @property\n def required(self) -> set:\n return set()\n\n async def launch(self, _: Launart) -> None:\n async with self.stage(\"preparing\"):\n if self.session is None:\n self.session = ClientSession(timeout=ClientTimeout(total=None))\n async with self.stage(\"cleanup\"):\n await self.session.close()"
},
{
"identifier": "S3FileService",
"path": "services/s3file.py",
"snippet": "class S3FileService(Service):\n id: str = \"abot/s3file\"\n\n def __init__(\n self,\n endpoint: str = \"127.0.0.1:8333\",\n access_key: str | None = None,\n secret_key: str | None = None,\n *,\n secure: bool = False,\n ) -> None:\n super().__init__()\n self.s3file = S3File(endpoint, access_key, secret_key, secure=secure)\n\n # def get_interface(self, _) -> Minio:\n # return self.s3file\n\n @property\n def required(self) -> set:\n return set()\n\n @property\n def stages(self) -> set[str]:\n return {\"preparing\"}\n\n async def launch(self, _: Launart) -> None:\n async with self.stage(\"preparing\"):\n if await self.s3file.bucket_exists(\"abot7f8befa44d10\"):\n logger.info(\"S3 Bucket 已存在\")\n else:\n logger.info(\"正在创建 S3 Bucket\")\n await self.s3file.make_bucket(\"abot7f8befa44d10\")\n logger.success(\"S3 Bucket 创建成功\")\n\n test_text = secrets.token_hex(16).encode()\n if await self.s3file.object_exists(\".keep\"):\n await self.s3file.remove_object(\".keep\")\n put_test = await self.s3file.put_object(\".keep\", test_text)\n if put_test:\n logger.info(\"S3 Bucket 可写\")\n else:\n logger.error(\"S3 Bucket 不可写\")\n msg = \"S3 Bucket 不可写\"\n raise S3FileError(msg)\n read_test: ClientResponse = await self.s3file.get_object(\".keep\")\n if await read_test.read() == test_text:\n logger.info(\"S3 Bucket 可读\")\n else:\n logger.error(\"S3 Bucket 不可读\")\n msg = \"S3 Bucket 不可读\"\n raise S3FileError(msg)\n\n logger.success(\"S3 Bucket 测试完成\")"
},
{
"identifier": "ADBuilder",
"path": "utils/builder.py",
"snippet": "class ADBuilder(Advertisement):\n @classmethod\n async def create_ad(\n cls,\n content: str,\n content_type: int,\n category: AdvertisementCategory,\n source: str,\n expire_days: int = 30,\n weight: int = 1,\n target_audience: list[str] | None = None,\n bid_price: int = 0,\n ) -> str:\n if target_audience is None:\n target_audience = []\n while True:\n ad_id = token_hex(8)\n if await cls.find_one(cls.ad_id == ad_id):\n continue\n break\n\n await cls.insert(\n Advertisement(\n ad_id=ad_id,\n content=content,\n content_type=content_type,\n ad_category=category,\n source=source,\n end_date=datetime.now(CHINA_TZ) + timedelta(days=expire_days) if expire_days else datetime.max,\n weight=weight,\n target_audience=target_audience,\n bid_price=bid_price,\n )\n )\n return ad_id\n\n # 随机抽取广告\n @classmethod\n async def get_ad(\n cls, category: AdvertisementCategory | None = None, target_audience: list[str] | None = None\n ) -> Advertisement | None:\n if target_audience is None:\n target_audience = []\n current_date = datetime.now(CHINA_TZ)\n\n # 构建查询条件\n query = cls.find(\n Eq(cls.is_active, True),\n LTE(cls.start_date, current_date),\n GT(cls.end_date, current_date),\n )\n\n if category:\n query = query.find(Eq(cls.ad_category, category))\n\n if target_audience:\n query = query.find(In(cls.target_audience, target_audience))\n\n # 计算每个广告的调整后的权重\n ads = await query.to_list()\n\n if not ads:\n return None\n\n adjusted_weights = [math.log1p(ad.bid_price) * math.log1p(ad.weight) for ad in ads]\n total_weight = sum(adjusted_weights)\n\n # 根据权重随机选择广告\n probabilities = [w / total_weight for w in adjusted_weights]\n selected_ad = random.choices(ads, weights=probabilities, k=1)\n\n ctx = Context.current\n cid = ctx.client.last_value\n if ctx.scene.path_without_land in {\"guild.channel\", \"guild.user\"}:\n sid = ctx.scene[\"guild\"]\n else:\n sid = ctx.scene[\"group\"]\n\n selected_ad = selected_ad[0]\n selected_ad.views += 1\n await selected_ad.save() # type: ignore\n selected_ad = cast(Advertisement, selected_ad)\n\n await AdDisplayLog.insert(\n AdDisplayLog(\n ad_id=selected_ad.ad_id,\n scene_id=sid,\n client_id=cid,\n target_audience=list(set(selected_ad.target_audience) & set(target_audience)),\n )\n )\n return selected_ad"
},
{
"identifier": "CHINA_TZ",
"path": "utils/datetime.py",
"snippet": "CHINA_TZ = ZoneInfo(\"Asia/Shanghai\")"
},
{
"identifier": "fill_font",
"path": "utils/fonts_provider.py",
"snippet": "async def fill_font(route: Route, request: Request) -> None:\n url = URL(request.url)\n if not url.is_absolute():\n msg = \"字体地址不合法\"\n raise ValueError(msg)\n try:\n logger.debug(f\"Font {url.name} requested\")\n await route.fulfill(\n path=await get_font(url.name),\n content_type=font_mime_map.get(url.suffix),\n )\n except Exception:\n logger.error(f\"找不到字体 {url.name}\")\n await route.fallback()"
},
{
"identifier": "get_cut_str",
"path": "utils/strings.py",
"snippet": "def get_cut_str(input_str: str, cut: int) -> list[str]:\n \"\"\"\n 自动断行, 用于 Pillow 等不会自动换行的场景\n \"\"\"\n punc = \"\"\",,、。.??)》】“\"‘';;::!!·`~%^& \"\"\" # noqa: RUF001\n si = 0\n i = 0\n next_str = input_str\n str_list = []\n\n while re.search(r\"\\n\\n\\n\\n\\n\", next_str):\n next_str = re.sub(r\"\\n\\n\\n\\n\\n\", \"\\n\", next_str)\n for s in next_str:\n si += 1 if s in string.printable else 2\n i += 1\n if not next_str:\n break\n if next_str[0] == \"\\n\":\n next_str = next_str[1:]\n elif s == \"\\n\":\n str_list.append(next_str[: i - 1])\n next_str = next_str[i - 1 :]\n si = 0\n i = 0\n continue\n if si > cut:\n try:\n if next_str[i] in punc:\n i += 1\n except IndexError:\n str_list.append(next_str)\n return str_list\n str_list.append(next_str[:i])\n next_str = next_str[i:]\n si = 0\n i = 0\n str_list.append(next_str)\n i = 0\n non_wrap_str = []\n for p in str_list:\n if not p:\n break\n if p[-1] == \"\\n\":\n p = p[:-1] # noqa: PLW2901\n non_wrap_str.append(p)\n i += 1\n return non_wrap_str"
}
] | import asyncio
import hashlib
import random
import re
from base64 import b64encode
from datetime import datetime, timedelta
from io import BytesIO
from pathlib import Path
from graiax.text2img.playwright import (
HTMLRenderer,
MarkdownConverter,
PageOption,
ScreenshotOption,
convert_text,
)
from graiax.text2img.playwright.renderer import BuiltinCSS
from jinja2 import Template
from launart import Launart
from loguru import logger
from PIL import Image, ImageDraw, ImageFont
from playwright.async_api._generated import Request
from qrcode.image.styledpil import StyledPilImage
from qrcode.main import QRCode
from models.ad import AdvertisementCategory
from services import AiohttpClientService, S3FileService
from utils.builder import ADBuilder
from utils.datetime import CHINA_TZ
from .fonts_provider import fill_font
from .strings import get_cut_str | 3,800 | qrcode.clear()
qrcode.add_data("https://qun.qq.com/qunpro/robot/qunshare?robot_appid=101985270&robot_uin=2854214511")
invite_group: Image.Image = qrcode.make_image(fill_color="black", back_color="#fafafac0").get_image().resize((200, 200))
bio = BytesIO()
invite_group.save(bio, format="PNG")
group_b64 = b64encode(bio.getvalue()).decode()
footer_css = Path("./static/css/footer.css").read_text()
html_render = HTMLRenderer(
page_option=PageOption(device_scale_factor=1.5),
screenshot_option=ScreenshotOption(type="jpeg", quality=80, full_page=True, scale="device"),
css=(
BuiltinCSS.reset,
BuiltinCSS.github,
BuiltinCSS.one_dark,
BuiltinCSS.container,
"@font-face{font-family:'harmo';font-weight:300;"
"src:url('http://font.static.abot/HarmonyOS_Sans_SC_Light.ttf') format('truetype');}"
"@font-face{font-family:'harmo';font-weight:400;"
"src:url('http://font.static.abot/HarmonyOS_Sans_SC_Regular.ttf') format('truetype');}"
"@font-face{font-family:'harmo';font-weight:500;"
"src:url('http://font.static.abot/HarmonyOS_Sans_SC_Medium.ttf') format('truetype');}"
"@font-face{font-family:'harmo';font-weight:600;"
"src:url('http://font.static.abot/HarmonyOS_Sans_SC_Bold.ttf') format('truetype');}"
"*{font-family:'harmo',sans-serif}",
"body{background-color:#fafafac0;}",
"@media(prefers-color-scheme:light){.markdown-body{--color-canvas-default:#fafafac0;}}",
footer_css,
),
page_modifiers=[
lambda page: page.route(re.compile("^http://font.static.abot/(.+)$"), fill_font),
# lambda page: page.on("requestfailed", network_requestfailed),
],
)
md_converter = MarkdownConverter()
def network_requestfailed(request: Request) -> None:
url = request.url
fail = request.failure
method = request.method
logger.warning(f"[RequestFailed] [{method} {fail}] << {url}")
async def add_footer(
category: AdvertisementCategory = AdvertisementCategory.announcement,
target_audience: list | None = None,
) -> str:
if target_audience is None:
target_audience = []
ad = await ADBuilder.get_ad(category, target_audience=target_audience)
if random.random() > DEFAULT_AD_PROBABILITY and ad:
ad_type = ad.ad_category.value
if ad.content_type == 0:
ad_p = "<p>" + "</p><p>".join(ad.content.splitlines()) + "</p>"
ad_html = (
"<style>.ad-text::before{content: '" + ad_type + "'}</style>"
f'<div class="ad-text"><div class="text-area">{ad_p}</div></div>'
)
else:
s3file = Launart.current().get_component(S3FileService).s3file
ad_image = await s3file.get_object(ad.content)
ad_base64 = b64encode(await ad_image.read()).decode()
ad_html = (
"<style>.ad-img::before{content: '" + ad_type + "'}</style>"
f'<div class="ad-img"><img src="data:image/png;base64,{ad_base64}"/></div>'
)
else:
ad_type = "一言"
session = Launart.current().get_component(AiohttpClientService).session
async with session.get("https://v1.hitokoto.cn/?encode=text") as resp:
yiyan = await resp.text()
ad_html = (
"<style>.ad-text::before{content: '" + ad_type + "'}</style>"
f'<div class="ad-text"><div class="text-area">{yiyan}</div></div>'
)
return f"""
<div style="position:absolute;left:0;width:100%">
<footer>
<section class="left">
<div class="footer-text">
<p style="font-weight: bold">该图片由 ABot 生成</p>
<p style="font-size: 14px">{datetime.now(CHINA_TZ).strftime("%Y/%m/%d %p %I:%M:%S")}</p>
</div>
<section class="ad">{ad_html}</section>
</section>
<section class="right">
<div class="qrcode-area">
<img class="qrcode" src="data:image/png;base64,{group_b64}" />
<img class="qrcode" src="data:image/png;base64,{guild_b64}" />
</div>
<div class="qrcode-text">
<p>扫描二维码将 ABot 添加至你的群聊/频道</p>
</div>
</section>
</footer>
<section class="powered">Powered by Avilla</section>
</div>
"""
async def create_image(text: str, cut: int = 64) -> bytes:
str_hash = hashlib.sha256(text.encode("utf-8")).hexdigest()
cache.joinpath(str_hash[:2]).mkdir(exist_ok=True)
cache_file = cache.joinpath(f"{str_hash}.jpg")
if cache_file.exists():
logger.info(f"T2I Cache hit: {str_hash}")
image_bytes = cache_file.read_bytes()
else:
image_bytes = await asyncio.to_thread(_create_pil_image, text, cut)
cache_file.write_bytes(image_bytes)
return image_bytes
def _create_pil_image(text: str, cut: int) -> bytes:
|
# 广告出现的概率 (probability that an advertisement appears)
DEFAULT_AD_PROBABILITY = 0.7
font_file = "./static/font/sarasa-mono-sc-semibold.ttf"
font = ImageFont.truetype(font_file, 22)
cache = Path("cache", "t2i")
cache.mkdir(exist_ok=True, parents=True)
qrcode = QRCode(image_factory=StyledPilImage)
qrcode.add_data("https://qun.qq.com/qunpro/robot/share?robot_appid=101985270")
invite_guild: Image.Image = qrcode.make_image(fill_color="black", back_color="#fafafac0").get_image().resize((200, 200))
bio = BytesIO()
invite_guild.save(bio, format="PNG")
guild_b64 = b64encode(bio.getvalue()).decode()
qrcode.clear()
qrcode.add_data("https://qun.qq.com/qunpro/robot/qunshare?robot_appid=101985270&robot_uin=2854214511")
invite_group: Image.Image = qrcode.make_image(fill_color="black", back_color="#fafafac0").get_image().resize((200, 200))
bio = BytesIO()
invite_group.save(bio, format="PNG")
group_b64 = b64encode(bio.getvalue()).decode()
footer_css = Path("./static/css/footer.css").read_text()
html_render = HTMLRenderer(
page_option=PageOption(device_scale_factor=1.5),
screenshot_option=ScreenshotOption(type="jpeg", quality=80, full_page=True, scale="device"),
css=(
BuiltinCSS.reset,
BuiltinCSS.github,
BuiltinCSS.one_dark,
BuiltinCSS.container,
"@font-face{font-family:'harmo';font-weight:300;"
"src:url('http://font.static.abot/HarmonyOS_Sans_SC_Light.ttf') format('truetype');}"
"@font-face{font-family:'harmo';font-weight:400;"
"src:url('http://font.static.abot/HarmonyOS_Sans_SC_Regular.ttf') format('truetype');}"
"@font-face{font-family:'harmo';font-weight:500;"
"src:url('http://font.static.abot/HarmonyOS_Sans_SC_Medium.ttf') format('truetype');}"
"@font-face{font-family:'harmo';font-weight:600;"
"src:url('http://font.static.abot/HarmonyOS_Sans_SC_Bold.ttf') format('truetype');}"
"*{font-family:'harmo',sans-serif}",
"body{background-color:#fafafac0;}",
"@media(prefers-color-scheme:light){.markdown-body{--color-canvas-default:#fafafac0;}}",
footer_css,
),
page_modifiers=[
lambda page: page.route(re.compile("^http://font.static.abot/(.+)$"), fill_font),
# lambda page: page.on("requestfailed", network_requestfailed),
],
)
md_converter = MarkdownConverter()
def network_requestfailed(request: Request) -> None:
url = request.url
fail = request.failure
method = request.method
logger.warning(f"[RequestFailed] [{method} {fail}] << {url}")
async def add_footer(
category: AdvertisementCategory = AdvertisementCategory.announcement,
target_audience: list | None = None,
) -> str:
if target_audience is None:
target_audience = []
ad = await ADBuilder.get_ad(category, target_audience=target_audience)
if random.random() > DEFAULT_AD_PROBABILITY and ad:
ad_type = ad.ad_category.value
if ad.content_type == 0:
ad_p = "<p>" + "</p><p>".join(ad.content.splitlines()) + "</p>"
ad_html = (
"<style>.ad-text::before{content: '" + ad_type + "'}</style>"
f'<div class="ad-text"><div class="text-area">{ad_p}</div></div>'
)
else:
s3file = Launart.current().get_component(S3FileService).s3file
ad_image = await s3file.get_object(ad.content)
ad_base64 = b64encode(await ad_image.read()).decode()
ad_html = (
"<style>.ad-img::before{content: '" + ad_type + "'}</style>"
f'<div class="ad-img"><img src="data:image/png;base64,{ad_base64}"/></div>'
)
else:
ad_type = "一言"
session = Launart.current().get_component(AiohttpClientService).session
async with session.get("https://v1.hitokoto.cn/?encode=text") as resp:
yiyan = await resp.text()
ad_html = (
"<style>.ad-text::before{content: '" + ad_type + "'}</style>"
f'<div class="ad-text"><div class="text-area">{yiyan}</div></div>'
)
return f"""
<div style="position:absolute;left:0;width:100%">
<footer>
<section class="left">
<div class="footer-text">
<p style="font-weight: bold">该图片由 ABot 生成</p>
<p style="font-size: 14px">{datetime.now(CHINA_TZ).strftime("%Y/%m/%d %p %I:%M:%S")}</p>
</div>
<section class="ad">{ad_html}</section>
</section>
<section class="right">
<div class="qrcode-area">
<img class="qrcode" src="data:image/png;base64,{group_b64}" />
<img class="qrcode" src="data:image/png;base64,{guild_b64}" />
</div>
<div class="qrcode-text">
<p>扫描二维码将 ABot 添加至你的群聊/频道</p>
</div>
</section>
</footer>
<section class="powered">Powered by Avilla</section>
</div>
"""
async def create_image(text: str, cut: int = 64) -> bytes:
str_hash = hashlib.sha256(text.encode("utf-8")).hexdigest()
cache.joinpath(str_hash[:2]).mkdir(exist_ok=True)
cache_file = cache.joinpath(f"{str_hash}.jpg")
if cache_file.exists():
logger.info(f"T2I Cache hit: {str_hash}")
image_bytes = cache_file.read_bytes()
else:
image_bytes = await asyncio.to_thread(_create_pil_image, text, cut)
cache_file.write_bytes(image_bytes)
return image_bytes
def _create_pil_image(text: str, cut: int) -> bytes: | cut_str = "\n".join(get_cut_str(text, cut)) | 6 | 2023-12-16 13:19:56+00:00 | 8k |
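In the ABot record above, ADBuilder.get_ad picks an advertisement with random.choices, weighting each candidate by log1p(bid_price) * log1p(weight) so that very large bids or weights only grow the odds logarithmically. The snippet below isolates that weighting under invented names (pick_ad and the (ad_id, bid_price, weight) tuples are illustrative only, not the project's API). Note also that add_footer in the same record only renders an ad when random.random() > DEFAULT_AD_PROBABILITY, roughly 30% of calls with the 0.7 default shown in all_code, and otherwise falls back to a hitokoto one-liner.

import math
import random

# Standalone sketch of the ad weighting used in ADBuilder.get_ad above.
# ads is a list of (ad_id, bid_price, weight) tuples; names are illustrative.
def pick_ad(ads):
    adjusted = [math.log1p(bid) * math.log1p(weight) for _, bid, weight in ads]
    total = sum(adjusted)
    probabilities = [w / total for w in adjusted]  # normalise to a distribution
    return random.choices(ads, weights=probabilities, k=1)[0]

print(pick_ad([("ad-a", 10, 1), ("ad-b", 100, 1), ("ad-c", 10, 5)]))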
Varexa/Gateway | chat_exporter/construct/message.py | [
{
"identifier": "discord",
"path": "chat_exporter/ext/discord_import.py",
"snippet": ""
},
{
"identifier": "Embed",
"path": "chat_exporter/construct/assets/embed.py",
"snippet": "class Embed:\r\n r: str\r\n g: str\r\n b: str\r\n title: str\r\n description: str\r\n author: str\r\n image: str\r\n thumbnail: str\r\n footer: str\r\n fields: str\r\n\r\n check_against = None\r\n\r\n def __init__(self, embed, guild):\r\n self.embed: discord.Embed = embed\r\n self.guild: discord.Guild = guild\r\n\r\n async def flow(self):\r\n self.check_against = _gather_checker()\r\n self.build_colour()\r\n await self.build_title()\r\n await self.build_description()\r\n await self.build_fields()\r\n await self.build_author()\r\n await self.build_image()\r\n await self.build_thumbnail()\r\n await self.build_footer()\r\n await self.build_embed()\r\n\r\n return self.embed\r\n\r\n def build_colour(self):\r\n self.r, self.g, self.b = (\r\n (self.embed.colour.r, self.embed.colour.g, self.embed.colour.b)\r\n if self.embed.colour != self.check_against else (0x20, 0x22, 0x25) # default colour\r\n )\r\n\r\n async def build_title(self):\r\n self.title = html.escape(self.embed.title) if self.embed.title != self.check_against else \"\"\r\n\r\n if self.title:\r\n self.title = await fill_out(self.guild, embed_title, [\r\n (\"EMBED_TITLE\", self.title, PARSE_MODE_MARKDOWN)\r\n ])\r\n\r\n async def build_description(self):\r\n self.description = html.escape(self.embed.description) if self.embed.description != self.check_against else \"\"\r\n\r\n if self.description:\r\n self.description = await fill_out(self.guild, embed_description, [\r\n (\"EMBED_DESC\", self.embed.description, PARSE_MODE_EMBED)\r\n ])\r\n\r\n async def build_fields(self):\r\n self.fields = \"\"\r\n\r\n # This does not have to be here, but Pycord.\r\n if not self.embed.fields:\r\n return\r\n\r\n for field in self.embed.fields:\r\n field.name = html.escape(field.name)\r\n field.value = html.escape(field.value)\r\n\r\n if field.inline:\r\n self.fields += await fill_out(self.guild, embed_field_inline, [\r\n (\"FIELD_NAME\", field.name, PARSE_MODE_SPECIAL_EMBED),\r\n (\"FIELD_VALUE\", field.value, PARSE_MODE_EMBED)\r\n ])\r\n else:\r\n self.fields += await fill_out(self.guild, embed_field, [\r\n (\"FIELD_NAME\", field.name, PARSE_MODE_SPECIAL_EMBED),\r\n (\"FIELD_VALUE\", field.value, PARSE_MODE_EMBED)])\r\n\r\n async def build_author(self):\r\n self.author = html.escape(self.embed.author.name) if self.embed.author.name != self.check_against else \"\"\r\n\r\n self.author = f'<a class=\"chatlog__embed-author-name-link\" href=\"{self.embed.author.url}\">{self.author}</a>' \\\r\n if self.embed.author.url != self.check_against \\\r\n else self.author\r\n\r\n author_icon = await fill_out(self.guild, embed_author_icon, [\r\n (\"AUTHOR\", self.author, PARSE_MODE_NONE),\r\n (\"AUTHOR_ICON\", self.embed.author.icon_url, PARSE_MODE_NONE)\r\n ]) if self.embed.author.icon_url != self.check_against else \"\"\r\n\r\n if author_icon == \"\" and self.author != \"\":\r\n self.author = await fill_out(self.guild, embed_author, [(\"AUTHOR\", self.author, PARSE_MODE_NONE)])\r\n else:\r\n self.author = author_icon\r\n\r\n async def build_image(self):\r\n self.image = await fill_out(self.guild, embed_image, [\r\n (\"EMBED_IMAGE\", str(self.embed.image.proxy_url), PARSE_MODE_NONE)\r\n ]) if self.embed.image.url != self.check_against else \"\"\r\n\r\n async def build_thumbnail(self):\r\n self.thumbnail = await fill_out(self.guild, embed_thumbnail, [\r\n (\"EMBED_THUMBNAIL\", str(self.embed.thumbnail.url), PARSE_MODE_NONE)]) \\\r\n if self.embed.thumbnail.url != self.check_against else \"\"\r\n\r\n async def build_footer(self):\r\n self.footer = 
html.escape(self.embed.footer.text) if self.embed.footer.text != self.check_against else \"\"\r\n footer_icon = self.embed.footer.icon_url if self.embed.footer.icon_url != self.check_against else None\r\n\r\n if not self.footer:\r\n return\r\n\r\n if footer_icon is not None:\r\n self.footer = await fill_out(self.guild, embed_footer_icon, [\r\n (\"EMBED_FOOTER\", self.footer, PARSE_MODE_NONE),\r\n (\"EMBED_FOOTER_ICON\", footer_icon, PARSE_MODE_NONE)\r\n ])\r\n else:\r\n self.footer = await fill_out(self.guild, embed_footer, [\r\n (\"EMBED_FOOTER\", self.footer, PARSE_MODE_NONE)])\r\n\r\n async def build_embed(self):\r\n self.embed = await fill_out(self.guild, embed_body, [\r\n (\"EMBED_R\", str(self.r)),\r\n (\"EMBED_G\", str(self.g)),\r\n (\"EMBED_B\", str(self.b)),\r\n (\"EMBED_AUTHOR\", self.author, PARSE_MODE_NONE),\r\n (\"EMBED_TITLE\", self.title, PARSE_MODE_NONE),\r\n (\"EMBED_IMAGE\", self.image, PARSE_MODE_NONE),\r\n (\"EMBED_THUMBNAIL\", self.thumbnail, PARSE_MODE_NONE),\r\n (\"EMBED_DESC\", self.description, PARSE_MODE_NONE),\r\n (\"EMBED_FIELDS\", self.fields, PARSE_MODE_NONE),\r\n (\"EMBED_FOOTER\", self.footer, PARSE_MODE_NONE),\r\n ])\r"
},
{
"identifier": "Reaction",
"path": "chat_exporter/construct/assets/reaction.py",
"snippet": "class Reaction:\r\n def __init__(self, reaction, guild):\r\n self.reaction = reaction\r\n self.guild = guild\r\n\r\n async def flow(self):\r\n await self.build_reaction()\r\n\r\n return self.reaction\r\n\r\n async def build_reaction(self):\r\n if \":\" in str(self.reaction.emoji):\r\n emoji_animated = re.compile(r\"<a:.*:.*>\")\r\n if emoji_animated.search(str(self.reaction.emoji)):\r\n await self.create_discord_reaction(\"gif\")\r\n else:\r\n await self.create_discord_reaction(\"png\")\r\n else:\r\n await self.create_standard_emoji()\r\n\r\n async def create_discord_reaction(self, emoji_type):\r\n pattern = r\":.*:(\\d*)\"\r\n emoji_id = re.search(pattern, str(self.reaction.emoji)).group(1)\r\n self.reaction = await fill_out(self.guild, custom_emoji, [\r\n (\"EMOJI\", str(emoji_id), PARSE_MODE_NONE),\r\n (\"EMOJI_COUNT\", str(self.reaction.count), PARSE_MODE_NONE),\r\n (\"EMOJI_FILE\", emoji_type, PARSE_MODE_NONE)\r\n ])\r\n\r\n async def create_standard_emoji(self):\r\n react_emoji = await convert_emoji(self.reaction.emoji)\r\n self.reaction = await fill_out(self.guild, emoji, [\r\n (\"EMOJI\", str(react_emoji), PARSE_MODE_NONE),\r\n (\"EMOJI_COUNT\", str(self.reaction.count), PARSE_MODE_NONE)\r\n ])\r"
},
{
"identifier": "Attachment",
"path": "chat_exporter/construct/assets/attachment.py",
"snippet": "class Attachment:\r\n def __init__(self, attachments, guild):\r\n self.attachments = attachments\r\n self.guild = guild\r\n\r\n async def flow(self):\r\n await self.build_attachment()\r\n return self.attachments\r\n\r\n async def build_attachment(self):\r\n if self.attachments.content_type is not None:\r\n if \"image\" in self.attachments.content_type:\r\n return await self.image()\r\n elif \"video\" in self.attachments.content_type:\r\n return await self.video()\r\n elif \"audio\" in self.attachments.content_type:\r\n return await self.audio()\r\n await self.file()\r\n\r\n async def image(self):\r\n self.attachments = await fill_out(self.guild, img_attachment, [\r\n (\"ATTACH_URL\", self.attachments.proxy_url, PARSE_MODE_NONE),\r\n (\"ATTACH_URL_THUMB\", self.attachments.proxy_url, PARSE_MODE_NONE)\r\n ])\r\n\r\n async def video(self):\r\n self.attachments = await fill_out(self.guild, video_attachment, [\r\n (\"ATTACH_URL\", self.attachments.proxy_url, PARSE_MODE_NONE)\r\n ])\r\n\r\n async def audio(self):\r\n file_icon = DiscordUtils.file_attachment_audio\r\n file_size = self.get_file_size(self.attachments.size)\r\n\r\n self.attachments = await fill_out(self.guild, audio_attachment, [\r\n (\"ATTACH_ICON\", file_icon, PARSE_MODE_NONE),\r\n (\"ATTACH_URL\", self.attachments.url, PARSE_MODE_NONE),\r\n (\"ATTACH_BYTES\", str(file_size), PARSE_MODE_NONE),\r\n (\"ATTACH_AUDIO\", self.attachments.proxy_url, PARSE_MODE_NONE),\r\n (\"ATTACH_FILE\", str(self.attachments.filename), PARSE_MODE_NONE)\r\n ])\r\n\r\n async def file(self):\r\n file_icon = await self.get_file_icon()\r\n\r\n file_size = self.get_file_size(self.attachments.size)\r\n\r\n self.attachments = await fill_out(self.guild, msg_attachment, [\r\n (\"ATTACH_ICON\", file_icon, PARSE_MODE_NONE),\r\n (\"ATTACH_URL\", self.attachments.url, PARSE_MODE_NONE),\r\n (\"ATTACH_BYTES\", str(file_size), PARSE_MODE_NONE),\r\n (\"ATTACH_FILE\", str(self.attachments.filename), PARSE_MODE_NONE)\r\n ])\r\n\r\n @staticmethod\r\n def get_file_size(file_size):\r\n if file_size == 0:\r\n return \"0 bytes\"\r\n size_name = (\"bytes\", \"KB\", \"MB\")\r\n i = int(math.floor(math.log(file_size, 1024)))\r\n p = math.pow(1024, i)\r\n s = round(file_size / p, 2)\r\n return \"%s %s\" % (s, size_name[i])\r\n\r\n async def get_file_icon(self) -> str:\r\n acrobat_types = \"pdf\"\r\n webcode_types = \"html\", \"htm\", \"css\", \"rss\", \"xhtml\", \"xml\"\r\n code_types = \"py\", \"cgi\", \"pl\", \"gadget\", \"jar\", \"msi\", \"wsf\", \"bat\", \"php\", \"js\"\r\n document_types = (\r\n \"txt\", \"doc\", \"docx\", \"rtf\", \"xls\", \"xlsx\", \"ppt\", \"pptx\", \"odt\", \"odp\", \"ods\", \"odg\", \"odf\", \"swx\",\r\n \"sxi\", \"sxc\", \"sxd\", \"stw\"\r\n )\r\n archive_types = (\r\n \"br\", \"rpm\", \"dcm\", \"epub\", \"zip\", \"tar\", \"rar\", \"gz\", \"bz2\", \"7x\", \"deb\", \"ar\", \"Z\", \"lzo\", \"lz\", \"lz4\",\r\n \"arj\", \"pkg\", \"z\"\r\n )\r\n\r\n extension = self.attachments.url.rsplit('.', 1)[1]\r\n if extension in acrobat_types:\r\n return DiscordUtils.file_attachment_acrobat\r\n elif extension in webcode_types:\r\n return DiscordUtils.file_attachment_webcode\r\n elif extension in code_types:\r\n return DiscordUtils.file_attachment_code\r\n elif extension in document_types:\r\n return DiscordUtils.file_attachment_document\r\n elif extension in archive_types:\r\n return DiscordUtils.file_attachment_archive\r\n else:\r\n return DiscordUtils.file_attachment_unknown\r"
},
{
"identifier": "Component",
"path": "chat_exporter/construct/assets/component.py",
"snippet": "class Component:\r\n styles = {\r\n \"primary\": \"#5865F2\",\r\n \"secondary\": \"#4F545C\",\r\n \"success\": \"#2D7D46\",\r\n \"danger\": \"#D83C3E\",\r\n \"blurple\": \"#5865F2\",\r\n \"grey\": \"#4F545C\",\r\n \"gray\": \"#4F545C\",\r\n \"green\": \"#2D7D46\",\r\n \"red\": \"#D83C3E\",\r\n \"link\": \"#4F545C\",\r\n }\r\n\r\n components: str = \"\"\r\n menus: str = \"\"\r\n buttons: str = \"\"\r\n menu_div_id: int = 0\r\n\r\n def __init__(self, component, guild):\r\n self.component = component\r\n self.guild = guild\r\n\r\n async def build_component(self, c):\r\n if isinstance(c, discord.Button):\r\n await self.build_button(c)\r\n elif isinstance(c, discord.SelectMenu):\r\n await self.build_menu(c)\r\n Component.menu_div_id += 1\r\n\r\n async def build_button(self, c):\r\n url = c.url if c.url else \"\"\r\n label = c.label if c.label else \"\"\r\n style = self.styles[str(c.style).split(\".\")[1]]\r\n icon = DiscordUtils.button_external_link if url else \"\"\r\n emoji = str(c.emoji) if c.emoji else \"\"\r\n\r\n self.buttons += await fill_out(self.guild, component_button, [\r\n (\"DISABLED\", \"chatlog__component-disabled\" if c.disabled else \"\", PARSE_MODE_NONE),\r\n (\"URL\", str(url), PARSE_MODE_NONE),\r\n (\"LABEL\", str(label), PARSE_MODE_MARKDOWN),\r\n (\"EMOJI\", str(emoji), PARSE_MODE_EMOJI),\r\n (\"ICON\", str(icon), PARSE_MODE_NONE),\r\n (\"STYLE\", style, PARSE_MODE_NONE)\r\n ])\r\n\r\n async def build_menu(self, c):\r\n placeholder = c.placeholder if c.placeholder else \"\"\r\n options = c.options\r\n content = \"\"\r\n\r\n if not c.disabled:\r\n content = await self.build_menu_options(options)\r\n\r\n self.menus += await fill_out(self.guild, component_menu, [\r\n (\"DISABLED\", \"chatlog__component-disabled\" if c.disabled else \"\", PARSE_MODE_NONE),\r\n (\"ID\", str(self.menu_div_id), PARSE_MODE_NONE),\r\n (\"PLACEHOLDER\", str(placeholder), PARSE_MODE_MARKDOWN),\r\n (\"CONTENT\", str(content), PARSE_MODE_NONE),\r\n (\"ICON\", DiscordUtils.interaction_dropdown_icon, PARSE_MODE_NONE),\r\n ])\r\n\r\n async def build_menu_options(self, options):\r\n content = []\r\n for option in options:\r\n if option.emoji:\r\n content.append(await fill_out(self.guild, component_menu_options_emoji, [\r\n (\"EMOJI\", str(option.emoji), PARSE_MODE_EMOJI),\r\n (\"TITLE\", str(option.label), PARSE_MODE_MARKDOWN),\r\n (\"DESCRIPTION\", str(option.description) if option.description else \"\", PARSE_MODE_MARKDOWN)\r\n ]))\r\n else:\r\n content.append(await fill_out(self.guild, component_menu_options, [\r\n (\"TITLE\", str(option.label), PARSE_MODE_MARKDOWN),\r\n (\"DESCRIPTION\", str(option.description) if option.description else \"\", PARSE_MODE_MARKDOWN)\r\n ]))\r\n\r\n if content:\r\n content = f'<div id=\"dropdownMenu{self.menu_div_id}\" class=\"dropdownContent\">{\"\".join(content)}</div>'\r\n\r\n return content\r\n\r\n async def flow(self):\r\n for c in self.component.children:\r\n await self.build_component(c)\r\n\r\n if self.menus:\r\n self.components += f'<div class=\"chatlog__components\">{self.menus}</div>'\r\n\r\n if self.buttons:\r\n self.components += f'<div class=\"chatlog__components\">{self.buttons}</div>'\r\n\r\n return self.components\r"
},
{
"identifier": "DiscordUtils",
"path": "chat_exporter/ext/discord_utils.py",
"snippet": "class DiscordUtils:\r\n logo: str = 'https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-logo.svg'\r\n default_avatar: str = 'https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-default.png'\r\n pinned_message_icon: str = 'https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-pinned.svg'\r\n thread_channel_icon: str = 'https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-thread.svg'\r\n file_attachment_audio: str = 'https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-audio.svg'\r\n file_attachment_acrobat: str = 'https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-acrobat.svg'\r\n file_attachment_webcode: str = 'https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-webcode.svg'\r\n file_attachment_code: str = 'https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-code.svg'\r\n file_attachment_document: str = 'https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-document.svg'\r\n file_attachment_archive: str = 'https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-archive.svg'\r\n file_attachment_unknown: str = 'https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-unknown.svg'\r\n button_external_link: str = '<img class=\"chatlog__reference-icon\" src=\"https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-external-link.svg\">'\r\n reference_attachment_icon: str = '<img class=\"chatlog__reference-icon\" src=\"https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-attachment.svg\">'\r\n interaction_command_icon: str = '<img class=\"chatlog__interaction-icon\" src=\"https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-command.svg\">'\r\n interaction_dropdown_icon: str = '<img class=\"chatlog__dropdown-icon\" src=\"https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/discord-dropdown.svg\">'"
},
{
"identifier": "fill_out",
"path": "chat_exporter/ext/html_generator.py",
"snippet": "PARSE_MODE_NONE = 0\r\nPARSE_MODE_NO_MARKDOWN = 1\r\nPARSE_MODE_MARKDOWN = 2\r\nPARSE_MODE_EMBED = 3\r\nPARSE_MODE_SPECIAL_EMBED = 4\r\nPARSE_MODE_REFERENCE = 5\r\nPARSE_MODE_EMOJI = 6\r\nasync def fill_out(guild, base, replacements):\r\ndef read_file(filename):\r"
}
] | import html
from typing import List, Optional, Union
from pytz import timezone
from datetime import timedelta
from chat_exporter.ext.discord_import import discord
from chat_exporter.construct.assets import Attachment, Component, Embed, Reaction
from chat_exporter.ext.discord_utils import DiscordUtils
from chat_exporter.ext.html_generator import (
fill_out,
bot_tag,
bot_tag_verified,
message_body,
message_pin,
message_thread,
message_content,
message_reference,
message_reference_unknown,
message_interaction,
img_attachment,
start_message,
end_message,
PARSE_MODE_NONE,
PARSE_MODE_MARKDOWN,
PARSE_MODE_REFERENCE,
)
| 6,409 | if isinstance(e, discord.NotFound):
self.message.reference = message_reference_unknown
return
is_bot = _gather_user_bot(message.author)
user_colour = await self._gather_user_colour(message.author)
if not message.content and not message.interaction:
message.content = "Click to see attachment"
elif not message.content and message.interaction:
message.content = "Click to see command"
icon = ""
if not message.interaction and (message.embeds or message.attachments):
icon = DiscordUtils.reference_attachment_icon
elif message.interaction:
icon = DiscordUtils.interaction_command_icon
_, message_edited_at = self.set_time(message)
if message_edited_at:
message_edited_at = _set_edit_at(message_edited_at)
avatar_url = message.author.display_avatar if message.author.display_avatar else DiscordUtils.default_avatar
self.message.reference = await fill_out(self.guild, message_reference, [
("AVATAR_URL", str(avatar_url), PARSE_MODE_NONE),
("BOT_TAG", is_bot, PARSE_MODE_NONE),
("NAME_TAG", "%s#%s" % (message.author.name, message.author.discriminator), PARSE_MODE_NONE),
("NAME", str(html.escape(message.author.display_name))),
("USER_COLOUR", user_colour, PARSE_MODE_NONE),
("CONTENT", message.content, PARSE_MODE_REFERENCE),
("EDIT", message_edited_at, PARSE_MODE_NONE),
("ICON", icon, PARSE_MODE_NONE),
("USER_ID", str(message.author.id), PARSE_MODE_NONE),
("MESSAGE_ID", str(self.message.reference.message_id), PARSE_MODE_NONE),
])
async def build_interaction(self):
if not self.message.interaction:
self.message.interaction = ""
return
user: Union[discord.Member, discord.User] = self.message.interaction.user
is_bot = _gather_user_bot(user)
user_colour = await self._gather_user_colour(user)
avatar_url = user.display_avatar if user.display_avatar else DiscordUtils.default_avatar
self.message.interaction = await fill_out(self.guild, message_interaction, [
("AVATAR_URL", str(avatar_url), PARSE_MODE_NONE),
("BOT_TAG", is_bot, PARSE_MODE_NONE),
("NAME_TAG", "%s#%s" % (user.name, user.discriminator), PARSE_MODE_NONE),
("NAME", str(html.escape(user.display_name))),
("USER_COLOUR", user_colour, PARSE_MODE_NONE),
("FILLER", "used ", PARSE_MODE_NONE),
("COMMAND", "/" + self.message.interaction.name, PARSE_MODE_NONE),
("USER_ID", str(user.id), PARSE_MODE_NONE),
("INTERACTION_ID", str(self.message.interaction.id), PARSE_MODE_NONE),
])
async def build_sticker(self):
if not self.message.stickers or not hasattr(self.message.stickers[0], "url"):
return
sticker_image_url = self.message.stickers[0].url
if sticker_image_url.endswith(".json"):
sticker = await self.message.stickers[0].fetch()
sticker_image_url = (
f"https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/stickers/{sticker.pack_id}/{sticker.id}.gif"
)
self.message.content = await fill_out(self.guild, img_attachment, [
("ATTACH_URL", str(sticker_image_url), PARSE_MODE_NONE),
("ATTACH_URL_THUMB", str(sticker_image_url), PARSE_MODE_NONE)
])
async def build_assets(self):
for e in self.message.embeds:
self.embeds += await Embed(e, self.guild).flow()
for a in self.message.attachments:
self.attachments += await Attachment(a, self.guild).flow()
for c in self.message.components:
self.components += await Component(c, self.guild).flow()
for r in self.message.reactions:
self.reactions += await Reaction(r, self.guild).flow()
if self.reactions:
self.reactions = f'<div class="chatlog__reactions">{self.reactions}</div>'
async def build_message_template(self):
started = await self.generate_message_divider()
if started:
return self.message_html
self.message_html += await fill_out(self.guild, message_body, [
("MESSAGE_ID", str(self.message.id)),
("MESSAGE_CONTENT", self.message.content, PARSE_MODE_NONE),
("EMBEDS", self.embeds, PARSE_MODE_NONE),
("ATTACHMENTS", self.attachments, PARSE_MODE_NONE),
("COMPONENTS", self.components, PARSE_MODE_NONE),
("EMOJI", self.reactions, PARSE_MODE_NONE),
("TIMESTAMP", self.message_created_at, PARSE_MODE_NONE),
("TIME", self.message_created_at.split()[-1], PARSE_MODE_NONE),
])
return self.message_html
def _generate_message_divider_check(self):
return bool(
self.previous_message is None or self.message.reference != "" or self.message.interaction != "" or
self.previous_message.author.id != self.message.author.id or self.message.webhook_id is not None or
self.message.created_at > (self.previous_message.created_at + timedelta(minutes=4))
)
async def generate_message_divider(self, channel_audit=False):
if channel_audit or self._generate_message_divider_check():
if self.previous_message is not None:
|
def _gather_user_bot(author: discord.Member):
if author.bot and author.public_flags.verified_bot:
return bot_tag_verified
elif author.bot:
return bot_tag
return ""
def _set_edit_at(message_edited_at):
return f'<span class="chatlog__reference-edited-timestamp" title="{message_edited_at}">(edited)</span>'
class MessageConstruct:
message_html: str = ""
# Asset Types
embeds: str = ""
reactions: str = ""
components: str = ""
attachments: str = ""
time_format: str = ""
def __init__(
self,
message: discord.Message,
previous_message: Optional[discord.Message],
pytz_timezone,
military_time: bool,
guild: discord.Guild,
meta_data: dict
):
self.message = message
self.previous_message = previous_message
self.pytz_timezone = pytz_timezone
self.military_time = military_time
self.guild = guild
self.time_format = "%A, %e %B %Y %I:%M %p"
if self.military_time:
self.time_format = "%A, %e %B %Y %H:%M"
self.message_created_at, self.message_edited_at = self.set_time()
self.meta_data = meta_data
async def construct_message(
self,
) -> (str, dict):
if discord.MessageType.pins_add == self.message.type:
await self.build_pin()
elif discord.MessageType.thread_created == self.message.type:
await self.build_thread()
else:
await self.build_message()
return self.message_html, self.meta_data
async def build_message(self):
await self.build_content()
await self.build_reference()
await self.build_interaction()
await self.build_sticker()
await self.build_assets()
await self.build_message_template()
await self.build_meta_data()
async def build_pin(self):
await self.generate_message_divider(channel_audit=True)
await self.build_pin_template()
async def build_thread(self):
await self.generate_message_divider(channel_audit=True)
await self.build_thread_template()
async def build_meta_data(self):
user_id = self.message.author.id
if user_id in self.meta_data:
self.meta_data[user_id][4] += 1
else:
user_name_discriminator = self.message.author.name + "#" + self.message.author.discriminator
user_created_at = self.message.author.created_at
user_bot = _gather_user_bot(self.message.author)
user_avatar = (
self.message.author.display_avatar if self.message.author.display_avatar
else DiscordUtils.default_avatar
)
user_joined_at = self.message.author.joined_at if hasattr(self.message.author, "joined_at") else None
user_display_name = (
f'<div class="meta__display-name">{self.message.author.display_name}</div>'
if self.message.author.display_name != self.message.author.name
else ""
)
self.meta_data[user_id] = [
user_name_discriminator, user_created_at, user_bot, user_avatar, 1, user_joined_at, user_display_name
]
async def build_content(self):
if not self.message.content:
self.message.content = ""
return
if self.message_edited_at:
self.message_edited_at = _set_edit_at(self.message_edited_at)
self.message.content = html.escape(self.message.content)
self.message.content = await fill_out(self.guild, message_content, [
("MESSAGE_CONTENT", self.message.content, PARSE_MODE_MARKDOWN),
("EDIT", self.message_edited_at, PARSE_MODE_NONE)
])
async def build_reference(self):
if not self.message.reference:
self.message.reference = ""
return
try:
message: discord.Message = await self.message.channel.fetch_message(self.message.reference.message_id)
except (discord.NotFound, discord.HTTPException) as e:
self.message.reference = ""
if isinstance(e, discord.NotFound):
self.message.reference = message_reference_unknown
return
is_bot = _gather_user_bot(message.author)
user_colour = await self._gather_user_colour(message.author)
if not message.content and not message.interaction:
message.content = "Click to see attachment"
elif not message.content and message.interaction:
message.content = "Click to see command"
icon = ""
if not message.interaction and (message.embeds or message.attachments):
icon = DiscordUtils.reference_attachment_icon
elif message.interaction:
icon = DiscordUtils.interaction_command_icon
_, message_edited_at = self.set_time(message)
if message_edited_at:
message_edited_at = _set_edit_at(message_edited_at)
avatar_url = message.author.display_avatar if message.author.display_avatar else DiscordUtils.default_avatar
self.message.reference = await fill_out(self.guild, message_reference, [
("AVATAR_URL", str(avatar_url), PARSE_MODE_NONE),
("BOT_TAG", is_bot, PARSE_MODE_NONE),
("NAME_TAG", "%s#%s" % (message.author.name, message.author.discriminator), PARSE_MODE_NONE),
("NAME", str(html.escape(message.author.display_name))),
("USER_COLOUR", user_colour, PARSE_MODE_NONE),
("CONTENT", message.content, PARSE_MODE_REFERENCE),
("EDIT", message_edited_at, PARSE_MODE_NONE),
("ICON", icon, PARSE_MODE_NONE),
("USER_ID", str(message.author.id), PARSE_MODE_NONE),
("MESSAGE_ID", str(self.message.reference.message_id), PARSE_MODE_NONE),
])
async def build_interaction(self):
if not self.message.interaction:
self.message.interaction = ""
return
user: Union[discord.Member, discord.User] = self.message.interaction.user
is_bot = _gather_user_bot(user)
user_colour = await self._gather_user_colour(user)
avatar_url = user.display_avatar if user.display_avatar else DiscordUtils.default_avatar
self.message.interaction = await fill_out(self.guild, message_interaction, [
("AVATAR_URL", str(avatar_url), PARSE_MODE_NONE),
("BOT_TAG", is_bot, PARSE_MODE_NONE),
("NAME_TAG", "%s#%s" % (user.name, user.discriminator), PARSE_MODE_NONE),
("NAME", str(html.escape(user.display_name))),
("USER_COLOUR", user_colour, PARSE_MODE_NONE),
("FILLER", "used ", PARSE_MODE_NONE),
("COMMAND", "/" + self.message.interaction.name, PARSE_MODE_NONE),
("USER_ID", str(user.id), PARSE_MODE_NONE),
("INTERACTION_ID", str(self.message.interaction.id), PARSE_MODE_NONE),
])
async def build_sticker(self):
if not self.message.stickers or not hasattr(self.message.stickers[0], "url"):
return
sticker_image_url = self.message.stickers[0].url
if sticker_image_url.endswith(".json"):
sticker = await self.message.stickers[0].fetch()
sticker_image_url = (
f"https://cdn.jsdelivr.net/gh/mahtoid/DiscordUtils@master/stickers/{sticker.pack_id}/{sticker.id}.gif"
)
self.message.content = await fill_out(self.guild, img_attachment, [
("ATTACH_URL", str(sticker_image_url), PARSE_MODE_NONE),
("ATTACH_URL_THUMB", str(sticker_image_url), PARSE_MODE_NONE)
])
async def build_assets(self):
for e in self.message.embeds:
self.embeds += await Embed(e, self.guild).flow()
for a in self.message.attachments:
self.attachments += await Attachment(a, self.guild).flow()
for c in self.message.components:
self.components += await Component(c, self.guild).flow()
for r in self.message.reactions:
self.reactions += await Reaction(r, self.guild).flow()
if self.reactions:
self.reactions = f'<div class="chatlog__reactions">{self.reactions}</div>'
async def build_message_template(self):
started = await self.generate_message_divider()
if started:
return self.message_html
self.message_html += await fill_out(self.guild, message_body, [
("MESSAGE_ID", str(self.message.id)),
("MESSAGE_CONTENT", self.message.content, PARSE_MODE_NONE),
("EMBEDS", self.embeds, PARSE_MODE_NONE),
("ATTACHMENTS", self.attachments, PARSE_MODE_NONE),
("COMPONENTS", self.components, PARSE_MODE_NONE),
("EMOJI", self.reactions, PARSE_MODE_NONE),
("TIMESTAMP", self.message_created_at, PARSE_MODE_NONE),
("TIME", self.message_created_at.split()[-1], PARSE_MODE_NONE),
])
return self.message_html
def _generate_message_divider_check(self):
return bool(
self.previous_message is None or self.message.reference != "" or self.message.interaction != "" or
self.previous_message.author.id != self.message.author.id or self.message.webhook_id is not None or
self.message.created_at > (self.previous_message.created_at + timedelta(minutes=4))
)
async def generate_message_divider(self, channel_audit=False):
if channel_audit or self._generate_message_divider_check():
if self.previous_message is not None:
| self.message_html += await fill_out(self.guild, end_message, [])
| 6 | 2023-12-18 14:17:31+00:00 | 8k |
mariaalfaroc/a2s-transformer | train.py | [
{
"identifier": "CTCTrainedCRNN",
"path": "networks/crnn/model.py",
"snippet": "class CTCTrainedCRNN(LightningModule):\n def __init__(\n self, w2i, i2w, ytest_i2w=None, max_audio_len=100, frame_multiplier_factor=8\n ):\n super(CTCTrainedCRNN, self).__init__()\n # Save hyperparameters\n self.save_hyperparameters()\n # Dictionaries\n self.w2i = w2i\n self.i2w = i2w\n self.ytest_i2w = ytest_i2w if ytest_i2w is not None else i2w\n # Model\n self.model = CRNN(\n output_size=len(self.w2i) + 1,\n frame_multiplier_factor=frame_multiplier_factor,\n )\n self.width_reduction = self.model.cnn.width_reduction\n self.summary(max_audio_len)\n # Loss: the target index cannot be blank!\n self.compute_ctc_loss = CTCLoss(blank=len(self.w2i), zero_infinity=False)\n # Predictions\n self.Y = []\n self.YHat = []\n\n def summary(self, max_audio_len):\n summary(self.model, input_size=[1, NUM_CHANNELS, IMG_HEIGHT, max_audio_len])\n\n def configure_optimizers(self):\n return torch.optim.Adam(self.model.parameters(), lr=1e-3, weight_decay=1e-6)\n\n def forward(self, x):\n return self.model(x)\n\n def training_step(self, batch, batch_idx):\n x, xl, y, yl = batch\n yhat = self.model(x)\n # ------ CTC Requirements ------\n # yhat: [batch, frames, vocab_size]\n yhat = yhat.log_softmax(dim=2)\n yhat = yhat.permute(1, 0, 2).contiguous()\n # ------------------------------\n loss = self.compute_ctc_loss(yhat, y, xl, yl)\n self.log(\"train_loss\", loss, prog_bar=True, logger=True, on_epoch=True)\n return loss\n\n def ctc_greedy_decoder(self, y_pred, i2w):\n # y_pred = [seq_len, num_classes]\n # Best path\n y_pred_decoded = torch.argmax(y_pred, dim=1)\n # Merge repeated elements\n y_pred_decoded = torch.unique_consecutive(y_pred_decoded, dim=0).tolist()\n # Convert to string; len(i2w) -> CTC-blank\n y_pred_decoded = [i2w[i] for i in y_pred_decoded if i != len(i2w)]\n return y_pred_decoded\n\n def validation_step(self, batch, batch_idx):\n x, y = batch # batch_size = 1\n # Model prediction (decoded using the vocabulary on which it was trained)\n yhat = self.model(x)[0]\n yhat = yhat.log_softmax(dim=-1).detach().cpu()\n yhat = self.ctc_greedy_decoder(yhat, self.i2w)\n # Decoded ground truth\n y = [self.ytest_i2w[i.item()] for i in y[0]]\n # Append to later compute metrics\n self.Y.append(y)\n self.YHat.append(yhat)\n\n def test_step(self, batch, batch_idx):\n return self.validation_step(batch, batch_idx)\n\n def on_validation_epoch_end(self, name=\"val\", print_random_samples=False):\n metrics = compute_metrics(y_true=self.Y, y_pred=self.YHat)\n for k, v in metrics.items():\n self.log(f\"{name}_{k}\", v, prog_bar=True, logger=True, on_epoch=True)\n # Print random samples\n if print_random_samples:\n index = random.randint(0, len(self.Y) - 1)\n print(f\"Ground truth - {self.Y[index]}\")\n print(f\"Prediction - {self.YHat[index]}\")\n # Clear predictions\n self.Y.clear()\n self.YHat.clear()\n return metrics\n\n def on_test_epoch_end(self):\n return self.on_validation_epoch_end(name=\"test\", print_random_samples=True)"
},
{
"identifier": "A2STransformer",
"path": "networks/transformer/model.py",
"snippet": "class A2STransformer(LightningModule):\n def __init__(\n self,\n max_seq_len,\n max_audio_len,\n w2i,\n i2w,\n ytest_i2w=None,\n attn_window=-1,\n teacher_forcing_prob=0.5,\n ):\n super(A2STransformer, self).__init__()\n # Save hyperparameters\n self.save_hyperparameters()\n # Dictionaries\n self.w2i = w2i\n self.i2w = i2w\n self.ytest_i2w = ytest_i2w if ytest_i2w is not None else i2w\n self.padding_idx = w2i[\"<PAD>\"]\n # Model\n self.max_seq_len = max_seq_len\n self.teacher_forcing_prob = teacher_forcing_prob\n self.encoder = Encoder(in_channels=NUM_CHANNELS)\n self.pos_2d = PositionalEncoding2D(\n num_channels=256,\n max_height=math.ceil(IMG_HEIGHT / HEIGHT_REDUCTION),\n max_width=math.ceil(max_audio_len / WIDTH_REDUCTION),\n )\n self.decoder = Decoder(\n output_size=len(self.w2i),\n max_seq_len=max_seq_len,\n num_embeddings=len(self.w2i),\n padding_idx=self.padding_idx,\n attn_window=attn_window,\n )\n self.summary(max_audio_len)\n # Loss\n self.compute_loss = CrossEntropyLoss(ignore_index=self.padding_idx)\n # Predictions\n self.Y = []\n self.YHat = []\n\n def summary(self, max_audio_len):\n print(\"Encoder\")\n summary(self.encoder, input_size=[1, NUM_CHANNELS, IMG_HEIGHT, max_audio_len])\n print(\"Decoder\")\n tgt_size = [1, self.max_seq_len]\n memory_size = [\n 1,\n math.ceil(IMG_HEIGHT / HEIGHT_REDUCTION)\n * math.ceil(max_audio_len / WIDTH_REDUCTION),\n 256,\n ]\n memory_len_size = [1]\n summary(\n self.decoder,\n input_size=[tgt_size, memory_size, memory_len_size],\n dtypes=[torch.int64, torch.float32, torch.int64],\n )\n\n def configure_optimizers(self):\n return torch.optim.Adam(\n list(self.encoder.parameters()) + list(self.decoder.parameters()),\n lr=1e-4,\n amsgrad=False,\n )\n\n def forward(self, x, xl, y_in):\n # Encoder\n x = self.encoder(x=x)\n # Prepare for decoder\n # 2D PE + flatten + permute\n x = self.pos_2d(x)\n x = x.flatten(2).permute(0, 2, 1).contiguous()\n # Decoder\n y_out_hat = self.decoder(tgt=y_in, memory=x, memory_len=xl)\n return y_out_hat\n\n def apply_teacher_forcing(self, y):\n # y.shape = [batch_size, seq_len]\n y_errored = y.clone()\n for i in range(y_errored.size(0)):\n for j in range(y_errored.size(1)):\n if (\n random.random() < self.teacher_forcing_prob\n and y[i, j] != self.padding_idx\n ):\n y_errored[i, j] = random.randint(0, len(self.w2i) - 1)\n return y_errored\n\n def training_step(self, batch, batch_idx):\n x, xl, y_in, y_out = batch\n y_in = self.apply_teacher_forcing(y_in)\n yhat = self.forward(x=x, xl=xl, y_in=y_in)\n loss = self.compute_loss(yhat, y_out)\n self.log(\"train_loss\", loss, prog_bar=True, logger=True, on_epoch=True)\n return loss\n\n def validation_step(self, batch, batch_idx):\n x, y = batch\n assert x.size(0) == 1, \"Inference only supports batch_size = 1\"\n\n # Encoder\n x = self.encoder(x=x)\n # Prepare for decoder\n # 2D PE + flatten + permute\n x = self.pos_2d(x)\n x = x.flatten(2).permute(0, 2, 1).contiguous()\n # Autoregressive decoding\n y_in = torch.tensor([self.w2i[SOS_TOKEN]]).unsqueeze(0).long().to(x.device)\n yhat = []\n for _ in range(self.max_seq_len):\n y_out_hat = self.decoder(tgt=y_in, memory=x, memory_len=None)\n y_out_hat = y_out_hat[0, :, -1] # Last token\n y_out_hat_token = y_out_hat.argmax(dim=-1).item()\n y_out_hat_word = self.i2w[y_out_hat_token]\n yhat.append(y_out_hat_word)\n if y_out_hat_word == EOS_TOKEN:\n break\n\n y_in = torch.cat(\n [y_in, torch.tensor([[y_out_hat_token]]).long().to(x.device)], dim=1\n )\n\n # Decoded ground truth\n y = [self.ytest_i2w[i.item()] for i in 
y[0][1:]] # Remove SOS_TOKEN\n # Append to later compute metrics\n self.Y.append(y)\n self.YHat.append(yhat)\n\n def test_step(self, batch, batch_idx):\n return self.validation_step(batch, batch_idx)\n\n def on_validation_epoch_end(self, name=\"val\", print_random_samples=False):\n metrics = compute_metrics(y_true=self.Y, y_pred=self.YHat)\n for k, v in metrics.items():\n self.log(f\"{name}_{k}\", v, prog_bar=True, logger=True, on_epoch=True)\n # Print random samples\n if print_random_samples:\n index = random.randint(0, len(self.Y) - 1)\n print(f\"Ground truth - {self.Y[index]}\")\n print(f\"Prediction - {self.YHat[index]}\")\n # Clear predictions\n self.Y.clear()\n self.YHat.clear()\n return metrics\n\n def on_test_epoch_end(self):\n return self.on_validation_epoch_end(name=\"test\", print_random_samples=True)"
},
{
"identifier": "CTCDataModule",
"path": "my_utils/ctc_dataset.py",
"snippet": "class CTCDataModule(LightningDataModule):\n def __init__(\n self,\n ds_name: str,\n use_voice_change_token: bool = False,\n batch_size: int = 16,\n num_workers: int = 20,\n width_reduction: int = 2,\n ):\n super(CTCDataModule, self).__init__()\n self.ds_name = ds_name\n self.use_voice_change_token = use_voice_change_token\n self.batch_size = batch_size\n self.num_workers = num_workers\n self.width_reduction = (\n width_reduction # Must be overrided with that of the model!\n )\n\n def setup(self, stage: str):\n if stage == \"fit\":\n self.train_ds = CTCDataset(\n ds_name=self.ds_name,\n partition_type=\"train\",\n width_reduction=self.width_reduction,\n use_voice_change_token=self.use_voice_change_token,\n )\n self.val_ds = CTCDataset(\n ds_name=self.ds_name,\n partition_type=\"val\",\n width_reduction=self.width_reduction,\n use_voice_change_token=self.use_voice_change_token,\n )\n\n if stage == \"test\" or stage == \"predict\":\n self.test_ds = CTCDataset(\n ds_name=self.ds_name,\n partition_type=\"test\",\n width_reduction=self.width_reduction,\n use_voice_change_token=self.use_voice_change_token,\n )\n\n def train_dataloader(self):\n return DataLoader(\n self.train_ds,\n batch_size=self.batch_size,\n shuffle=True,\n num_workers=self.num_workers,\n collate_fn=ctc_batch_preparation,\n ) # prefetch_factor=2\n\n def val_dataloader(self):\n return DataLoader(\n self.val_ds,\n batch_size=1,\n shuffle=False,\n num_workers=self.num_workers,\n ) # prefetch_factor=2\n\n def test_dataloader(self):\n return DataLoader(\n self.test_ds,\n batch_size=1,\n shuffle=False,\n num_workers=self.num_workers,\n ) # prefetch_factor=2\n\n def predict_dataloader(self):\n print(\"Using test_dataloader for predictions.\")\n return self.test_dataloader(self)\n\n def get_w2i_and_i2w(self):\n try:\n return self.train_ds.w2i, self.train_ds.i2w\n except AttributeError:\n return self.test_ds.w2i, self.test_ds.i2w\n\n def get_max_seq_len(self):\n try:\n return self.train_ds.max_seq_len\n except AttributeError:\n return self.test_ds.max_seq_len\n\n def get_max_audio_len(self):\n try:\n return self.train_ds.max_audio_len\n except AttributeError:\n return self.test_ds.max_audio_len\n\n def get_frame_multiplier_factor(self):\n try:\n return self.train_ds.frame_multiplier_factor\n except AttributeError:\n return self.test_ds.frame_multiplier_factor"
},
{
"identifier": "ARDataModule",
"path": "my_utils/ar_dataset.py",
"snippet": "class ARDataModule(LightningDataModule):\n def __init__(\n self,\n ds_name: str,\n use_voice_change_token: bool = False,\n batch_size: int = 16,\n num_workers: int = 20,\n ):\n super(ARDataModule, self).__init__()\n self.ds_name = ds_name\n self.use_voice_change_token = use_voice_change_token\n self.batch_size = batch_size\n self.num_workers = num_workers\n\n def setup(self, stage: str):\n if stage == \"fit\":\n self.train_ds = ARDataset(\n ds_name=self.ds_name,\n partition_type=\"train\",\n use_voice_change_token=self.use_voice_change_token,\n )\n self.val_ds = ARDataset(\n ds_name=self.ds_name,\n partition_type=\"val\",\n use_voice_change_token=self.use_voice_change_token,\n )\n\n if stage == \"test\" or stage == \"predict\":\n self.test_ds = ARDataset(\n ds_name=self.ds_name,\n partition_type=\"test\",\n use_voice_change_token=self.use_voice_change_token,\n )\n\n def train_dataloader(self):\n return DataLoader(\n self.train_ds,\n batch_size=self.batch_size,\n shuffle=True,\n num_workers=self.num_workers,\n collate_fn=ar_batch_preparation,\n ) # prefetch_factor=2\n\n def val_dataloader(self):\n return DataLoader(\n self.val_ds,\n batch_size=1,\n shuffle=False,\n num_workers=self.num_workers,\n ) # prefetch_factor=2\n\n def test_dataloader(self):\n return DataLoader(\n self.test_ds,\n batch_size=1,\n shuffle=False,\n num_workers=self.num_workers,\n ) # prefetch_factor=2\n\n def predict_dataloader(self):\n print(\"Using test_dataloader for predictions.\")\n return self.test_dataloader(self)\n\n def get_w2i_and_i2w(self):\n try:\n return self.train_ds.w2i, self.train_ds.i2w\n except AttributeError:\n return self.test_ds.w2i, self.test_ds.i2w\n\n def get_max_seq_len(self):\n try:\n return self.train_ds.max_seq_len\n except AttributeError:\n return self.test_ds.max_seq_len\n\n def get_max_audio_len(self):\n try:\n return self.train_ds.max_audio_len\n except AttributeError:\n return self.test_ds.max_audio_len"
},
{
"identifier": "seed_everything",
"path": "my_utils/seed.py",
"snippet": "def seed_everything(seed: int, deterministic: bool = True, benchmark: bool = True):\n import random, os\n import numpy as np\n import torch\n\n random.seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.backends.cudnn.deterministic = deterministic\n torch.backends.cudnn.benchmark = benchmark"
}
] | import gc
import fire
import torch
from lightning.pytorch import Trainer
from lightning.pytorch.callbacks import EarlyStopping, ModelCheckpoint
from lightning.pytorch.loggers.wandb import WandbLogger
from networks.crnn.model import CTCTrainedCRNN
from networks.transformer.model import A2STransformer
from my_utils.ctc_dataset import CTCDataModule
from my_utils.ar_dataset import ARDataModule
from my_utils.seed import seed_everything | 4,405 |
seed_everything(42, benchmark=False)
def train(
ds_name,
model_type: str = "crnn",
attn_window: int = -1,
use_voice_change_token: bool = False,
epochs: int = 1000,
patience: int = 20,
batch_size: int = 16,
):
gc.collect()
torch.cuda.empty_cache()
# Experiment info
print("TRAIN EXPERIMENT")
print(f"\tDataset: {ds_name}")
print(f"\tModel type: {model_type}")
print(f"\tAttention window: {attn_window} (Used if model type is transformer)")
print(f"\tUse voice change token: {use_voice_change_token}")
print(f"\tEpochs: {epochs}")
print(f"\tPatience: {patience}")
print(f"\tBatch size: {batch_size}")
if model_type == "crnn":
# Data module
datamodule = CTCDataModule(
ds_name=ds_name,
use_voice_change_token=use_voice_change_token,
batch_size=batch_size,
)
datamodule.setup(stage="fit")
w2i, i2w = datamodule.get_w2i_and_i2w()
# Model
model = CTCTrainedCRNN(
w2i=w2i,
i2w=i2w,
max_audio_len=datamodule.get_max_audio_len(),
frame_multiplier_factor=datamodule.get_frame_multiplier_factor(),
)
# Override the datamodule width reduction factors with that of the model
datamodule.width_reduction = model.width_reduction
elif model_type == "transformer":
# Data module
|
seed_everything(42, benchmark=False)
def train(
ds_name,
model_type: str = "crnn",
attn_window: int = -1,
use_voice_change_token: bool = False,
epochs: int = 1000,
patience: int = 20,
batch_size: int = 16,
):
gc.collect()
torch.cuda.empty_cache()
# Experiment info
print("TRAIN EXPERIMENT")
print(f"\tDataset: {ds_name}")
print(f"\tModel type: {model_type}")
print(f"\tAttention window: {attn_window} (Used if model type is transformer)")
print(f"\tUse voice change token: {use_voice_change_token}")
print(f"\tEpochs: {epochs}")
print(f"\tPatience: {patience}")
print(f"\tBatch size: {batch_size}")
if model_type == "crnn":
# Data module
datamodule = CTCDataModule(
ds_name=ds_name,
use_voice_change_token=use_voice_change_token,
batch_size=batch_size,
)
datamodule.setup(stage="fit")
w2i, i2w = datamodule.get_w2i_and_i2w()
# Model
model = CTCTrainedCRNN(
w2i=w2i,
i2w=i2w,
max_audio_len=datamodule.get_max_audio_len(),
frame_multiplier_factor=datamodule.get_frame_multiplier_factor(),
)
# Override the datamodule width reduction factors with that of the model
datamodule.width_reduction = model.width_reduction
elif model_type == "transformer":
        # Data module
| datamodule = ARDataModule(
| 3 | 2023-12-18 20:01:00+00:00 | 8k
yacinxx/dnakey | app.py | [
{
"identifier": "CreateProfile",
"path": "create_profile.py",
"snippet": "class CreateProfile(PrimeKeyConfig):\r\n def new_profile(self):\r\n key_id = \"prime-key-profile\"\r\n self.hash_key = st.text_input(\"Enter Your Prime Key: (:red[Required])\", \r\n type=\"password\", \r\n help=\"Prime Key is your login token method so 'DnaKey' can recognize you!\", \r\n key=key_id)\r\n self.create_hash_key = self.agent_prime_key(self.hash_key)\r\n if self.create_hash_key == 1:\r\n self.tab_name = \"Create Profile\"\r\n return \r\n else:\r\n self.tab_name = \"Create Profile\"\r\n self.config_has_key = f\"dnakey${self.create_hash_key[:32:2]}\"\r\n self.config_manager = ConfigManager(self.config_has_key)\r\n self.config_manager.update_date_time()\r\n self.config_manager.update_config()\r\n self.col_new_profile1, self.col_new_profile2 = st.columns(2)\r\n self.created_profiles = self.config_manager.get_created_profiles()\r\n with self.col_new_profile1:\r\n self.profile_name_settings()\r\n self.activate_merge = st.toggle(\"Activate Merge: (:blue[Advance])\", \r\n value=True, \r\n help=\"[:red[WARNING]]: If you turn it off you can't merge this profile with a other!\") \r\n st.caption(\":red[WARNING]: If you turn it off you can't merge this profile with a other!\")\r\n with self.col_new_profile2:\r\n if self.created_profiles < 1:\r\n st.warning(\"**_Caution_**: Refrain from implementing DnaKey in real-life accounts as it remains in an experimental phase, and its reliability and security have not been fully validated.\", \r\n icon=\"❕\")\r\n self.new_profile_advance_settings()\r\n self.save_cookies = st.toggle(\"Save Cookies: (:green[For Better Performance])\", \r\n value=True, \r\n help=\"With save cookies 'dnakey' can track prime key activity and show you all the info about it and whose use it!\") \r\n st.caption(\"**:orange[Security Tracking]**: If you turn it off 'DnaKey' can't give you good result!\") \r\n self.create_new_profile()\r\n\r\n def profile_name_settings(self):\r\n if self.created_profiles < 1:\r\n st.info(\"Welcome to DnaKey. We are glad you wanna test it!\", icon=\"🎉\")\r\n with st.expander(\"Profile Settings! [See More...]\", expanded=True):\r\n if self.created_profiles < 1:\r\n st.info(\"You can start with given your profile a name!\", icon=\"🍿\")\r\n self.create_profile_name()\r\n if self.created_profiles > 1:\r\n self.options_profile_name = st.selectbox(\"Choose Options: (:red[Experimental])\", [\"One Profile\"], disabled=True)\r\n st.caption(\":blue[Info]: This option under the test (not stable!)\")\r\n else:\r\n self.options_profile_name = \"One Profile\"\r\n\r\n def create_profile_name(self):\r\n self.profile_name = st.text_input(\"Profile Name: (:red[Required])\", \r\n placeholder=\"Example: Emails...\",\r\n key='profile_name')\r\n st.caption(\":green[Note]: You can't change your profile name later!\")\r\n\r\n def new_profile_advance_settings(self):\r\n with st.expander(\"Advance Settings! 
[See More...]\"):\r\n if self.created_profiles < 1:\r\n st.info(\"This is the Advanced profile settings you don't have to mess with this yet!\", icon=\"⚙\")\r\n self.length_slider = st.slider(\"Enter Max Length: (:green[Optional])\", min_value=4, max_value=20, value=10, step=2)\r\n st.caption(\":green[Note]: The Max Length leave it in (:blue[default mode 10])\")\r\n self.has_choice = st.multiselect(\"Password Includes: (:green[Optional])\",\r\n options=[\"Lowercase\",\"Uppercase\",\"Numbers\",\"Symbols\", \"Arabic\"],\r\n default=[\"Lowercase\",\"Uppercase\",\"Numbers\"])\r\n if self.has_choice:\r\n self.lowercase = True if \"Lowercase\" in self.has_choice else False\r\n self.uppercase = True if \"Uppercase\" in self.has_choice else False\r\n self.numbers = True if \"Numbers\" in self.has_choice else False\r\n self.symbols = True if \"Symbols\" in self.has_choice else False\r\n self.arabic = True if \"Arabic\" in self.has_choice else False \r\n\r\n def create_new_profile(self):\r\n is_disabled = True if not self.profile_name else False\r\n create_new_profile = st.button(\"Create New Profile\", disabled=is_disabled, key=0)\r\n if( create_new_profile) and (self.profile_name != \"\"):\r\n if self.options_profile_name == \"One Profile\":\r\n empty_input = \"profile\"\r\n self.profile_list(empty_input)\r\n elif self.options_profile_name == \"Multi\":\r\n empty_input = \"profiles\"\r\n self.lst_profile_name = self.profile_name.split(\",\")\r\n for profile in self.lst_profile_name:\r\n self.profile_name = profile\r\n self.profile_list(empty_input)\r\n\r\n def profile_list(self, empty_input:str) -> None:\r\n my_bar = st.progress(0, text=f\"Creating {empty_input} please wait...\")\r\n for percent_complete in range(100):\r\n time.sleep(0.01)\r\n my_bar.progress(percent_complete + 1, text=f\"Creating {empty_input} please wait...\")\r\n time.sleep(1)\r\n my_bar.empty() \r\n self.check_brain_data() \r\n st.toast(f\"**:green[The new {empty_input} created successfully!]**\", icon='🎉')\r\n time.sleep(1) \r\n \r\n def check_brain_data(self):\r\n self.engine = enginev2.DNAEngine\r\n param_for_engine = {\r\n \"has_key\": self.create_hash_key,\r\n \"profile_name\": self.profile_name,\r\n \"length\": self.length_slider * 4,\r\n \"activate_merge\": self.activate_merge,\r\n \"save_cookies\": self.save_cookies,\r\n \"has_lower\": self.lowercase,\r\n \"has_upper\": self.uppercase,\r\n \"has_number\": self.numbers,\r\n \"has_symbol\": self.symbols,\r\n \"has_arabic\": self.arabic,\r\n }\r\n # Create a new instance of MainBuilder engine\r\n new_profile = self.engine(**param_for_engine)\r\n # Write the Dnakey profile to a file\r\n self.encrypted_profile_contents, self.profile_contents = new_profile.create_dnakey_profile()\r\n # Display a download button for the brain data file\r\n st.divider()\r\n self.download_profile_data()\r\n\r\n def download_profile_data(self):\r\n if self.options_profile_name == \"One Profile\":\r\n self.date_time = self.profile_contents.get(\"meta_data\", {}).get(\"date_time\", None)\r\n self.profile_version = self.profile_contents.get(\"meta_data\", {}).get(\"version\", None)\r\n self.profile_id = self.profile_contents.get(\"meta_data\", {}).get(\"profile_id\", None)\r\n self.request_status = self.profile_contents.get(\"meta_data\", {}).get(\"request_status\", None)\r\n self.profile_result()\r\n with st.expander(\"You can see your profile details before download it! 
[See More...]\"):\r\n incol1, incol2 = st.columns(2)\r\n with incol1:\r\n self.profile_meta_data()\r\n with incol2:\r\n self.profile_includes()\r\n self.prime_key_meta_data()\r\n elif self.options_profile_name == \"Multi Profiles\" and len(self.lst_profile_name) > 2:\r\n st.code(self.encrypted_profile_contents.decode(\"utf-8\"))\r\n\r\n def profile_result(self):\r\n col1, col2 = st.columns(2)\r\n with col1:\r\n st.write(\"Download Profile: (:red[Required])\")\r\n st.download_button(label='Download Data As DKP',\r\n data=self.encrypted_profile_contents,\r\n file_name=f'dnakey_{self.profile_name}.dkp')\r\n st.caption(\":green[Note]: You have to download the profile to use it in 'Create Password Tab'\") \r\n with col2:\r\n st.write(\"Copy Profile Content: (:green[Optional])\")\r\n st.text(self.encrypted_profile_contents.decode(\"utf-8\"))\r\n st.caption(\":red[WARNING]: Create a file with (:blue[.dkp]) in the end!\")\r\n\r\n def profile_meta_data(self):\r\n st.text(\"Profile MetaData!\")\r\n profile_meta_data = ['profile_id', 'profile_name', 'length', 'profile_quantity', 'activate_merge', 'save_cookies', 'request_status','date_time', 'version']\r\n profile_values = [self.profile_id, self.profile_name, self.length_slider, 'One Profile', self.activate_merge, self.save_cookies, self.request_status, self.date_time, self.profile_version]\r\n self.create_profile_data_frame(profile_meta_data, profile_values)\r\n\r\n def profile_includes(self):\r\n st.text(\"Profile Includes!\")\r\n st.code(f\"\"\"\r\n has_lower = {self.lowercase}\r\n has_upper = {self.uppercase}\r\n has_number = {self.numbers}\r\n has_symbol = {self.symbols}\r\n has_arabic = {self.arabic}\"\"\")\r\n\r\n def prime_key_meta_data(self):\r\n st.text(\"PrimeKey MetaData!\")\r\n active_profiles, active_profiles_ids = self.config_manager.get_active_profiles()\r\n st.code(f\"\"\"\r\n prime_key_id: '{self.config_has_key}'\r\n created_profiles: {self.created_profiles}\r\n active_profiles: {active_profiles} {active_profiles_ids}\r\n profiles_status: '{self.config_manager.get_online_profiles()}'\r\n date_time: '{self.config_manager.get_date_time()}'\"\"\")\r\n\r\n def create_profile_data_frame(self, profile_meta_data:list[str], profile_values:list[str | int | bool]) -> None:\r\n def load_data():\r\n return pd.DataFrame(\r\n {\r\n \"Profile MetaData\": profile_meta_data,\r\n \"Profile Values\": profile_values,\r\n }\r\n ) \r\n # Boolean to resize the dataframe, stored as a session state variable\r\n data_df = load_data()\r\n data_df['Profile Values'] = data_df['Profile Values'].astype(str)\r\n st.dataframe(data_df, use_container_width=500, hide_index=True) \r"
},
{
"identifier": "CreatePassword",
"path": "create_password.py",
"snippet": "class CreatePassword(PrimeKeyConfig):\r\n def create_new_password(self):\r\n key_id = \"prime-key-password\"\r\n self.hash_key = st.text_input(\"Enter Your Prime Key: (:red[Required])\", \r\n type=\"password\", \r\n help=\"Prime Key is your login token method so 'DnaKey' can recognize you!\", \r\n key=key_id)\r\n self.upload_hash_key = self.agent_prime_key(self.hash_key)\r\n if self.upload_hash_key == 1:\r\n self.tab_name = \"Create Password\"\r\n return\r\n else:\r\n self.tab_name = \"Create Password\"\r\n self.config_has_key = f\"dnakey${self.upload_hash_key[:32:2]}\"\r\n self.config_manager = ConfigManager(self.config_has_key) \r\n self.config_manager.update_date_time()\r\n self.config_manager.update_config()\r\n self.first_time = self.config_manager.check_active_usage() \r\n self.col3, self.col4 = st.columns(2)\r\n with self.col3:\r\n if self.first_time == \"first_time\":\r\n st.info(\"Hey there. It's look like your first time using your profile!\", icon=\"🏁\")\r\n with st.expander(\"Upload Profile [See More...]\"):\r\n self.uploaded_file = st.file_uploader(label=\"Upload Profile\",\r\n help=\"Required a dkp file only!\",\r\n accept_multiple_files=True,\r\n type=[\"dkp\"], key=\"file-01\",\r\n label_visibility=\"collapsed\") \r\n self.profile_data() \r\n self.uploaded_files_merge = self.uploaded_file\r\n self.create_password_advance_settings()\r\n with self.col4: \r\n if self.first_time == \"first_time\":\r\n st.success(\"Let me help you get your first password easily!\", icon=\"⚙\") \r\n if not self.uploaded_file:\r\n st.caption(\"Hello👋, To start creating your new password you need to upload your profile!\")\r\n if (self.uploaded_file) and (self.first_time == \"first_time\"):\r\n st.info(\"**_Active Profile For Now_**: Here you have to select the profile that you wanna use!\", icon=\"🛠\") \r\n self.input_for_user_key()\r\n self.decode_text_button() \r\n\r\n def profile_data(self):\r\n if not self.uploaded_file:\r\n return 1\r\n self.create_a_fernet_object()\r\n self.verify_uploaded_file()\r\n\r\n def create_a_fernet_object(self):\r\n # Create a Fernet object with the secret key\r\n secret_key = self.upload_hash_key.encode(\"utf-8\")\r\n self.fernet = Fernet(secret_key)\r\n\r\n def verify_uploaded_file(self):\r\n self.uploaded_file_unpack = []\r\n for file in range(len(self.uploaded_file)):\r\n encrypted_data = self.uploaded_file[file].read().decode(\"utf-8\")\r\n try:\r\n decrypted_data = self.fernet.decrypt(encrypted_data)\r\n decrypted_string = decrypted_data.decode()\r\n self.data = json.loads(decrypted_string)\r\n self.uploaded_file_unpack.append(self.data)\r\n except Exception:\r\n with self.col4:\r\n invalid_profile_name = self.uploaded_file[file].name[:-4].replace('dnakey_', '')\r\n st.error(f\"This is not a dnakey profile! '{invalid_profile_name}'\")\r\n time.sleep(0.5)\r\n st.info(\"If you don't know you can create a 'dnakey' profile in 'Create Profile window' in your left!\", icon=\"ℹ️\")\r\n st.stop() \r\n if len(self.uploaded_file_unpack) == 1:\r\n st.toast(\"**:green[The Profile Data Is Live...]**\", icon=\"🍰\") \r\n time.sleep(1)\r\n else:\r\n st.toast(\"**:blue[Your Profiles Data Is Live...]**\", icon=\"🍬\") \r\n time.sleep(1)\r\n\r\n def create_password_advance_settings(self):\r\n if self.uploaded_file and self.upload_hash_key:\r\n if self.first_time == \"first_time\":\r\n st.info(\"**_Advance Settings_**: Here you have the Merge it's an advanced method. 
You won't need it for now!\", icon=\"🧪\") \r\n with st.expander(\"Advance Settings [See More...]\"): \r\n self.new_merge_profile = CreateMergeProfile(self.uploaded_file_unpack, self.uploaded_files_merge)\r\n self.new_merge_profile.merge_builder()\r\n st.caption(\":red[Warning]: The profiles must have the same length!\")\r\n if self.first_time == \"first_time\":\r\n st.warning(\"**_Activate Random_**: If you activate this it gonna give you a temporary random password!\", icon=\"⚠\") \r\n self.activate_random_on = st.toggle(\"Activate Random: (:green[Optional])\")\r\n\r\n def select_file_option(self):\r\n MAX_UPLOADS = 11\r\n self.uploaded_length = len(self.uploaded_file)\r\n merge_options_name_length = len(self.new_merge_profile.merge_options_name)\r\n is_disabled = False if merge_options_name_length < 1 and self.uploaded_length != 1 else True\r\n if (self.uploaded_file) and (self.upload_hash_key) and (self.uploaded_length < MAX_UPLOADS):\r\n self.file_names_options = [self.uploaded_file[i].name[:-4].replace('dnakey_', '') for i in range(self.uploaded_length)] \r\n self.options = st.selectbox(\"Active Profile For Now: (:green[Live..])\", (self.file_names_options), disabled=is_disabled)\r\n if self.options: \r\n for j in range(self.uploaded_length):\r\n if self.options == self.file_names_options[j]:\r\n return self.uploaded_file_unpack[j]\r\n else:\r\n st.error(\"You can't upload more than 10 profile at once!\", icon='🚨')\r\n exit()\r\n\r\n def input_for_user_key(self):\r\n if not self.uploaded_file:\r\n return 1\r\n with self.col4:\r\n self.uploaded_file = self.select_file_option()\r\n max_length_input = self.uploaded_file.get(\"meta_data\", {}).get(\"profile_length\", None) // 4\r\n if self.first_time == \"first_time\":\r\n st.info(\"**_Enter Your Key_**: Here you can enter a sample word to encode it to a complex password!\", icon=\"🧬\") \r\n self.key_input = st.text_input(\"Enter Your Key: (:red[Required])\", \r\n max_chars=max_length_input,\r\n placeholder=\"Example: Blue / Cat...\",\r\n type=\"password\", \r\n key=\"input-00\")\r\n if self.first_time == \"first_time\":\r\n st.warning(\"**_Warning_**: You have to remember the word that you entered here!\", icon=\"⚠\") \r\n if not self.key_input:\r\n return 1\r\n self.check_input_for_user_key()\r\n\r\n def check_input_for_user_key(self):\r\n valid_pattern = re.compile(\"^[a-zA-Z0-9]+$\")\r\n if valid_pattern.search(self.key_input):\r\n self.key_input = self.key_input\r\n get_profile_name = self.uploaded_file.get(\"meta_data\", {}).get(\"profile_name\", None)\r\n get_request_status = self.uploaded_file.get(\"meta_data\", {}).get(\"request_status\", None)\r\n if not self.new_merge_profile.merge_options_name:\r\n st.toast(f\"**:orange[Profile {get_profile_name} Is {get_request_status}...]**\", icon=\"🍕\")\r\n time.sleep(1)\r\n else:\r\n st.caption(\":red[Warning]: If you don't know 'Dnakey' dont support (**symbols or empty spaces!**)\")\r\n self.key_input = None \r\n\r\n def decode_text_button(self):\r\n if not self.uploaded_file:\r\n return 1\r\n is_disabled = True if not self.key_input else False\r\n create_password_button = st.button(\"Create Password!\", disabled=is_disabled, key=1)\r\n if create_password_button:\r\n self.decode_profile_data() \r\n\r\n def decode_profile_data(self):\r\n self.key_input_list = self.key_input.strip()\r\n if self.key_input:\r\n if (self.uploaded_file) and (not self.new_merge_profile.merge_options_name):\r\n self.data = self.uploaded_file\r\n elif (self.uploaded_file) and 
(self.new_merge_profile.merge_options_name):\r\n self.data = self.new_merge_profile.profile_data_merge\r\n valid_hash_key = f\"dnakey${self.upload_hash_key[:32:2]}\"\r\n main_builder = MainBuilder(valid_hash_key, self.data)\r\n main_builder.dna_builder(self.key_input_list, self.activate_random_on)\r"
},
{
"identifier": "Feedbacks",
"path": "feedbacks/send_feedbacks.py",
"snippet": "class Feedbacks(PrimeKeyConfig):\r\n def __init__(self):\r\n with open(\"feedbacks/feedbacks_config.json\", \"r\") as f:\r\n self.feedback_data = json.loads(f.read())\r\n\r\n def feedbacks(self):\r\n key_id = \"prime-key-feedback\"\r\n self.hash_key = st.text_input(\"Enter Your Prime Key: (:red[Required])\", \r\n type=\"password\", \r\n help=\"Prime Key is your login token method so 'DnaKey' can recognize you!\", \r\n key=key_id)\r\n self.hash_key_feedback = self.agent_prime_key(self.hash_key)\r\n if self.hash_key_feedback != 1:\r\n self.config_has_key = f\"dnakey${self.hash_key_feedback[:32:2]}\"\r\n self.config_manager = ConfigManager(self.config_has_key) \r\n self.config_manager.update_date_time()\r\n self.config_manager.update_config()\r\n self.send_feedback_form()\r\n self.check_feedback_prime_key()\r\n self.display_feedback_result()\r\n self.get_feedbacks()\r\n\r\n def send_feedback_form(self):\r\n with st.form(\"send_feedback\", clear_on_submit=True):\r\n st.write(\"Send Feedback!\")\r\n self.user_feedback = st.text_area(\"Please Enter Your feedback: (:red[Required])\", \r\n max_chars=300, \r\n placeholder=\"You can leave your feedback here!\")\r\n self.user_rate = st.multiselect(\"Rate This with: (:green[Optional])\", \r\n options=[\"🔐🔐🔐\", \"🔐🔐🔐🔐\", \"🔐🔐🔐🔐🔐\"], \r\n default=\"🔐🔐🔐🔐🔐\", \r\n max_selections=1)\r\n self.submitted = st.form_submit_button(\"Submit\")\r\n\r\n def display_feedback_result(self):\r\n if self.user_feedback and self.submitted:\r\n with st.chat_message(\"user\"): \r\n st.write(\"Your Feedback!\") \r\n st.text(self.user_feedback)\r\n self.user_rate = f\"Rate: {self.user_rate}\" if self.user_rate else \"\"\r\n st.text(self.user_rate)\r\n\r\n def check_feedback_prime_key(self):\r\n self.feedback_has_key = f\"dnakey${self.hash_key_feedback[:32:2]}\"\r\n if (self.submitted) and (self.feedback_has_key in list(self.feedback_data.get(\"feedbacks\").keys())):\r\n st.info(\"You are already send as you feedback!\")\r\n exit()\r\n\r\n def get_feedbacks(self):\r\n if self.user_feedback and self.submitted:\r\n create_date = datetime.datetime.now()\r\n formatted_datetime = create_date.isoformat()\r\n feedback_data = {\r\n \"user_feedback\": self.user_feedback, \r\n \"user_rate\": (len(self.user_rate)-6), \r\n \"date_time\": formatted_datetime\r\n }\r\n self.feedback_data.get(\"feedbacks\").update({self.feedback_has_key: feedback_data})\r\n with open(\"feedbacks/feedbacks_config.json\", \"w\") as f:\r\n json.dump(self.feedback_data, f, indent=3)\r"
},
{
"identifier": "CO_FOUNDER",
"path": "license/license_manager.py",
"snippet": "CO_FOUNDER = license_data[\"co_founder\"]\r"
},
{
"identifier": "VERSION",
"path": "license/license_manager.py",
"snippet": "VERSION = license_data[\"version\"]\r"
}
] | import streamlit as st
from create_profile import CreateProfile
from create_password import CreatePassword
from feedbacks.send_feedbacks import Feedbacks
from license.license_manager import CO_FOUNDER, VERSION
| 5,133 |
# Set Streamlit page configuration
st.set_page_config(
page_title="Dnakey",
page_icon="🔐",
layout="centered",
)
|
# Set Streamlit page configuration
st.set_page_config(
page_title="Dnakey",
page_icon="🔐",
layout="centered",
)
| class App(CreateProfile, CreatePassword, Feedbacks):
| 0 | 2023-12-18 22:04:13+00:00 | 8k |
tamnva/hydroecolstm | examples/customLSTM_deleteme.py | [
{
"identifier": "Scaler",
"path": "hydroecolstm/utility/scaler.py",
"snippet": "class Scaler:\n def fit(self, x=None, method=None):\n # concatenat all object_id\n for i, object_id in zip(range(len(x)), x):\n if i == 0:\n cat_x = x[object_id]\n else:\n cat_x = torch.cat((cat_x, x[object_id]),0)\n \n # Get either min max or mean and standard deviation \n self.mins = _column_mins(cat_x)\n self.maxs = _column_maxs(cat_x)\n \n # Get means and standar deviation\n self.means = _column_means(cat_x)\n self.stds = _column_stds(cat_x)\n \n scaler_a = []\n scaler_b = []\n \n for i, method_name in zip(range(len(method)), method):\n if method_name == \"MinMaxScaler\":\n scaler_a.append(self.mins[i])\n scaler_b.append(self.maxs[i] - self.mins[i])\n elif method_name==\"Z-score\":\n scaler_a.append(self.means[i])\n scaler_b.append(self.stds[i])\n elif method_name==\"None\":\n scaler_a.append(0.0)\n scaler_b.append(1.0)\n else:\n print(\"Error: unknown scaler\")\n SystemExit(\"Program stop, please change scaler\")\n \n scaler_ab = torch.cat((torch.tensor(scaler_a, dtype=torch.float32),\n torch.tensor(scaler_b, dtype=torch.float32)), 0)\n \n self.scaler_parameter = torch.reshape(scaler_ab, \n (2,len(scaler_a)))\n \n def transform(self, x:dict[str:torch.tensor]=None) -> list: \n x_scale = {}\n for object_id in x:\n x_scale[object_id] = torch.div(torch.sub(x[object_id], \n self.scaler_parameter[0,:]), \n self.scaler_parameter[1,:]) \n return x_scale\n\n def inverse(self, x:list=None) -> list: \n x_inverse = {}\n for object_id in x:\n x_inverse[object_id] = torch.add(self.scaler_parameter[0,:],\n x[object_id]*self.scaler_parameter[1,:])\n return x_inverse"
},
{
"identifier": "get_scaler_name",
"path": "hydroecolstm/utility/scaler.py",
"snippet": "def get_scaler_name(config):\n \n if \"input_static_features\" not in config.keys():\n no_static_features = 0\n else:\n no_static_features = len(config[\"input_static_features\"])\n \n \n # Get name of scaler for dynamic input\n scaler_name_input = config[\"scaler_input_dynamic_features\"]*\\\n len(config[\"input_dynamic_features\"])\n \n # replicate this n times\n if no_static_features > 0 and\\\n \"scaler_input_static_features\" in config:\n for name in config[\"scaler_input_static_features\"]*no_static_features:\n scaler_name_input.append(name)\n \n # scaler name target\n scaler_name_target = config[\"scaler_target_features\"]*len(config[\"target_features\"])\n \n return scaler_name_input, scaler_name_target"
},
{
"identifier": "read_train_test_data",
"path": "hydroecolstm/data/read_data.py",
"snippet": "def read_train_test_data(config:dict=None) -> dict:\n \n # Read input data \n dynamic_data = pd.read_csv(config['dynamic_data_file'][0], \n delimiter=\",\", header=0) \n dynamic_data[\"time\"] = pd.to_datetime(dynamic_data[\"time\"], \n format = \"%Y-%m-%d %H:%M\")\n \n # The column names must contains the following names\n require_columns = [\"object_id\",\"time\"]\n require_columns.extend(config['input_dynamic_features'])\n require_columns.extend(config['target_features']) \n \n # Check if data header contains required names \n for name in require_columns: \n if name not in dynamic_data.columns:\n raise NameError(f\"Error: missing column '{name}' in dynamic data file \\n\")\n \n # Subset of dynamic_data - only use the required columns, rows\n dynamic_data = dynamic_data[require_columns]\n dynamic_data.set_index(\"object_id\", inplace=True)\n dynamic_data = dynamic_data.loc[config[\"object_id\"]]\n \n config[\"train_period\"] = pd.to_datetime(config[\"train_period\"], \n format = \"%Y-%m-%d %H:%M\")\n train_data = dynamic_data[(dynamic_data[\"time\"] >= config[\"train_period\"][0]) &\n (dynamic_data[\"time\"] <= config[\"train_period\"][1])]\n\n # Colum name of the ouput tensor\n x_column_name = config['input_dynamic_features'].copy()\n y_column_name = config['target_features'].copy()\n \n # split to train data by object id \n x_train = _split_by_object_id(train_data[x_column_name], config[\"object_id\"])\n y_train = _split_by_object_id(train_data[y_column_name], config[\"object_id\"])\n time_train = _time_by_object_id(train_data, config[\"object_id\"])\n\n \n config[\"test_period\"] = pd.to_datetime(config[\"test_period\"], format = \"%Y-%m-%d\")\n test_data = dynamic_data[(dynamic_data[\"time\"] >= config[\"test_period\"][0]) &\n (dynamic_data[\"time\"] <= config[\"test_period\"][1])]\n \n x_test = _split_by_object_id(test_data[x_column_name], config[\"object_id\"])\n y_test = _split_by_object_id(test_data[y_column_name], config[\"object_id\"])\n time_test = _time_by_object_id(test_data, config[\"object_id\"])\n \n # Read static input data file \n if 'input_static_features' in config:\n if len(config['input_static_features']) > 0:\n static_data = pd.read_csv(config['static_data_file'][0], delimiter=\",\", \n header=0)\n # The column names must contains the following names\n require_columns = [\"object_id\"]\n require_columns.extend(config['input_static_features']) \n \n # Check if data header contains required names\n for name in require_columns: \n if name not in static_data.columns:\n raise NameError(f\"Error: missing column '{name}' in static data\\n\")\n \n # Subset of dynamic_data - only use the required columns and rows\n static_data = static_data[require_columns]\n static_data.set_index(\"object_id\", inplace=True)\n static_data = torch.tensor(static_data.loc[config[\"object_id\"]].values,\n dtype=torch.float32)\n \n # Update columne name\n x_column_name.extend(config['input_static_features'])\n \n else:\n static_data = None\n \n # add static data to x_train and y_train\n if static_data is not None:\n for i, object_id in zip(range(len(x_train)), x_train):\n rep_static_data = static_data[i,].repeat(x_train[object_id].shape[0],1)\n x_train[object_id] = torch.cat((x_train[object_id], rep_static_data), 1)\n\n rep_static_data = static_data[i,].repeat(x_test[object_id].shape[0],1)\n x_test[object_id] = torch.cat((x_test[object_id], rep_static_data), 1)\n\n return {\"x_train\":x_train, \"y_train\": y_train, \"time_train\" : time_train, \n \"x_test\":x_test, \"y_test\": 
y_test, \"time_test\": time_test,\n \"x_column_name\": x_column_name, \"y_column_name\": y_column_name}"
},
{
"identifier": "read_config",
"path": "hydroecolstm/data/read_config.py",
"snippet": "def read_config(config_file):\n \n # Open and read the configureation file\n with open(config_file, 'r') as file:\n cfg = yaml.safe_load(file)\n \n # All required keywords\n key = [\"object_id\",\"input_dynamic_features\", \"target_features\", \n \"train_period\", \"test_period\", \"n_epochs\", \"learning_rate\", \n \"dynamic_data_file\"]\n \n # Check if required keywords are missing\n for keyword in key: \n if keyword not in cfg:\n raise NameError(f\"Error in configuration file, keyword '{keyword}' is missing\")\n \n #checkiftraiistest\n\n # Return output\n return cfg"
},
{
"identifier": "Lstm_Linears",
"path": "hydroecolstm/model/lstm_linears.py",
"snippet": "class Lstm_Linears(nn.Module):\n def __init__(self, config, **kwargs):\n \n super(Lstm_Linears, self).__init__()\n\n # Model structure parametery\n self.input_size = self.get_input_size(config)\n self.output_size = len(config[\"target_features\"])\n self.hidden_size = config[\"hidden_size\"]\n self.num_layers = config[\"num_layers\"]\n self.dropout = config[\"dropout\"]*min(1.0, self.num_layers - 1.0)\n self.linears_num_layers = config[\"REG\"][\"num_layers\"]\n self.linears_activation_function = config[\"REG\"][\"activation_function\"]\n self.linears_num_neurons = self.find_num_neurons(config=config) \n \n # Standard LSTM from torch\n self.lstm = nn.LSTM(input_size=self.input_size, \n hidden_size=self.hidden_size, \n num_layers=self.num_layers,\n dropout=self.dropout,\n **kwargs)\n \n # Fully-connected layer connect hidden and output\n self.linear = Linears(num_layers=self.linears_num_layers, \n activation_function=self.linears_activation_function,\n num_neurons=self.linears_num_neurons)\n \n def forward(self, x: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:\n \n y_predict = {}\n \n for key in x.keys():\n # get standard LSTM outputs\n y_lstm, _ = self.lstm(x[key])\n # get final output \n y_predict[key] = self.linear(y_lstm) \n \n # return output\n return y_predict\n \n # get input size\n def get_input_size(self, config) -> int:\n if \"input_static_features\" in config:\n input_size = (len(config[\"input_dynamic_features\"]) + \n len(config[\"input_static_features\"]))\n else:\n input_size = len(config[\"input_dynamic_features\"])\n return input_size\n \n # Find number of neuron in each linear layers, including the input layer\n def find_num_neurons(self, config) -> int:\n # First number of neurons from the input layers ()\n num_neurons = [self.hidden_size]\n\n if \"REG\" in config:\n if len(config[\"REG\"][\"num_neurons\"]) > 1:\n for i in range(len(config[\"REG\"][\"num_neurons\"])-1):\n num_neurons.append(config[\"REG\"][\"num_neurons\"][i])\n num_neurons.append(self.output_size)\n\n return num_neurons"
},
{
"identifier": "Ea_Lstm_Linears",
"path": "hydroecolstm/model/ea_lstm.py",
"snippet": "class Ea_Lstm_Linears(nn.Module):\n def __init__(self, config):\n \n super(Ea_Lstm_Linears, self).__init__()\n \n self.static_size = len(config[\"input_static_features\"])\n self.dynamic_size = len(config[\"input_dynamic_features\"])\n self.num_layers = config[\"num_layers\"]\n self.hidden_size = config[\"hidden_size\"]\n self.output_size = len(config[\"target_features\"])\n self.linears_num_layers = config[\"REG\"][\"num_layers\"]\n self.linears_activation_function = config[\"REG\"][\"activation_function\"]\n self.linears_num_neurons = self.find_num_neurons(config=config) \n\n # Model structure\n self.i = nn.Sequential(nn.Linear(self.static_size, self.hidden_size), nn.Sigmoid()) \n self.f = MultiLinear(self.dynamic_size, self.hidden_size, self.hidden_size, nn.Sigmoid())\n self.g = MultiLinear(self.dynamic_size, self.hidden_size, self.hidden_size, nn.Tanh())\n self.o = MultiLinear(self.dynamic_size, self.hidden_size, self.hidden_size, nn.Sigmoid())\n self.linear = Linears(num_layers=self.linears_num_layers, \n activation_function=self.linears_activation_function,\n num_neurons=self.linears_num_neurons)\n \n # TODO: This forward function takes too much times, need to improve\n def forward(self, x):\n # Initial hidden, cell states\n c_t = torch.randn(self.hidden_size).unsqueeze(0)\n h_t = torch.randn(self.hidden_size).unsqueeze(0)\n\n # Forward run\n output = {}\n for key in x.keys(): \n output[key] = torch.zeros(size=(x[key].shape[0],self.output_size))\n for i in range(x[key].shape[0]):\n i_t = self.i(x[key][i:i+1,self.dynamic_size:])\n f_t = self.f(x[key][i:i+1,:self.dynamic_size], h_t)\n g_t = self.g(x[key][i:i+1,:self.dynamic_size], h_t) \n o_t = self.o(x[key][i:i+1,:self.dynamic_size], h_t)\n \n c_t = f_t*c_t + i_t*g_t\n h_t = o_t*torch.tanh(c_t)\n \n output[key][i,:] = self.linear(h_t)\n\n return output\n \n # Find number of neuron in each linear layers, including the input layer\n def find_num_neurons(self, config) -> int:\n # First number of neurons from the input layers ()\n num_neurons = [self.hidden_size]\n\n if \"REG\" in config:\n if len(config[\"REG\"][\"num_neurons\"]) > 1:\n for i in range(len(config[\"REG\"][\"num_neurons\"])-1):\n num_neurons.append(config[\"REG\"][\"num_neurons\"][i])\n num_neurons.append(self.output_size)\n\n return num_neurons"
},
{
"identifier": "Train",
"path": "hydroecolstm/model/train.py",
"snippet": "class Train():\n def __init__(self, config, model, **kwargs):\n \n super(Train, self).__init__()\n\n # Training parameters\n self.lr = config[\"learning_rate\"]\n self.objective_function_name = config[\"objective_function_name\"]\n self.n_epochs = config[\"n_epochs\"]\n self.nskip = config[\"warmup_length\"]\n self.loss_function = LossFunction()\n self.model = model\n \n # Train function\n def __call__(self, x: Dict[str, torch.Tensor], y: Dict[str, torch.Tensor]):\n \n # Optimization function\n optim = torch.optim.Adam(self.model.parameters(), lr=self.lr)\n \n # Train the model\n for epoch in range(self.n_epochs):\n \n # Get model output\n y_predict= self.model(x)\n\n # Reset the gradients to zero\n optim.zero_grad()\n \n # Caculate loss function\n loss, loss_avg =\\\n self.loss_function(y_true=y, y_predict=y_predict, nskip=self.nskip,\n objective_function_name=self.objective_function_name)\n \n # Error back propagation LSTM.state_dict()\n loss_avg.backward()\n \n # Update weights and biases\n optim.step()\n \n # Print to console\n print(f\"Epoch [{epoch+1}/{self.n_epochs}], avg_loss = {loss_avg:.8f}\")\n \n return self.model, y_predict"
}
] | import numbers
import warnings
import torch
import torch.jit as jit
import torch.nn as nn
from collections import namedtuple
from typing import List, Tuple, Dict
from torch import Tensor
from torch.nn import Parameter
from hydroecolstm.utility.scaler import Scaler, get_scaler_name
from hydroecolstm.data.read_data import read_train_test_data
from hydroecolstm.data.read_config import read_config
from hydroecolstm.model.lstm_linears import Lstm_Linears
from hydroecolstm.model.ea_lstm import Ea_Lstm_Linears
from hydroecolstm.model.train import Train | 4,335 |
class EALSTMCell(jit.ScriptModule):
def __init__(self, dynamic_input_size, static_input_size, hidden_size):
super().__init__()
self.dynamic_input_size = dynamic_input_size
self.static_input_size = static_input_size
self.hidden_size = hidden_size
self.weight_sh = Parameter(torch.randn(hidden_size, static_input_size))
self.weight_dh = Parameter(torch.randn(3 * hidden_size, dynamic_input_size))
self.weight_hh = Parameter(torch.randn(3 * hidden_size, hidden_size))
self.bias_sh = Parameter(torch.randn(hidden_size))
self.bias_dh = Parameter(torch.randn(3 * hidden_size))
self.bias_hh = Parameter(torch.randn(3 * hidden_size))
@jit.script_method
def forward(self, dynamic_input: Tensor, static_input: Tensor,
state: Tuple[Tensor, Tensor]) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
# Initial state
hx, cx = state
# Gate input
gates = (torch.mm(dynamic_input, self.weight_dh.t())
+ self.bias_dh
+ torch.mm(hx, self.weight_hh.t())
+ self.bias_hh)
forgetgate, cellgate, outgate = gates.chunk(3, 1)
ingate = torch.mm(static_input, self.weight_sh.t()) + self.bias_sh
# Gate output
ingate = torch.sigmoid(ingate)
forgetgate = torch.sigmoid(forgetgate)
cellgate = torch.tanh(cellgate)
outgate = torch.sigmoid(outgate)
# Update state
cy = (forgetgate * cx) + (ingate * cellgate)
hy = outgate * torch.tanh(cy)
# Return state output
return hy, (hy, cy)
class EALSTMLayer(jit.ScriptModule):
def __init__(self, config):
super().__init__()
self.dynamic_input_size = len(config["input_dynamic_features"])
self.static_input_size = len(config["input_static_features"])
self.hidden_size = config["hidden_size"]
self.cell = EALSTMCell(self.dynamic_input_size, self.static_input_size,
self.hidden_size)
@jit.script_method
def forward(self, dynamic_input,
static_input,
state:Tuple[Tensor, Tensor]):
if state is None:
hx = torch.rand(self.hidden_size)
cx = torch.rand(self.hidden_size)
state = hx, cx
for i in range(len(dynamic_input)):
output, state = self.cell(dynamic_input[i:i+1,:],
static_input, state)
if i == 0:
outputs = output
else:
outputs = torch.cat((outputs, output), 0)
return output, state
#
config_file = "C:/Users/nguyenta/Documents/GitHub/HydroEcoLSTM/examples/experiments/config.yml"
# Load configuration
|
class EALSTMCell(jit.ScriptModule):
def __init__(self, dynamic_input_size, static_input_size, hidden_size):
super().__init__()
self.dynamic_input_size = dynamic_input_size
self.static_input_size = static_input_size
self.hidden_size = hidden_size
self.weight_sh = Parameter(torch.randn(hidden_size, static_input_size))
self.weight_dh = Parameter(torch.randn(3 * hidden_size, dynamic_input_size))
self.weight_hh = Parameter(torch.randn(3 * hidden_size, hidden_size))
self.bias_sh = Parameter(torch.randn(hidden_size))
self.bias_dh = Parameter(torch.randn(3 * hidden_size))
self.bias_hh = Parameter(torch.randn(3 * hidden_size))
@jit.script_method
def forward(self, dynamic_input: Tensor, static_input: Tensor,
state: Tuple[Tensor, Tensor]) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
# Initial state
hx, cx = state
# Gate input
gates = (torch.mm(dynamic_input, self.weight_dh.t())
+ self.bias_dh
+ torch.mm(hx, self.weight_hh.t())
+ self.bias_hh)
forgetgate, cellgate, outgate = gates.chunk(3, 1)
ingate = torch.mm(static_input, self.weight_sh.t()) + self.bias_sh
# Gate output
ingate = torch.sigmoid(ingate)
forgetgate = torch.sigmoid(forgetgate)
cellgate = torch.tanh(cellgate)
outgate = torch.sigmoid(outgate)
# Update state
cy = (forgetgate * cx) + (ingate * cellgate)
hy = outgate * torch.tanh(cy)
# Return state output
return hy, (hy, cy)
class EALSTMLayer(jit.ScriptModule):
def __init__(self, config):
super().__init__()
self.dynamic_input_size = len(config["input_dynamic_features"])
self.static_input_size = len(config["input_static_features"])
self.hidden_size = config["hidden_size"]
self.cell = EALSTMCell(self.dynamic_input_size, self.static_input_size,
self.hidden_size)
@jit.script_method
def forward(self, dynamic_input,
static_input,
state:Tuple[Tensor, Tensor]):
if state is None:
hx = torch.rand(self.hidden_size)
cx = torch.rand(self.hidden_size)
state = hx, cx
for i in range(len(dynamic_input)):
output, state = self.cell(dynamic_input[i:i+1,:],
static_input, state)
if i == 0:
outputs = output
else:
outputs = torch.cat((outputs, output), 0)
return output, state
#
config_file = "C:/Users/nguyenta/Documents/GitHub/HydroEcoLSTM/examples/experiments/config.yml"
# Load configuration | config = read_config(config_file) | 3 | 2023-12-20 09:11:36+00:00 | 8k |
camenduru/OpenLRM-hf | lrm/models/rendering/synthesizer.py | [
{
"identifier": "ImportanceRenderer",
"path": "lrm/models/rendering/utils/renderer.py",
"snippet": "class ImportanceRenderer(torch.nn.Module):\n \"\"\"\n Modified original version to filter out-of-box samples as TensoRF does.\n \n Reference:\n TensoRF: https://github.com/apchenstu/TensoRF/blob/main/models/tensorBase.py#L277\n \"\"\"\n def __init__(self):\n super().__init__()\n self.activation_factory = self._build_activation_factory()\n self.ray_marcher = MipRayMarcher2(self.activation_factory)\n self.plane_axes = generate_planes()\n\n def _build_activation_factory(self):\n def activation_factory(options: dict):\n if options['clamp_mode'] == 'softplus':\n return lambda x: F.softplus(x - 1) # activation bias of -1 makes things initialize better\n else:\n assert False, \"Renderer only supports `clamp_mode`=`softplus`!\"\n return activation_factory\n\n def _forward_pass(self, depths: torch.Tensor, ray_directions: torch.Tensor, ray_origins: torch.Tensor,\n planes: torch.Tensor, decoder: nn.Module, rendering_options: dict):\n \"\"\"\n Additional filtering is applied to filter out-of-box samples.\n Modifications made by Zexin He.\n \"\"\"\n\n # context related variables\n batch_size, num_rays, samples_per_ray, _ = depths.shape\n device = depths.device\n\n # define sample points with depths\n sample_directions = ray_directions.unsqueeze(-2).expand(-1, -1, samples_per_ray, -1).reshape(batch_size, -1, 3)\n sample_coordinates = (ray_origins.unsqueeze(-2) + depths * ray_directions.unsqueeze(-2)).reshape(batch_size, -1, 3)\n\n # filter out-of-box samples\n mask_inbox = \\\n (rendering_options['sampler_bbox_min'] <= sample_coordinates) & \\\n (sample_coordinates <= rendering_options['sampler_bbox_max'])\n mask_inbox = mask_inbox.all(-1)\n\n # forward model according to all samples\n _out = self.run_model(planes, decoder, sample_coordinates, sample_directions, rendering_options)\n\n # set out-of-box samples to zeros(rgb) & -inf(sigma)\n SAFE_GUARD = 3\n DATA_TYPE = _out['sigma'].dtype\n colors_pass = torch.zeros(batch_size, num_rays * samples_per_ray, 3, device=device, dtype=DATA_TYPE)\n densities_pass = torch.nan_to_num(torch.full((batch_size, num_rays * samples_per_ray, 1), -float('inf'), device=device, dtype=DATA_TYPE)) / SAFE_GUARD\n colors_pass[mask_inbox], densities_pass[mask_inbox] = _out['rgb'][mask_inbox], _out['sigma'][mask_inbox]\n\n # reshape back\n colors_pass = colors_pass.reshape(batch_size, num_rays, samples_per_ray, colors_pass.shape[-1])\n densities_pass = densities_pass.reshape(batch_size, num_rays, samples_per_ray, densities_pass.shape[-1])\n\n return colors_pass, densities_pass\n\n def forward(self, planes, decoder, ray_origins, ray_directions, rendering_options):\n # self.plane_axes = self.plane_axes.to(ray_origins.device)\n\n if rendering_options['ray_start'] == rendering_options['ray_end'] == 'auto':\n ray_start, ray_end = math_utils.get_ray_limits_box(ray_origins, ray_directions, box_side_length=rendering_options['box_warp'])\n is_ray_valid = ray_end > ray_start\n if torch.any(is_ray_valid).item():\n ray_start[~is_ray_valid] = ray_start[is_ray_valid].min()\n ray_end[~is_ray_valid] = ray_start[is_ray_valid].max()\n depths_coarse = self.sample_stratified(ray_origins, ray_start, ray_end, rendering_options['depth_resolution'], rendering_options['disparity_space_sampling'])\n else:\n # Create stratified depth samples\n depths_coarse = self.sample_stratified(ray_origins, rendering_options['ray_start'], rendering_options['ray_end'], rendering_options['depth_resolution'], rendering_options['disparity_space_sampling'])\n\n # Coarse Pass\n colors_coarse, densities_coarse = 
self._forward_pass(\n depths=depths_coarse, ray_directions=ray_directions, ray_origins=ray_origins,\n planes=planes, decoder=decoder, rendering_options=rendering_options)\n\n # Fine Pass\n N_importance = rendering_options['depth_resolution_importance']\n if N_importance > 0:\n _, _, weights = self.ray_marcher(colors_coarse, densities_coarse, depths_coarse, rendering_options)\n\n depths_fine = self.sample_importance(depths_coarse, weights, N_importance)\n\n colors_fine, densities_fine = self._forward_pass(\n depths=depths_fine, ray_directions=ray_directions, ray_origins=ray_origins,\n planes=planes, decoder=decoder, rendering_options=rendering_options)\n\n all_depths, all_colors, all_densities = self.unify_samples(depths_coarse, colors_coarse, densities_coarse,\n depths_fine, colors_fine, densities_fine)\n\n # Aggregate\n rgb_final, depth_final, weights = self.ray_marcher(all_colors, all_densities, all_depths, rendering_options)\n else:\n rgb_final, depth_final, weights = self.ray_marcher(colors_coarse, densities_coarse, depths_coarse, rendering_options)\n\n return rgb_final, depth_final, weights.sum(2)\n\n def run_model(self, planes, decoder, sample_coordinates, sample_directions, options):\n plane_axes = self.plane_axes.to(planes.device)\n sampled_features = sample_from_planes(plane_axes, planes, sample_coordinates, padding_mode='zeros', box_warp=options['box_warp'])\n\n out = decoder(sampled_features, sample_directions)\n if options.get('density_noise', 0) > 0:\n out['sigma'] += torch.randn_like(out['sigma']) * options['density_noise']\n return out\n\n def run_model_activated(self, planes, decoder, sample_coordinates, sample_directions, options):\n out = self.run_model(planes, decoder, sample_coordinates, sample_directions, options)\n out['sigma'] = self.activation_factory(options)(out['sigma'])\n return out\n\n def sort_samples(self, all_depths, all_colors, all_densities):\n _, indices = torch.sort(all_depths, dim=-2)\n all_depths = torch.gather(all_depths, -2, indices)\n all_colors = torch.gather(all_colors, -2, indices.expand(-1, -1, -1, all_colors.shape[-1]))\n all_densities = torch.gather(all_densities, -2, indices.expand(-1, -1, -1, 1))\n return all_depths, all_colors, all_densities\n\n def unify_samples(self, depths1, colors1, densities1, depths2, colors2, densities2):\n all_depths = torch.cat([depths1, depths2], dim = -2)\n all_colors = torch.cat([colors1, colors2], dim = -2)\n all_densities = torch.cat([densities1, densities2], dim = -2)\n\n _, indices = torch.sort(all_depths, dim=-2)\n all_depths = torch.gather(all_depths, -2, indices)\n all_colors = torch.gather(all_colors, -2, indices.expand(-1, -1, -1, all_colors.shape[-1]))\n all_densities = torch.gather(all_densities, -2, indices.expand(-1, -1, -1, 1))\n\n return all_depths, all_colors, all_densities\n\n def sample_stratified(self, ray_origins, ray_start, ray_end, depth_resolution, disparity_space_sampling=False):\n \"\"\"\n Return depths of approximately uniformly spaced samples along rays.\n \"\"\"\n N, M, _ = ray_origins.shape\n if disparity_space_sampling:\n depths_coarse = torch.linspace(0,\n 1,\n depth_resolution,\n device=ray_origins.device).reshape(1, 1, depth_resolution, 1).repeat(N, M, 1, 1)\n depth_delta = 1/(depth_resolution - 1)\n depths_coarse += torch.rand_like(depths_coarse) * depth_delta\n depths_coarse = 1./(1./ray_start * (1. 
- depths_coarse) + 1./ray_end * depths_coarse)\n else:\n if type(ray_start) == torch.Tensor:\n depths_coarse = math_utils.linspace(ray_start, ray_end, depth_resolution).permute(1,2,0,3)\n depth_delta = (ray_end - ray_start) / (depth_resolution - 1)\n depths_coarse += torch.rand_like(depths_coarse) * depth_delta[..., None]\n else:\n depths_coarse = torch.linspace(ray_start, ray_end, depth_resolution, device=ray_origins.device).reshape(1, 1, depth_resolution, 1).repeat(N, M, 1, 1)\n depth_delta = (ray_end - ray_start)/(depth_resolution - 1)\n depths_coarse += torch.rand_like(depths_coarse) * depth_delta\n\n return depths_coarse\n\n def sample_importance(self, z_vals, weights, N_importance):\n \"\"\"\n Return depths of importance sampled points along rays. See NeRF importance sampling for more.\n \"\"\"\n with torch.no_grad():\n batch_size, num_rays, samples_per_ray, _ = z_vals.shape\n\n z_vals = z_vals.reshape(batch_size * num_rays, samples_per_ray)\n weights = weights.reshape(batch_size * num_rays, -1) # -1 to account for loss of 1 sample in MipRayMarcher\n\n # smooth weights\n weights = torch.nn.functional.max_pool1d(weights.unsqueeze(1).float(), 2, 1, padding=1)\n weights = torch.nn.functional.avg_pool1d(weights, 2, 1).squeeze()\n weights = weights + 0.01\n\n z_vals_mid = 0.5 * (z_vals[: ,:-1] + z_vals[: ,1:])\n importance_z_vals = self.sample_pdf(z_vals_mid, weights[:, 1:-1],\n N_importance).detach().reshape(batch_size, num_rays, N_importance, 1)\n return importance_z_vals\n\n def sample_pdf(self, bins, weights, N_importance, det=False, eps=1e-5):\n \"\"\"\n Sample @N_importance samples from @bins with distribution defined by @weights.\n Inputs:\n bins: (N_rays, N_samples_+1) where N_samples_ is \"the number of coarse samples per ray - 2\"\n weights: (N_rays, N_samples_)\n N_importance: the number of samples to draw from the distribution\n det: deterministic or not\n eps: a small number to prevent division by zero\n Outputs:\n samples: the sampled samples\n \"\"\"\n N_rays, N_samples_ = weights.shape\n weights = weights + eps # prevent division by zero (don't do inplace op!)\n pdf = weights / torch.sum(weights, -1, keepdim=True) # (N_rays, N_samples_)\n cdf = torch.cumsum(pdf, -1) # (N_rays, N_samples), cumulative distribution function\n cdf = torch.cat([torch.zeros_like(cdf[: ,:1]), cdf], -1) # (N_rays, N_samples_+1)\n # padded to 0~1 inclusive\n\n if det:\n u = torch.linspace(0, 1, N_importance, device=bins.device)\n u = u.expand(N_rays, N_importance)\n else:\n u = torch.rand(N_rays, N_importance, device=bins.device)\n u = u.contiguous()\n\n inds = torch.searchsorted(cdf, u, right=True)\n below = torch.clamp_min(inds-1, 0)\n above = torch.clamp_max(inds, N_samples_)\n\n inds_sampled = torch.stack([below, above], -1).view(N_rays, 2*N_importance)\n cdf_g = torch.gather(cdf, 1, inds_sampled).view(N_rays, N_importance, 2)\n bins_g = torch.gather(bins, 1, inds_sampled).view(N_rays, N_importance, 2)\n\n denom = cdf_g[...,1]-cdf_g[...,0]\n denom[denom<eps] = 1 # denom equals 0 means a bin has weight 0, in which case it will not be sampled\n # anyway, therefore any value for it is fine (set to 1 here)\n\n samples = bins_g[...,0] + (u-cdf_g[...,0])/denom * (bins_g[...,1]-bins_g[...,0])\n return samples"
},
{
"identifier": "RaySampler",
"path": "lrm/models/rendering/utils/ray_sampler.py",
"snippet": "class RaySampler(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.ray_origins_h, self.ray_directions, self.depths, self.image_coords, self.rendering_options = None, None, None, None, None\n\n\n def forward(self, cam2world_matrix, intrinsics, render_size):\n \"\"\"\n Create batches of rays and return origins and directions.\n\n cam2world_matrix: (N, 4, 4)\n intrinsics: (N, 3, 3)\n render_size: int\n\n ray_origins: (N, M, 3)\n ray_dirs: (N, M, 2)\n \"\"\"\n\n N, M = cam2world_matrix.shape[0], render_size**2\n cam_locs_world = cam2world_matrix[:, :3, 3]\n fx = intrinsics[:, 0, 0]\n fy = intrinsics[:, 1, 1]\n cx = intrinsics[:, 0, 2]\n cy = intrinsics[:, 1, 2]\n sk = intrinsics[:, 0, 1]\n\n uv = torch.stack(torch.meshgrid(\n torch.arange(render_size, dtype=torch.float32, device=cam2world_matrix.device),\n torch.arange(render_size, dtype=torch.float32, device=cam2world_matrix.device),\n indexing='ij',\n ))\n uv = uv.flip(0).reshape(2, -1).transpose(1, 0)\n uv = uv.unsqueeze(0).repeat(cam2world_matrix.shape[0], 1, 1)\n\n x_cam = uv[:, :, 0].view(N, -1) * (1./render_size) + (0.5/render_size)\n y_cam = uv[:, :, 1].view(N, -1) * (1./render_size) + (0.5/render_size)\n z_cam = torch.ones((N, M), device=cam2world_matrix.device)\n\n x_lift = (x_cam - cx.unsqueeze(-1) + cy.unsqueeze(-1)*sk.unsqueeze(-1)/fy.unsqueeze(-1) - sk.unsqueeze(-1)*y_cam/fy.unsqueeze(-1)) / fx.unsqueeze(-1) * z_cam\n y_lift = (y_cam - cy.unsqueeze(-1)) / fy.unsqueeze(-1) * z_cam\n\n cam_rel_points = torch.stack((x_lift, y_lift, z_cam, torch.ones_like(z_cam)), dim=-1)\n\n _opencv2blender = torch.tensor([\n [1, 0, 0, 0],\n [0, -1, 0, 0],\n [0, 0, -1, 0],\n [0, 0, 0, 1],\n ], dtype=torch.float32, device=cam2world_matrix.device).unsqueeze(0).repeat(N, 1, 1)\n\n cam2world_matrix = torch.bmm(cam2world_matrix, _opencv2blender)\n\n world_rel_points = torch.bmm(cam2world_matrix, cam_rel_points.permute(0, 2, 1)).permute(0, 2, 1)[:, :, :3]\n\n ray_dirs = world_rel_points - cam_locs_world[:, None, :]\n ray_dirs = torch.nn.functional.normalize(ray_dirs, dim=2)\n\n ray_origins = cam_locs_world.unsqueeze(1).repeat(1, ray_dirs.shape[1], 1)\n\n return ray_origins, ray_dirs"
}
] | import itertools
import torch
import torch.nn as nn
from .utils.renderer import ImportanceRenderer
from .utils.ray_sampler import RaySampler | 4,667 | # ORIGINAL LICENSE
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# Modified by Zexin He
# The modifications are subject to the same license as the original.
class OSGDecoder(nn.Module):
"""
Triplane decoder that gives RGB and sigma values from sampled features.
Using ReLU here instead of Softplus in the original implementation.
Reference:
EG3D: https://github.com/NVlabs/eg3d/blob/main/eg3d/training/triplane.py#L112
"""
def __init__(self, n_features: int,
hidden_dim: int = 64, num_layers: int = 4, activation: nn.Module = nn.ReLU):
super().__init__()
self.net = nn.Sequential(
nn.Linear(3 * n_features, hidden_dim),
activation(),
*itertools.chain(*[[
nn.Linear(hidden_dim, hidden_dim),
activation(),
] for _ in range(num_layers - 2)]),
nn.Linear(hidden_dim, 1 + 3),
)
# init all bias to zero
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.zeros_(m.bias)
def forward(self, sampled_features, ray_directions):
# Aggregate features by mean
# sampled_features = sampled_features.mean(1)
# Aggregate features by concatenation
_N, n_planes, _M, _C = sampled_features.shape
sampled_features = sampled_features.permute(0, 2, 1, 3).reshape(_N, _M, n_planes*_C)
x = sampled_features
N, M, C = x.shape
x = x.contiguous().view(N*M, C)
x = self.net(x)
x = x.view(N, M, -1)
rgb = torch.sigmoid(x[..., 1:])*(1 + 2*0.001) - 0.001 # Uses sigmoid clamping from MipNeRF
sigma = x[..., 0:1]
return {'rgb': rgb, 'sigma': sigma}
class TriplaneSynthesizer(nn.Module):
"""
Synthesizer that renders a triplane volume with planes and a camera.
Reference:
EG3D: https://github.com/NVlabs/eg3d/blob/main/eg3d/training/triplane.py#L19
"""
DEFAULT_RENDERING_KWARGS = {
'ray_start': 'auto',
'ray_end': 'auto',
'box_warp': 2.,
'white_back': True,
'disparity_space_sampling': False,
'clamp_mode': 'softplus',
'sampler_bbox_min': -1.,
'sampler_bbox_max': 1.,
}
def __init__(self, triplane_dim: int, samples_per_ray: int):
super().__init__()
# attributes
self.triplane_dim = triplane_dim
self.rendering_kwargs = {
**self.DEFAULT_RENDERING_KWARGS,
'depth_resolution': samples_per_ray // 2,
'depth_resolution_importance': samples_per_ray // 2,
}
# renderings
self.renderer = ImportanceRenderer()
| # ORIGINAL LICENSE
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# Modified by Zexin He
# The modifications are subject to the same license as the original.
class OSGDecoder(nn.Module):
"""
Triplane decoder that gives RGB and sigma values from sampled features.
Using ReLU here instead of Softplus in the original implementation.
Reference:
EG3D: https://github.com/NVlabs/eg3d/blob/main/eg3d/training/triplane.py#L112
"""
def __init__(self, n_features: int,
hidden_dim: int = 64, num_layers: int = 4, activation: nn.Module = nn.ReLU):
super().__init__()
self.net = nn.Sequential(
nn.Linear(3 * n_features, hidden_dim),
activation(),
*itertools.chain(*[[
nn.Linear(hidden_dim, hidden_dim),
activation(),
] for _ in range(num_layers - 2)]),
nn.Linear(hidden_dim, 1 + 3),
)
# init all bias to zero
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.zeros_(m.bias)
def forward(self, sampled_features, ray_directions):
# Aggregate features by mean
# sampled_features = sampled_features.mean(1)
# Aggregate features by concatenation
_N, n_planes, _M, _C = sampled_features.shape
sampled_features = sampled_features.permute(0, 2, 1, 3).reshape(_N, _M, n_planes*_C)
x = sampled_features
N, M, C = x.shape
x = x.contiguous().view(N*M, C)
x = self.net(x)
x = x.view(N, M, -1)
rgb = torch.sigmoid(x[..., 1:])*(1 + 2*0.001) - 0.001 # Uses sigmoid clamping from MipNeRF
sigma = x[..., 0:1]
return {'rgb': rgb, 'sigma': sigma}
class TriplaneSynthesizer(nn.Module):
"""
Synthesizer that renders a triplane volume with planes and a camera.
Reference:
EG3D: https://github.com/NVlabs/eg3d/blob/main/eg3d/training/triplane.py#L19
"""
DEFAULT_RENDERING_KWARGS = {
'ray_start': 'auto',
'ray_end': 'auto',
'box_warp': 2.,
'white_back': True,
'disparity_space_sampling': False,
'clamp_mode': 'softplus',
'sampler_bbox_min': -1.,
'sampler_bbox_max': 1.,
}
def __init__(self, triplane_dim: int, samples_per_ray: int):
super().__init__()
# attributes
self.triplane_dim = triplane_dim
self.rendering_kwargs = {
**self.DEFAULT_RENDERING_KWARGS,
'depth_resolution': samples_per_ray // 2,
'depth_resolution_importance': samples_per_ray // 2,
}
# renderings
self.renderer = ImportanceRenderer() | self.ray_sampler = RaySampler() | 1 | 2023-12-21 16:30:19+00:00 | 8k |
garinops/chat-E-AI | ai/openai/tools/tools.py | [
{
"identifier": "ToolTime",
"path": "ai/openai/tools/TOOL_TIME.py",
"snippet": "class ToolTime(object):\n TOOL_MODEL = {\n \"type\": \"function\",\n \"function\": {\n\n # [必填:value可编辑],注意所有Tools中保持唯一,且和下方的静态方法函数保持命名一致。\n \"name\": \"get_time\",\n\n # [必填:value可编辑],工具功能介绍。\n \"description\": \"获取指定地理位置的时间,如果未指定,默认为北京时间\",\n\n \"parameters\": {\n \"type\": \"object\",\n\n # [必填:value],联网工具参数。如果有参数,参照下方,自行配置增添所需参数,如果没有参数,则使用 \"properties\": {}, 。\n \"properties\": {\n # [选填:key可编辑],具体所需参数。\n \"location\": {\n # [必填:value可编辑],参数类型\n \"type\": \"string\",\n # [必填:value可编辑],参数描述。\n \"description\": \"中文地理位置。\",\n },\n \"offset_hours\": {\n # [必填:value可编辑],参数类型\n \"type\": \"string\",\n # [必填:value可编辑],参数描述。\n \"description\": \"该位置与UTC的小时偏移量,数字形式\",\n },\n },\n\n # [选填],需要OpenAI必须返回的参数,则在下方数组中指定。如果不需要,则使用 \"required\": [], 。\n \"required\": [\"location\", \"offset_hours\"],\n },\n }\n }\n\n @staticmethod\n def get_time(location: str, offset_hours: str) -> ResponseBase:\n response_tool = ResponseBase(answer=\"\", source=\"Time\")\n\n # 获取当前时间(UTC)\n current_time = datetime.utcnow()\n\n # 计算指定偏移量的时间\n offset = timedelta(hours=int(offset_hours))\n target_time = current_time + offset\n\n # 格式化时间\n format_time = target_time.strftime(\"%Y-%m-%d, %A, %H:%M:%S\")\n response_tool.answer = \"{}时间,{}\".format(location, format_time)\n\n return response_tool"
},
{
"identifier": "ToolWttrIn",
"path": "ai/openai/tools/WTTR_IN.py",
"snippet": "class ToolWttrIn(object):\n TOOL_MODEL = {\n # 工具模型类型,必填,目前API支持 function\n \"type\": \"function\",\n \"function\": {\n # 函数名称,必填,The name of the get_weather to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.\n \"name\": \"get_weather\",\n # 函数描述,必填,A description of what the get_weather does, used by the model to choose when and how to call the get_weather.\n \"description\": \"获取给定地理位置的天气数据\",\n # 函数参数,必填。\n \"parameters\": {\n \"type\": \"object\",\n # JSON键类型,及描述。\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"中文地理位置。\",\n },\n },\n \"required\": [\"location\"],\n },\n }\n }\n\n @staticmethod\n @timeout_decorator.timeout(4)\n def get_weather(location: str) -> ResponseBase:\n language = pywttr.Language(\"zh-cn\")\n response_tool = ResponseBase(answer=\"\", source=\"wttr•in\")\n\n \"\"\"\"疑难杂症处理\"\"\"\n if location in [\"南极\"]:\n loggerToolWttrIn.warning(f'This Entry Did Not Make a Request to the Weather Server Due to a Bug in the Weather Server, Which May Be Fix in the Future.')\n response_tool.answer = \"亲爱的,我无法获取该地区的天气信息,你输入的地理位置是否过于宽泛呢?\"\n return response_tool\n if location == \"北极\":\n location = \"North Pole\"\n\n \"\"\"\"正经处理\"\"\"\n try:\n weather_wttr = pywttr.get_weather(location=location, language=language)\n # print(weather_wttr)\n weather = f\"当前{location}{weather_wttr.current_condition[0].lang_zh_cn[0].value},\" \\\n f\"{weather_wttr.current_condition[0].temp_c}°C,\" \\\n f\"风速{weather_wttr.current_condition[0].windspeed_kmph}km/h,\" \\\n f\"湿度{weather_wttr.current_condition[0].humidity}%,\" \\\n f\"降水{weather_wttr.current_condition[0].precip_mm}mm。\"\n response_tool.answer = weather\n except timeout_decorator.TimeoutError:\n loggerToolWttrIn.warning(f'Exception(timeout_decorator.TimeoutError) was encountered when get_weather({location})')\n response_tool.answer = \"亲爱的,wttr·in天气服务器可能发生了宕机,在使用过程中请节制访问。\"\n except requests.exceptions.ConnectionError:\n loggerToolWttrIn.warning(f'Exception(requests.exceptions.ConnectionError) was encountered when get_weather({location})')\n response_tool.answer = \"亲爱的,我可能失去了天气服务的网络连接。\"\n except urllib3.exceptions.MaxRetryError:\n loggerToolWttrIn.warning(f'Exception(urllib3.exceptions.MaxRetryError) was encountered when get_weather({location})')\n response_tool.answer = \"亲爱的,我遇到了障碍。\\n\\n这可能是有很多人在同时使用天气服务。\"\n except requests.exceptions.HTTPError:\n loggerToolWttrIn.warning(f'Exception(requests.exceptions.HTTPError) was encountered when get_weather({location})')\n response_tool.answer = \"亲爱的,我无法获取该地区的天气信息,大概是我们的尚没有收录该地区的天气情况。\\n\\n当然你也可以给我提供其他语言,这可能会增进我的理解。\"\n except json.decoder.JSONDecodeError:\n loggerToolWttrIn.warning(f'Exception(json.decoder.JSONDecodeError) was encountered when get_weather({location})')\n response_tool.answer = \"亲爱的,我无法获取该地区的天气信息,你输入的地理位置是否过于宽泛呢?\\n\\n当然你也可以给我提供其他语言,这可能会增进我的理解。\"\n return response_tool"
},
{
"identifier": "ToolWwwGarinassetCom",
"path": "ai/openai/tools/WWW_GARINASSET_COM.py",
"snippet": "class ToolWwwGarinassetCom(object):\n TOOL_MODEL = {\n # 工具类型,必填,强制function\n \"type\": \"function\",\n \"function\": {\n # 函数名称,必填,The name of the get_weather to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.\n \"name\": \"get_indicator_overview\",\n # 函数描述,必填,A description of what the get_weather does, used by the model to choose when and how to call the get_weather.\n \"description\": \"获取给定国家、行政区域的宏微观经济数据,行业数据,消费品市场价格数据,例如中国GDP,汽车产量,鸡蛋价格,如果没有给定行政区域,默认为中国大陆。\",\n # 函数参数,必填。\n \"parameters\": {\n \"type\": \"object\",\n # JSON键类型,及描述。\n \"properties\": {\n \"region\": {\n \"type\": \"string\",\n \"description\": \"中文行政区域,例如,中国大陆、香港。\",\n },\n \"name\": {\n \"type\": \"string\",\n \"description\": \"中文指标名称,例如国内生产总值,GDP,鸡蛋价格。\"\n },\n },\n \"required\": [\"region\", \"name\"],\n },\n }\n }\n\n @staticmethod\n def _handle_datetime(name_property_json, data_latest_time) -> str:\n match name_property_json:\n case \"月值\":\n _date = pd.Timestamp(data_latest_time)\n _dateTime = \"{}年{}月\".format(_date.year, _date.month)\n return _dateTime\n case \"累月值\":\n _date = pd.Timestamp(data_latest_time)\n _dateTime = \"{}年前{}月\".format(_date.year, _date.month)\n return _dateTime\n case \"季值\":\n _date = pd.Timestamp(data_latest_time)\n _dateTime = \"{}年{}季度\".format(_date.year, _date.quarter)\n return _dateTime\n case \"年值\":\n _date = pd.Timestamp(data_latest_time)\n _dateTime = \"{}年\".format(_date.year)\n return _dateTime\n case _:\n _date = pd.Timestamp(data_latest_time)\n _dateTime = \"{}年{}月{}日\".format(_date.year, _date.month, _date.day)\n return _dateTime\n\n @staticmethod\n def _handle_element(element):\n _name_attribute = None\n _data_year_over_year_diff = None\n _currency = None\n _unit = None\n\n if element['names']['name_attribute_json'] is not None:\n _name_attribute = \"(统计口径:{})\".format(element['names']['name_attribute_json'][0])\n if element['currencies'] is not None:\n _currency = element['currencies']['currency_json'][0]\n if element['units'] is not None:\n _unit = element['units']['unit_json'][0]\n if element['data_year_over_year'] is not None:\n data_year_over_year = element['data_year_over_year']['data_latest_value']\n data_year_over_year_fixed = element['data_year_over_year_fixed']\n _data_year_over_year_diff = round(data_year_over_year - data_year_over_year_fixed, 2)\n if _data_year_over_year_diff is not None and _data_year_over_year_diff > 0:\n _data_year_over_year_diff = \",同比:+{}%。\".format(_data_year_over_year_diff)\n else:\n _data_year_over_year_diff = \",同比:{}%。\".format(_data_year_over_year_diff)\n _data_latest_time = element['data']['data_latest_time']\n _name_property_json = element['names']['name_property_json'][0]\n _dateTime = ToolWwwGarinassetCom._handle_datetime(data_latest_time=_data_latest_time,\n name_property_json=_name_property_json)\n\n element_overview = \"{},{}{}{}为{}{}{}{}\".format(\n _dateTime,\n element['regions']['region_json'][0],\n element['names']['name_json'][0],\n _name_attribute if _name_attribute else \"\",\n round(element['data']['data_latest_value'], 2),\n _unit if _unit else \"\",\n _currency if _currency else \"\",\n _data_year_over_year_diff if _data_year_over_year_diff else \"。\",\n )\n return element_overview\n\n @staticmethod\n def get_indicator_overview(region: str, name: str) -> ResponseBase:\n response_tool = ResponseBase(answer=\"\", source=\"嘉林数据\")\n # API请求\n query = \"{} {}\".format(region, name)\n url = \"https://api.garinasset.com/www/v1/searches/indicators\"\n params 
= {\"q\": query}\n response_api = None\n\n try:\n response_api = requests.get(url, params=params)\n # 检查响应状态码,如果不是 2xx,会引发异常\n response_api.raise_for_status()\n # 返回响应的 JSON 数据\n response_api = response_api.json()\n\n indicator_overview = \"\"\n\n data_list = response_api['data']\n if data_list:\n for index, element in enumerate(data_list):\n if index == 0:\n indicator_overview = ToolWwwGarinassetCom._handle_element(element)\n elif index == 1:\n indicator_overview = indicator_overview + \"\\n\\n\" + \"相关数据:\\n\" + \"\\n\" + ToolWwwGarinassetCom._handle_element(\n element)\n elif index == 6:\n break\n else:\n indicator_overview = indicator_overview + \"\\n\\n\" + ToolWwwGarinassetCom._handle_element(element)\n response_tool.answer = indicator_overview\n return response_tool\n else:\n response_tool.answer = \"亲爱的,我无法获取该项数据信息,大概是数据商尚没有收录该数据。\\n\\n当然也可能是我错误理解了你的问题。\"\n return response_tool\n except requests.exceptions.HTTPError:\n _status_code = response_api.status_code\n if _status_code == 422:\n logger.warning(f\"Exception(requests.exceptions.HTTPError{_status_code}) was encountered when get_indicator_overview({region},{name})\")\n response_tool.answer = \"亲爱的,我无法提供相关的数据服务,你是否需要修改问题呢?\"\n return response_tool\n elif _status_code == 401:\n logger.warning(f\"Exception(requests.exceptions.HTTPError[{_status_code}]) was encountered when get_indicator_overview({region},{name})\")\n response_tool.answer = \"亲爱的,你没有嘉林数据的访问权限,暂时无法给你提供数据响应。\"\n return response_tool\n elif _status_code >= 500:\n logger.warning(f\"Exception(requests.exceptions.HTTPError[{_status_code}]) was encountered when get_indicator_overview({region},{name})\")\n response_tool.answer = \"亲爱的,宏微观经济数据库正在升级,暂时无法给你提供响应。\"\n return response_tool\n else:\n logger.warning(f\"Exception(requests.exceptions.HTTPError[{_status_code}]) was encountered when get_indicator_overview({region},{name})\")\n response_tool.answer = \"亲爱的,我遇到了未知的网络故障,这需要一定的处理时间。\"\n return response_tool\n except requests.exceptions.ConnectionError as e:\n logger.warning(f\"Exception(requests.exceptions.ConnectionError was encountered when get_indicator_overview({region},{name})\")\n response_tool.answer = \"亲爱的,我可能失去了宏微观经济数据库服务的网络连接。\"\n return response_tool\n except requests.exceptions.RequestException:\n logger.warning(f\"Exception(requests.exceptions.RequestException was encountered when get_indicator_overview({region},{name})\")\n response_tool.answer = \"亲爱的,我明白我现在的处境,程序运行发生了故障哦。\"\n return response_tool"
},
{
"identifier": "ToolXueqiuCom",
"path": "ai/openai/tools/XUEQIU_COM.py",
"snippet": "class ToolXueqiuCom(object):\n TOOL_MODEL = {\n # 工具模型类型,必填,目前API支持 function\n \"type\": \"function\",\n \"function\": {\n # 函数名称,必填\n \"name\": \"get_stock\",\n # 函数描述,必填\n \"description\": \"获取给定股票代码的股票信息,如果获取不到股票代码询问市场及代码。\",\n # 函数参数,必填。\n \"parameters\": {\n \"type\": \"object\",\n # JSON键类型,及描述。\n \"properties\": {\n \"name\": {\n \"type\": \"string\",\n \"description\": \"股票名称\",\n },\n \"symbol\": {\n \"type\": \"string\",\n \"description\": \"股票代码,上海市场前缀有SH,例如:SH600104;深圳市场前有缀SZ,例如:SZ300315;香港市场代码为五位数字,没有前缀,不足五位前面补'0',例如腾讯代码是‘00700’;其他市场保持原样,例如美国市场苹果:AAPL。\",\n },\n },\n \"required\": [\"name\", \"symbol\"],\n },\n }\n }\n\n @staticmethod\n @timeout_decorator.timeout(8)\n def get_stock(name: str, symbol: str) -> ResponseBase:\n response_tool = ResponseBase(answer=\"\", source=\"xueqiu•com\")\n _stock = ''\n try:\n _quote = pysnowball.quotec(symbols=symbol)\n _quoteD = _quote['data'][0]\n _xueQiu = XueQiu.model_construct()\n if _quoteD:\n _xueQiu = XueQiu(**_quoteD)\n _percent = ''\n if _xueQiu.percent:\n _percent = (\"下跌\" + str(_xueQiu.percent) + '%,') if _xueQiu.percent <0 else (\"上涨+\" + str(_xueQiu.percent) + '%,')\n else:\n _percent = ''\n\n _stock = \"{},代码{}。{}{}{}{}{}{}{}\".format(\n name,\n _xueQiu.symbol,\n (\"当前股价\" + str(_xueQiu.current)) if _xueQiu.current else '当前股价未获得',\n '(交易中),' if _xueQiu.is_trade else '(不在交易状态),',\n _percent,\n (\"成交金额\" + str(_xueQiu.amount)+ '。') if _xueQiu.amount else '。',\n (\"目前总市值\" + str(_xueQiu.market_capital) + ',') if _xueQiu.market_capital else '',\n (\"流动市值\"+ str(_xueQiu.float_market_capital) + '。') if _xueQiu.float_market_capital else '',\n (\"该股票年初至今涨跌\" + str(_xueQiu.current_year_percent) + '%。') if _xueQiu.current_year_percent else '',\n )\n else:\n raise ValueError\n response_tool.answer = _stock\n except ValueError:\n logger.warning(f'Exception(ValueError) was encountered when get_stock({symbol})')\n response_tool.answer = \"亲爱的,你能告诉我股票所在市场以及股票代码吗?这样可以增进我的理解。\"\n except timeout_decorator.TimeoutError:\n logger.warning(f'Exception(timeout_decorator.TimeoutError) was encountered when get_stock({symbol})')\n response_tool.answer = \"亲爱的,访问xueqiu•com服务超时,请在使用过程中保持节制。\"\n return response_tool"
},
{
"identifier": "OPENAI_TOOLS_CONFIG",
"path": "config/settings.py",
"snippet": "OPENAI_TOOLS_CONFIG = [\n {\"enable\": True, \"Tool\": ToolWwwGarinassetCom, \"description\": \"嘉林数据-宏微观经济数据库\"},\n {\"enable\": True, \"Tool\": ToolWttrIn, \"description\": \"wttr.in-天气\"},\n {\"enable\": True, \"Tool\": ToolTime, \"description\": \"time-时间\"},\n {\"enable\": True, \"Tool\": ToolXueqiuCom, \"description\": \"xueqiu.com-股票\"},\n]"
},
{
"identifier": "ResponseBase",
"path": "models/response.py",
"snippet": "class ResponseBase(BaseModel):\n answer: str\n source: str"
}
] | from ai.openai.tools.TOOL_TIME import ToolTime
from ai.openai.tools.WTTR_IN import ToolWttrIn
from ai.openai.tools.WWW_GARINASSET_COM import ToolWwwGarinassetCom
from ai.openai.tools.XUEQIU_COM import ToolXueqiuCom
from config.settings import OPENAI_TOOLS_CONFIG
from models.response import ResponseBase | 5,127 |
class OpenAITools:
@staticmethod
def get_tools() -> list:
tools = []
for tool_config in OPENAI_TOOLS_CONFIG:
if tool_config["enable"]:
tool_class = tool_config["Tool"]
tools.append(tool_class.TOOL_MODEL)
return tools
@staticmethod
def handle(name_tool_call: str, parameter_variables) -> ResponseBase:
"""1、处理路由OpenAI响应的function.name决定。"""
"""2、工具函数参数及变量值也是由OpenAI响应决定,需要具体工具具体相应处理。"""
match name_tool_call:
# 1.宏微观经济数据、行业数据、消费品市场价格数据工具-处理
case ToolWwwGarinassetCom.get_indicator_overview.__name__:
region = parameter_variables.get("region")
name = parameter_variables.get("name")
toolResponse = ToolWwwGarinassetCom.get_indicator_overview(region=region, name=name)
return toolResponse
# 2.天气工具-处理
case ToolWttrIn.get_weather.__name__:
location = parameter_variables.get("location")
|
class OpenAITools:
@staticmethod
def get_tools() -> list:
tools = []
for tool_config in OPENAI_TOOLS_CONFIG:
if tool_config["enable"]:
tool_class = tool_config["Tool"]
tools.append(tool_class.TOOL_MODEL)
return tools
@staticmethod
def handle(name_tool_call: str, parameter_variables) -> ResponseBase:
"""1、处理路由OpenAI响应的function.name决定。"""
"""2、工具函数参数及变量值也是由OpenAI响应决定,需要具体工具具体相应处理。"""
match name_tool_call:
# 1.宏微观经济数据、行业数据、消费品市场价格数据工具-处理
case ToolWwwGarinassetCom.get_indicator_overview.__name__:
region = parameter_variables.get("region")
name = parameter_variables.get("name")
toolResponse = ToolWwwGarinassetCom.get_indicator_overview(region=region, name=name)
return toolResponse
# 2.天气工具-处理
case ToolWttrIn.get_weather.__name__:
location = parameter_variables.get("location") | toolResponse = ToolWttrIn.get_weather(location=location) | 1 | 2023-12-16 17:02:13+00:00 | 8k |
ruudjuffermans/Event-Driven-Backtester | example.py | [
{
"identifier": "Loop",
"path": "backtester/loop.py",
"snippet": "class Loop:\n def __init__(\n self,\n data_handler,\n execution_handler,\n portfolio,\n strategy,\n heartbeat,\n ):\n self.heartbeat = heartbeat\n\n self.data_handler = data_handler\n self.execution_handler = execution_handler\n self.portfolio = portfolio\n self.strategy = strategy\n\n self.events = queue.Queue()\n self.signals = 0\n self.orders = 0\n self.fills = 0\n self.num_strats = 1\n\n self._set_datahandler()\n self._set_portfolio()\n self._set_execution_handler()\n self._set_strategy()\n\n def _set_datahandler(self):\n if isinstance(self.data_handler, CSVGenerator):\n self.data_handler.register(self.events)\n else:\n raise NotImplementedError(\"Data feed not implemented\")\n\n def _set_strategy(self):\n self.strategy.register(self.data_handler, self.events)\n\n def _set_portfolio(self):\n self.portfolio.register(self.data_handler, self.events)\n\n def _set_execution_handler(self):\n self.execution_handler.register(self.events)\n\n def _run_backtest(self):\n \"\"\"\n Executes the backtest.\n \"\"\"\n while True:\n if self.data_handler.continue_backtest:\n self.data_handler.update_bars()\n else:\n break\n while True:\n try:\n event = self.events.get(False)\n except queue.Empty:\n break\n else:\n if event is not None:\n if isinstance(event, MarketEvent):\n self.strategy.calculate(event)\n self.portfolio.update_timeindex(event)\n\n elif isinstance(event, SignalEvent):\n self.signals += 1\n self.portfolio.update_signal(event)\n\n elif isinstance(event, OrderEvent):\n self.orders += 1\n self.execution_handler.execute_order(event)\n\n elif isinstance(event, FillEvent):\n self.fills += 1\n self.portfolio.update_fill(event)\n\n time.sleep(self.heartbeat)\n\n def _output_performance(self):\n \"\"\"\n Outputs the strategy performance from the backtest.\n \"\"\"\n self.portfolio.create_equity_curve_dataframe()\n\n print(\"Creating summary stats...\")\n stats = self.portfolio.output_summary_stats()\n\n print(\"Creating equity curve...\")\n print(self.portfolio.equity_curve.tail(10))\n\n pprint.pprint(stats)\n print(\"Signals: %s\" % self.signals)\n print(\"Orders: %s\" % self.orders)\n print(\"Fills: %s\" % self.fills)\n\n def start(self):\n \"\"\"\n Simulates the backtest and outputs portfolio performance.\n \"\"\"\n self._run_backtest()\n self._output_performance()"
},
{
"identifier": "CSVGenerator",
"path": "backtester/generator/csvgenerator.py",
"snippet": "class CSVGenerator(Generator):\n def __init__(self, symbol_list):\n self.csv_dir = Path.cwd() / \"data\"\n self.symbol_list = symbol_list\n\n self.symbol_data = {}\n self.latest_symbol_data = {}\n self.continue_backtest = True\n self._load()\n\n def register(self, events):\n self.events = events\n\n def _load(self):\n combined_index = None\n for symbol in self.symbol_list:\n self.symbol_data[symbol] = pd.io.parsers.read_csv(\n os.path.join(self.csv_dir, \"%s.csv\" % symbol),\n header=0,\n index_col=0,\n names=[\n \"datetime\",\n \"open\",\n \"high\",\n \"low\",\n \"close\",\n \"adj_close\",\n \"volume\",\n ],\n )\n\n # Combine the index to pad forward values\n if combined_index is None:\n combined_index = self.symbol_data[symbol].index\n else:\n combined_index.union(self.symbol_data[symbol].index)\n\n # Set the latest symbol_data to None\n self.latest_symbol_data[symbol] = []\n\n # Reindex the dataframes\n for symbol in self.symbol_list:\n self.symbol_data[symbol] = (\n self.symbol_data[symbol]\n .reindex(index=combined_index, method=\"pad\")\n .iterrows()\n )\n\n def _get_new_bar(self, symbol):\n for bar in self.symbol_data[symbol]:\n yield bar\n\n def get_latest_bar(self, symbol):\n try:\n bars_list = self.latest_symbol_data[symbol]\n except KeyError:\n print(\"That symbol is not available in the historical data set.\")\n raise\n else:\n return bars_list[-1]\n\n def get_latest_bars(self, symbol, N=1):\n try:\n bars_list = self.latest_symbol_data[symbol]\n except KeyError:\n print(\"That symbol is not available in the historical data set.\")\n raise\n else:\n return bars_list[-N:]\n\n def get_latest_bar_datetime(self, symbol):\n try:\n bars_list = self.latest_symbol_data[symbol]\n except KeyError:\n print(\"That symbol is not available in the historical data set.\")\n raise\n else:\n return bars_list[-1][0]\n\n def get_latest_bar_value(self, symbol, value_type):\n try:\n bars_list = self.latest_symbol_data[symbol]\n except KeyError:\n print(\"That symbol is not available in the historical data set.\")\n raise\n else:\n return getattr(bars_list[-1][1], value_type)\n\n def get_latest_bars_values(self, symbol, value_type, N=1):\n try:\n bars_list = self.get_latest_bars(symbol, N)\n except KeyError:\n print(\"That symbol is not available in the historical data set.\")\n raise\n else:\n return np.array([getattr(bar[1], value_type) for bar in bars_list])\n\n def update_bars(self):\n for symbol in self.symbol_list:\n try:\n bar = next(self._get_new_bar(symbol))\n except StopIteration:\n self.continue_backtest = False\n else:\n if bar is not None:\n self.latest_symbol_data[symbol].append(bar)\n self.events.put(MarketEvent())"
},
{
"identifier": "SimulatedExecutionHandler",
"path": "backtester/execution.py",
"snippet": "class SimulatedExecutionHandler(ExecutionHandler):\n def __init__(self):\n pass\n\n def execute_order(self, event):\n if isinstance(event, OrderEvent):\n fill_event = FillEvent(\n datetime.utcnow(),\n event.symbol,\n \"BT\",\n event.quantity,\n event.direction,\n None,\n )\n self.events.put(fill_event)"
},
{
"identifier": "Portfolio",
"path": "backtester/portfolio.py",
"snippet": "class Portfolio:\r\n def __init__(self, window, initial_capital=100000.0):\r\n self.start_date = window.start\r\n self.initial_capital = initial_capital\r\n\r\n def register(self, bars, events):\r\n self.bars = bars\r\n\r\n self.events = events\r\n self.symbol_list = self.bars.symbol_list\r\n self.current_positions = {symbol: 0 for symbol in self.symbol_list}\r\n self.all_positions = self.define_all_positions()\r\n self.all_holdings = self.define_all_holdings()\r\n self.current_holdings = self.define_current_holdings()\r\n\r\n def define_all_positions(self):\r\n positions = {symbol: 0 for symbol in self.symbol_list}\r\n positions[\"datetime\"] = self.start_date\r\n return [positions]\r\n\r\n def define_all_holdings(self):\r\n holdings = {symbol: 0.0 for symbol in self.symbol_list}\r\n holdings[\"datetime\"] = self.start_date\r\n holdings[\"cash\"] = self.initial_capital\r\n holdings[\"commission\"] = 0.0\r\n holdings[\"total\"] = self.initial_capital\r\n return [holdings]\r\n\r\n def define_current_holdings(self):\r\n holdings = {symbol: 0.0 for symbol in self.symbol_list}\r\n holdings[\"cash\"] = self.initial_capital\r\n holdings[\"commission\"] = 0.0\r\n holdings[\"total\"] = self.initial_capital\r\n return holdings\r\n\r\n def update_timeindex(self, event):\r\n latest_datetime = self.bars.get_latest_bar_datetime(self.symbol_list[0])\r\n\r\n positions = {\r\n symbol: self.current_positions[symbol] for symbol in self.symbol_list\r\n }\r\n positions[\"datetime\"] = latest_datetime\r\n\r\n self.all_positions.append(positions)\r\n holdings = {symbol: 0.0 for symbol in self.symbol_list}\r\n holdings[\"datetime\"] = latest_datetime\r\n holdings[\"cash\"] = self.current_holdings[\"cash\"]\r\n holdings[\"commission\"] = self.current_holdings[\"commission\"]\r\n holdings[\"total\"] = self.current_holdings[\"cash\"]\r\n\r\n for symbol in self.symbol_list:\r\n market_value = self.current_positions[\r\n symbol\r\n ] * self.bars.get_latest_bar_value(symbol, \"close\")\r\n holdings[symbol] = market_value\r\n holdings[\"total\"] += market_value\r\n\r\n self.all_holdings.append(holdings)\r\n\r\n def update_positions_after_fill(self, fill):\r\n fill_dir = 0\r\n if fill.direction == \"BUY\":\r\n fill_dir = 1\r\n if fill.direction == \"SELL\":\r\n fill_dir = -1\r\n self.current_positions[fill.symbol] += fill_dir * fill.quantity\r\n\r\n def update_holdings_after_fill(self, fill):\r\n fill_dir = 0\r\n if fill.direction == \"BUY\":\r\n fill_dir = 1\r\n if fill.direction == \"SELL\":\r\n fill_dir = -1\r\n fill_cost = self.bars.get_latest_bar_value(fill.symbol, \"close\")\r\n cost = fill_dir * fill_cost * fill.quantity\r\n self.current_holdings[fill.symbol] += cost\r\n self.current_holdings[\"commission\"] += fill.commission\r\n self.current_holdings[\"cash\"] -= cost + fill.commission\r\n self.current_holdings[\"total\"] -= cost + fill.commission\r\n\r\n def update_fill(self, event):\r\n if isinstance(event, FillEvent):\r\n self.update_positions_after_fill(event)\r\n self.update_holdings_after_fill(event)\r\n\r\n def generate_naive_order(self, signal):\r\n order = None\r\n symbol = signal.symbol\r\n direction = signal.signal_type\r\n strength = signal.strength\r\n\r\n mkt_quantity = floor(100 * strength)\r\n current_quantity = self.current_positions[symbol]\r\n order_type = \"MKT\"\r\n\r\n if direction == \"LONG\" and current_quantity == 0:\r\n order = OrderEvent(symbol, order_type, mkt_quantity, \"BUY\")\r\n if direction == \"SHORT\" and current_quantity == 0:\r\n order = OrderEvent(symbol, 
order_type, mkt_quantity, \"SELL\")\r\n if direction == \"EXIT\" and current_quantity > 0:\r\n order = OrderEvent(symbol, order_type, abs(current_quantity), \"SELL\")\r\n if direction == \"EXIT\" and current_quantity < 0:\r\n order = OrderEvent(symbol, order_type, abs(current_quantity), \"BUY\")\r\n return order\r\n\r\n def update_signal(self, event):\r\n if isinstance(event, SignalEvent):\r\n order_event = self.generate_naive_order(event)\r\n self.events.put(order_event)\r\n\r\n def create_equity_curve_dataframe(self):\r\n equity_curve = pd.DataFrame(self.all_holdings)\r\n equity_curve.set_index(\"datetime\", inplace=True)\r\n equity_curve[\"returns\"] = equity_curve[\"total\"].pct_change()\r\n equity_curve[\"equity_curve\"] = (1.0 + equity_curve[\"returns\"]).cumprod()\r\n self.equity_curve = equity_curve\r\n\r\n def output_summary_stats(self):\r\n total_return = self.equity_curve[\"equity_curve\"][-1]\r\n returns = self.equity_curve[\"returns\"]\r\n pnl = self.equity_curve[\"equity_curve\"]\r\n sharpe_ratio = create_sharpe_ratio(returns, periods=252 * 60 * 6.5)\r\n drawdown, max_dd, max_dd_duration = create_drawdowns(pnl)\r\n self.equity_curve[\"drawdown\"] = drawdown\r\n\r\n stats = [\r\n (\"Total Return\", \"%0.2f%%\" % ((total_return - 1.0) * 100.0)),\r\n (\"Sharpe Ratio\", \"%0.2f\" % sharpe_ratio),\r\n (\"Max Drawdown\", \"%0.2f%%\" % (max_dd * 100.0)),\r\n (\"Max Drawdown Duration\", \"%d\" % max_dd_duration),\r\n ]\r\n self.equity_curve.to_csv(\"equity.csv\")\r\n return stats\r"
},
{
"identifier": "MACrossOverStrategy",
"path": "backtester/strategy/ma_crossover.py",
"snippet": "class MACrossOverStrategy(Strategy):\n def __init__(self, short_window=100, long_window=400):\n self.short_window = short_window\n self.long_window = long_window\n\n def register(self, bars, events):\n self.bars = bars\n self.events = events\n self.symbol_list = self.bars.symbol_list\n self.bought = self._calculate_initial_bought()\n\n def _calculate_initial_bought(self):\n bought = {symbol: \"OUT\" for symbol in self.symbol_list}\n return bought\n\n def calculate(self, event):\n if isinstance(event, MarketEvent):\n for symbol in self.symbol_list:\n bars = self.bars.get_latest_bars_values(\n symbol, \"close\", N=self.long_window\n )\n bar_datetime = self.bars.get_latest_bar_datetime(symbol)\n\n if bars is not None and bars != []:\n print(bar_datetime)\n\n # long_sma = np.mean( bars[-self.long_window:] )\n\n # dt = datetime.datetime.utcnow()\n # signal_type = \"\"\n # strength = 1.0\n\n # if short_sma > long_sma and self.bought[symbol] == \"OUT\":\n # print(\"LONG position at: %s\" % bar_datetime)\n # signal_type = \"LONG\"\n # signal = SignalEvent(symbol, dt, signal_type, strength)\n # self.events.put(signal)\n # self.bought[symbol] = \"LONG\"\n\n # elif short_sma < long_sma and self.bought[symbol] == \"LONG\":\n # print(\"SHORT position at: %s\" % bar_datetime)\n # signal_type = \"EXIT\"\n # signal = SignalEvent(symbol, dt, signal_type, strength)\n # self.events.put(signal)\n # self.bought[symbol] = \"OUT\""
},
{
"identifier": "Window",
"path": "backtester/types.py",
"snippet": "class Window:\n def __init__(self, start, end, interval):\n self.start = start\n self.end = end\n self.interval = interval"
}
] | from datetime import datetime
from backtester.loop import Loop
from backtester.generator import CSVGenerator
from backtester.execution import SimulatedExecutionHandler
from backtester.portfolio import Portfolio
from backtester.strategy import MACrossOverStrategy
from backtester.types import Window | 3,645 |
symbol_list = ["BIG"]
window = Window(
start=datetime(2016, 1, 1, 0, 0, 0),
end=datetime(2021, 1, 1, 0, 0, 0),
interval="1d",
)
generator = CSVGenerator(symbol_list)
portfolio = Portfolio(window, 100000.0)
strategy = MACrossOverStrategy()
execution = SimulatedExecutionHandler()
|
symbol_list = ["BIG"]
window = Window(
start=datetime(2016, 1, 1, 0, 0, 0),
end=datetime(2021, 1, 1, 0, 0, 0),
interval="1d",
)
generator = CSVGenerator(symbol_list)
portfolio = Portfolio(window, 100000.0)
strategy = MACrossOverStrategy()
execution = SimulatedExecutionHandler()
| backtest = Loop(generator, execution, portfolio, strategy, 0.0) | 0 | 2023-12-16 21:09:00+00:00 | 8k |